diff --git a/Pipfile b/Pipfile index 5ecbde1f..efce9513 100644 --- a/Pipfile +++ b/Pipfile @@ -9,38 +9,41 @@ pbr = "*" coverage = "*" [packages] +alembic = "*" connexion = {extras = ["swagger-ui"],version = "*"} -swagger-ui-bundle = "*" +coverage = "*" +docxtpl = "*" flask = "*" +flask-admin = "*" flask-bcrypt = "*" flask-cors = "*" +flask-mail = "*" flask-marshmallow = "*" flask-migrate = "*" flask-restful = "*" +gunicorn = "*" httpretty = "*" +ldap3 = "*" +lxml = "*" +markdown = "*" marshmallow = "*" marshmallow-enum = "*" marshmallow-sqlalchemy = "*" openpyxl = "*" -pyjwt = "*" -requests = "*" -xlsxwriter = "*" -webtest = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"} -alembic = "*" -coverage = "*" -sphinx = "*" -recommonmark = "*" -psycopg2-binary = "*" -docxtpl = "*" -python-dateutil = "*" pandas = "*" -xlrd = "*" -ldap3 = "*" -gunicorn = "*" -werkzeug = "*" +psycopg2-binary = "*" +pyjwt = "*" +python-dateutil = "*" +recommonmark = "*" +requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} -flask-mail = "*" +sphinx = "*" +spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "STG-26"} +swagger-ui-bundle = "*" +webtest = "*" +werkzeug = "*" +xlrd = "*" +xlsxwriter = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 2f99c84f..9b79a526 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "faaf0e1f31f4bf99df366e52df20bb148a05996a0e6467767660665c514af2d7" + "sha256": "45ac71a0a66c2f55518be6fbc93a1b76e6a53ad3c7a557c3cb371d07781698b6" }, "pipfile-spec": 6, "requires": { @@ -35,6 +35,7 @@ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -49,6 +50,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -56,6 +58,7 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -79,6 +82,7 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -104,17 +108,18 @@ }, "celery": { "hashes": [ - "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647", - "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b" + "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", + "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "version": "==4.4.5" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.4.6" }, "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - 
"sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" ], - "version": "==2020.4.5.2" + "version": "==2020.6.20" }, "cffi": { "hashes": [ @@ -161,6 +166,7 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -182,6 +188,7 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], + "markers": "python_version >= '3.6'", "version": "==5.0.0" }, "connexion": { @@ -237,6 +244,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -261,6 +269,13 @@ "index": "pypi", "version": "==1.1.2" }, + "flask-admin": { + "hashes": [ + "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8" + ], + "index": "pypi", + "version": "==1.5.6" + }, "flask-bcrypt": { "hashes": [ "sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f" @@ -312,12 +327,14 @@ "sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5", "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.3" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -340,6 +357,7 @@ "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.9" }, "imagesize": { @@ -347,6 +365,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -362,6 +381,7 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], + "markers": "python_version >= '3.5'", "version": "==0.5.0" }, "itsdangerous": { @@ -369,6 +389,7 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -383,6 +404,7 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -394,14 +416,19 @@ }, "kombu": { "hashes": [ - 
"sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a", - "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3" + "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", + "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "version": "==4.6.10" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.6.11" }, "ldap3": { "hashes": [ "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -437,6 +464,7 @@ "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9", "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529" ], + "index": "pypi", "version": "==4.5.1" }, "mako": { @@ -444,8 +472,17 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, + "markdown": { + "hashes": [ + "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17", + "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59" + ], + "index": "pypi", + "version": "==3.2.2" + }, "markupsafe": { "hashes": [ "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", @@ -482,6 +519,7 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "marshmallow": { @@ -510,29 +548,35 @@ }, "numpy": { "hashes": [ - "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233", - "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b", - "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7", - "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f", - "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5", - "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb", - "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583", - "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1", - "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a", - "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271", - "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824", - "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3", - "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc", - "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161", - "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f", - "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f", - "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf", - "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b", - 
"sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0", - "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675", - "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8" + "sha256:13af0184177469192d80db9bd02619f6fa8b922f9f327e077d6f2a6acb1ce1c0", + "sha256:26a45798ca2a4e168d00de75d4a524abf5907949231512f372b217ede3429e98", + "sha256:26f509450db547e4dfa3ec739419b31edad646d21fb8d0ed0734188b35ff6b27", + "sha256:30a59fb41bb6b8c465ab50d60a1b298d1cd7b85274e71f38af5a75d6c475d2d2", + "sha256:33c623ef9ca5e19e05991f127c1be5aeb1ab5cdf30cb1c5cf3960752e58b599b", + "sha256:356f96c9fbec59974a592452ab6a036cd6f180822a60b529a975c9467fcd5f23", + "sha256:3c40c827d36c6d1c3cf413694d7dc843d50997ebffbc7c87d888a203ed6403a7", + "sha256:4d054f013a1983551254e2379385e359884e5af105e3efe00418977d02f634a7", + "sha256:63d971bb211ad3ca37b2adecdd5365f40f3b741a455beecba70fd0dde8b2a4cb", + "sha256:658624a11f6e1c252b2cd170d94bf28c8f9410acab9f2fd4369e11e1cd4e1aaf", + "sha256:76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598", + "sha256:7b57f26e5e6ee2f14f960db46bd58ffdca25ca06dd997729b1b179fddd35f5a3", + "sha256:7b852817800eb02e109ae4a9cef2beda8dd50d98b76b6cfb7b5c0099d27b52d4", + "sha256:8cde829f14bd38f6da7b2954be0f2837043e8b8d7a9110ec5e318ae6bf706610", + "sha256:a2e3a39f43f0ce95204beb8fe0831199542ccab1e0c6e486a0b4947256215632", + "sha256:a86c962e211f37edd61d6e11bb4df7eddc4a519a38a856e20a6498c319efa6b0", + "sha256:a8705c5073fe3fcc297fb8e0b31aa794e05af6a329e81b7ca4ffecab7f2b95ef", + "sha256:b6aaeadf1e4866ca0fdf7bb4eed25e521ae21a7947c59f78154b24fc7abbe1dd", + "sha256:be62aeff8f2f054eff7725f502f6228298891fd648dc2630e03e44bf63e8cee0", + "sha256:c2edbb783c841e36ca0fa159f0ae97a88ce8137fb3a6cd82eae77349ba4b607b", + "sha256:cbe326f6d364375a8e5a8ccb7e9cd73f4b2f6dc3b2ed205633a0db8243e2a96a", + "sha256:d34fbb98ad0d6b563b95de852a284074514331e6b9da0a9fc894fb1cdae7a79e", + "sha256:d97a86937cf9970453c3b62abb55a6475f173347b4cde7f8dcdb48c8e1b9952d", + "sha256:dd53d7c4a69e766e4900f29db5872f5824a06827d594427cf1a4aa542818b796", + "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a", + "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596" ], - "version": "==1.18.5" + "markers": "python_version >= '3.6'", + "version": "==1.19.0" }, "openapi-spec-validator": { "hashes": [ @@ -544,39 +588,40 @@ }, "openpyxl": { "hashes": [ - "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64" + "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f" ], "index": "pypi", - "version": "==3.0.3" + "version": "==3.0.4" }, "packaging": { "hashes": [ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { "hashes": [ - "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46", - "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5", - "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa", - "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc", - "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678", - "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc", - "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31", - 
"sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8", - "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6", - "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa", - "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4", - "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874", - "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd", - "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4", - "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126", - "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648" + "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096", + "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0", + "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453", + "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc", + "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7", + "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b", + "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8", + "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8", + "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef", + "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d", + "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705", + "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9", + "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91", + "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3", + "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107", + "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e" ], "index": "pypi", - "version": "==1.0.4" + "version": "==1.0.5" }, "psycopg2-binary": { "hashes": [ @@ -616,8 +661,19 @@ }, "pyasn1": { "hashes": [ + "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3" ], "version": "==0.4.8" }, @@ -626,6 +682,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -633,6 +690,7 @@ 
"sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], + "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -648,6 +706,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -674,10 +733,67 @@ "hashes": [ "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", + "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522" ], "version": "==1.0.4" }, + "python-levenshtein-wheels": { + "hashes": [ + "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d", + "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92", + "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc", + "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb", + "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b", + "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc", + "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1", + "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40", + "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31", + "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d", + "sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10", + "sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103", + "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4", + "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6", + "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f", + "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0", + "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399", + "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0", + "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f", + "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5", + "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2", + "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c", + "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7", + "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3", + "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55", + "sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e", + "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362", + "sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662", + "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8", + "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1", + "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0", + "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884", 
+ "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021", + "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2", + "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa", + "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f", + "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab", + "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263", + "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610", + "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa", + "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d", + "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4", + "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92", + "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a", + "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26", + "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99", + "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e", + "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8", + "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244", + "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c" + ], + "version": "==0.13.1" + }, "pytz": { "hashes": [ "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", @@ -711,11 +827,11 @@ }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "sentry-sdk": { "extras": [ @@ -733,6 +849,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -747,6 +864,7 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], + "markers": "python_version >= '3.5'", "version": "==2.0.1" }, "sphinx": { @@ -762,6 +880,7 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -769,6 +888,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -776,6 +896,7 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -783,6 +904,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, 
"sphinxcontrib-qthelp": { @@ -790,6 +912,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -797,45 +920,47 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0" + "ref": "599f41fcf9257196710806e16bef023c836735f4" }, "sqlalchemy": { "hashes": [ - "sha256:128bc917ed20d78143a45024455ff0aed7d3b96772eba13d5dbaf9cc57e5c41b", - "sha256:156a27548ba4e1fed944ff9fcdc150633e61d350d673ae7baaf6c25c04ac1f71", - "sha256:27e2efc8f77661c9af2681755974205e7462f1ae126f498f4fe12a8b24761d15", - "sha256:2a12f8be25b9ea3d1d5b165202181f2b7da4b3395289000284e5bb86154ce87c", - "sha256:31c043d5211aa0e0773821fcc318eb5cbe2ec916dfbc4c6eea0c5188971988eb", - "sha256:65eb3b03229f684af0cf0ad3bcc771970c1260a82a791a8d07bffb63d8c95bcc", - "sha256:6cd157ce74a911325e164441ff2d9b4e244659a25b3146310518d83202f15f7a", - "sha256:703c002277f0fbc3c04d0ae4989a174753a7554b2963c584ce2ec0cddcf2bc53", - "sha256:869bbb637de58ab0a912b7f20e9192132f9fbc47fc6b5111cd1e0f6cdf5cf9b0", - "sha256:8a0e0cd21da047ea10267c37caf12add400a92f0620c8bc09e4a6531a765d6d7", - "sha256:8d01e949a5d22e5c4800d59b50617c56125fc187fbeb8fa423e99858546de616", - "sha256:925b4fe5e7c03ed76912b75a9a41dfd682d59c0be43bce88d3b27f7f5ba028fb", - "sha256:9cb1819008f0225a7c066cac8bb0cf90847b2c4a6eb9ebb7431dbd00c56c06c5", - "sha256:a87d496884f40c94c85a647c385f4fd5887941d2609f71043e2b73f2436d9c65", - "sha256:a9030cd30caf848a13a192c5e45367e3c6f363726569a56e75dc1151ee26d859", - "sha256:a9e75e49a0f1583eee0ce93270232b8e7bb4b1edc89cc70b07600d525aef4f43", - "sha256:b50f45d0e82b4562f59f0e0ca511f65e412f2a97d790eea5f60e34e5f1aabc9a", - "sha256:b7878e59ec31f12d54b3797689402ee3b5cfcb5598f2ebf26491732758751908", - "sha256:ce1ddaadee913543ff0154021d31b134551f63428065168e756d90bdc4c686f5", - "sha256:ce2646e4c0807f3461be0653502bb48c6e91a5171d6e450367082c79e12868bf", - "sha256:ce6c3d18b2a8ce364013d47b9cad71db815df31d55918403f8db7d890c9d07ae", - "sha256:e4e2664232005bd306f878b0f167a31f944a07c4de0152c444f8c61bbe3cfb38", - "sha256:e8aa395482728de8bdcca9cc0faf3765ab483e81e01923aaa736b42f0294f570", - "sha256:eb4fcf7105bf071c71068c6eee47499ab8d4b8f5a11fc35147c934f0faa60f23", - "sha256:ed375a79f06cad285166e5be74745df1ed6845c5624aafadec4b7a29c25866ef", - "sha256:f35248f7e0d63b234a109dd72fbfb4b5cb6cb6840b221d0df0ecbf54ab087654", - "sha256:f502ef245c492b391e0e23e94cba030ab91722dcc56963c85bfd7f3441ea2bbe", - "sha256:fe01bac7226499aedf472c62fa3b85b2c619365f3f14dd222ffe4f3aa91e5f98" + "sha256:0942a3a0df3f6131580eddd26d99071b48cfe5aaf3eab2783076fbc5a1c1882e", + "sha256:0ec575db1b54909750332c2e335c2bb11257883914a03bc5a3306a4488ecc772", + "sha256:109581ccc8915001e8037b73c29590e78ce74be49ca0a3630a23831f9e3ed6c7", + "sha256:16593fd748944726540cd20f7e83afec816c2ac96b082e26ae226e8f7e9688cf", + "sha256:427273b08efc16a85aa2b39892817e78e3ed074fcb89b2a51c4979bae7e7ba98", + "sha256:50c4ee32f0e1581828843267d8de35c3298e86ceecd5e9017dc45788be70a864", + "sha256:512a85c3c8c3995cc91af3e90f38f460da5d3cade8dc3a229c8e0879037547c9", + "sha256:57aa843b783179ab72e863512e14bdcba186641daf69e4e3a5761d705dcc35b1", + 
"sha256:621f58cd921cd71ba6215c42954ffaa8a918eecd8c535d97befa1a8acad986dd", + "sha256:6ac2558631a81b85e7fb7a44e5035347938b0a73f5fdc27a8566777d0792a6a4", + "sha256:716754d0b5490bdcf68e1e4925edc02ac07209883314ad01a137642ddb2056f1", + "sha256:736d41cfebedecc6f159fc4ac0769dc89528a989471dc1d378ba07d29a60ba1c", + "sha256:8619b86cb68b185a778635be5b3e6018623c0761dde4df2f112896424aa27bd8", + "sha256:87fad64529cde4f1914a5b9c383628e1a8f9e3930304c09cf22c2ae118a1280e", + "sha256:89494df7f93b1836cae210c42864b292f9b31eeabca4810193761990dc689cce", + "sha256:8cac7bb373a5f1423e28de3fd5fc8063b9c8ffe8957dc1b1a59cb90453db6da1", + "sha256:8fd452dc3d49b3cc54483e033de6c006c304432e6f84b74d7b2c68afa2569ae5", + "sha256:adad60eea2c4c2a1875eb6305a0b6e61a83163f8e233586a4d6a55221ef984fe", + "sha256:c26f95e7609b821b5f08a72dab929baa0d685406b953efd7c89423a511d5c413", + "sha256:cbe1324ef52ff26ccde2cb84b8593c8bf930069dfc06c1e616f1bfd4e47f48a3", + "sha256:d05c4adae06bd0c7f696ae3ec8d993ed8ffcc4e11a76b1b35a5af8a099bd2284", + "sha256:d98bc827a1293ae767c8f2f18be3bb5151fd37ddcd7da2a5f9581baeeb7a3fa1", + "sha256:da2fb75f64792c1fc64c82313a00c728a7c301efe6a60b7a9fe35b16b4368ce7", + "sha256:e4624d7edb2576cd72bb83636cd71c8ce544d8e272f308bd80885056972ca299", + "sha256:e89e0d9e106f8a9180a4ca92a6adde60c58b1b0299e1b43bd5e0312f535fbf33", + "sha256:f11c2437fb5f812d020932119ba02d9e2bc29a6eca01a055233a8b449e3e1e7d", + "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", + "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], - "version": "==1.3.17" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.3.18" }, "swagger-ui-bundle": { "hashes": [ @@ -851,6 +976,7 @@ "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.9" }, "vine": { @@ -858,6 +984,7 @@ "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -865,6 +992,7 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -872,6 +1000,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -890,6 +1019,13 @@ "index": "pypi", "version": "==1.0.1" }, + "wtforms": { + "hashes": [ + "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b", + "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972" + ], + "version": "==2.3.1" + }, "xlrd": { "hashes": [ "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2", @@ -911,6 +1047,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -920,6 
+1057,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -972,6 +1110,7 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], + "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -979,6 +1118,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -994,20 +1134,23 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { "hashes": [ - "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44", - "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b" + "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", + "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], - "version": "==1.8.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.9.0" }, "pyparsing": { "hashes": [ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1023,20 +1166,22 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { "hashes": [ - "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f", - "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f" + "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", + "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" ], - "version": "==0.2.4" + "version": "==0.2.5" }, "zipp": { "hashes": [ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } } diff --git a/config/default.py b/config/default.py index bee6f968..ed44e6fe 100644 --- a/config/default.py +++ b/config/default.py @@ -46,6 +46,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1)) # Email configuration +DEFAULT_SENDER = 'askresearch@virginia.edu' FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com'] MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True) MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io') diff --git a/crc/__init__.py b/crc/__init__.py index 1ac2678f..d169b547 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -4,6 +4,8 @@ import sentry_sdk import connexion from jinja2 
import Environment, FileSystemLoader +from flask_admin import Admin +from flask_admin.contrib.sqla import ModelView from flask_cors import CORS from flask_marshmallow import Marshmallow from flask_mail import Mail @@ -32,18 +34,24 @@ db = SQLAlchemy(app) session = db.session """:type: sqlalchemy.orm.Session""" +# Mail settings +mail = Mail(app) + migrate = Migrate(app, db) ma = Marshmallow(app) from crc import models from crc import api +from crc.api import admin connexion_app.add_api('api.yml', base_path='/v1.0') + # Convert list of allowed origins to list of regexes origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']] cors = CORS(connexion_app.app, origins=origins_re) +# Sentry error handling if app.config['ENABLE_SENTRY']: sentry_sdk.init( dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915", @@ -53,8 +61,6 @@ if app.config['ENABLE_SENTRY']: # Jinja environment definition, used to render mail templates template_dir = os.getcwd() + '/crc/static/templates/mails' env = Environment(loader=FileSystemLoader(template_dir)) -# Mail settings -mail = Mail(app) print('=== USING THESE CONFIG SETTINGS: ===') print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT']) @@ -88,3 +94,10 @@ def clear_db(): """Load example data into the database.""" from example_data import ExampleDataLoader ExampleDataLoader.clean_db() + +@app.cli.command() +def rrt_data_fix(): + """Finds all the empty task event logs, and populates + them with good wholesome data.""" + from crc.services.workflow_service import WorkflowService + WorkflowService.fix_legacy_data_model_for_rrt() diff --git a/crc/api.yml b/crc/api.yml index 64f6086a..3f72bf7f 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -626,6 +626,12 @@ paths: schema: type: string format: uuid + - name: terminate_loop + in: query + required: false + description: Terminate the loop on a looping task + schema: + type: boolean put: operationId: crc.api.workflow.update_task summary: Exclusively for User Tasks, submits form data as a flat set of key/values. 
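The new `terminate_loop` flag exposes SpiffWorkflow's loop-termination support through the update_task endpoint. Below is a minimal client-side sketch of how a caller might use it; the hunk does not show the full route for crc.api.workflow.update_task, so the base URL, path, task UUID, form fields, and auth header are all illustrative assumptions, not part of this patch:

```python
import requests

# Assumed values -- none of these appear in the diff itself.
BASE_URL = "http://localhost:5000/v1.0"
HEADERS = {"Authorization": "Bearer <jwt-token>"}

# Submit form data for a looping user task and ask the engine to end the
# loop; server-side this maps to spiff_task.terminate_loop() in update_task.
response = requests.put(
    f"{BASE_URL}/workflow/42/task/<task-uuid>/data",  # hypothetical path
    params={"terminate_loop": True},     # the new optional query parameter
    json={"favorite_color": "rainbow"},  # flat key/value form data
    headers=HEADERS,
)
response.raise_for_status()
```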
@@ -917,6 +923,21 @@ paths: application/json: schema: type: object + /health_attesting: + get: + operationId: crc.api.approval.get_health_attesting_csv + summary: Returns a CSV file with health attesting records + tags: + - Approvals + responses: + '200': + description: A CSV file + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Approval" components: securitySchemes: jwt: diff --git a/crc/api/admin.py b/crc/api/admin.py new file mode 100644 index 00000000..37532c38 --- /dev/null +++ b/crc/api/admin.py @@ -0,0 +1,72 @@ +# Admin app +import json + +from flask import url_for +from flask_admin import Admin +from flask_admin.contrib import sqla +from flask_admin.contrib.sqla import ModelView +from werkzeug.utils import redirect +from jinja2 import Markup + +from crc import db, app +from crc.api.user import verify_token, verify_token_admin +from crc.models.approval import ApprovalModel +from crc.models.file import FileModel +from crc.models.stats import TaskEventModel +from crc.models.study import StudyModel +from crc.models.user import UserModel +from crc.models.workflow import WorkflowModel + + +class AdminModelView(sqla.ModelView): + can_create = False + can_edit = False + can_delete = False + page_size = 50 # the number of entries to display on the list view + column_exclude_list = ['bpmn_workflow_json', ] + column_display_pk = True + can_export = True + + def is_accessible(self): + return verify_token_admin() + + def inaccessible_callback(self, name, **kwargs): + # redirect to login page if user doesn't have access + return redirect(url_for('home')) + +class UserView(AdminModelView): + column_filters = ['uid'] + +class StudyView(AdminModelView): + column_filters = ['id', 'primary_investigator_id'] + column_searchable_list = ['title'] + +class ApprovalView(AdminModelView): + column_filters = ['study_id', 'approver_uid'] + +class WorkflowView(AdminModelView): + column_filters = ['study_id', 'id'] + +class FileView(AdminModelView): + column_filters = ['workflow_id'] + +def json_formatter(view, context, model, name): + value = getattr(model, name) + json_value = json.dumps(value, ensure_ascii=False, indent=2) + return Markup('
<pre>{}</pre>
'.format(json_value)) + +class TaskEventView(AdminModelView): + column_filters = ['workflow_id', 'action'] + column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'form_data', 'date'] + column_formatters = { + 'form_data': json_formatter, + } + +admin = Admin(app) + +admin.add_view(StudyView(StudyModel, db.session)) +admin.add_view(ApprovalView(ApprovalModel, db.session)) +admin.add_view(UserView(UserModel, db.session)) +admin.add_view(WorkflowView(WorkflowModel, db.session)) +admin.add_view(FileView(FileModel, db.session)) +admin.add_view(TaskEventView(TaskEventModel, db.session)) diff --git a/crc/api/approval.py b/crc/api/approval.py index b3ee0fed..fd01e221 100644 --- a/crc/api/approval.py +++ b/crc/api/approval.py @@ -1,9 +1,11 @@ +import csv +import io import json import pickle from base64 import b64decode from datetime import datetime -from flask import g +from flask import g, make_response from crc import db, session from crc.api.common import ApiError @@ -88,71 +90,25 @@ def get_approvals_for_study(study_id=None): return results +def get_health_attesting_csv(): + records = ApprovalService.get_health_attesting_records() + si = io.StringIO() + cw = csv.writer(si) + cw.writerows(records) + output = make_response(si.getvalue()) + output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv" + output.headers["Content-type"] = "text/csv" + return output + + # ----- Begin descent into madness ---- # def get_csv(): """A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a man to do just about anything""" - approvals = ApprovalService.get_all_approvals(include_cancelled=False) - output = [] - errors = [] - for approval in approvals: - try: - if approval.status != ApprovalStatus.APPROVED.value: - continue - for related_approval in approval.related_approvals: - if related_approval.status != ApprovalStatus.APPROVED.value: - continue - workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() - data = json.loads(workflow.bpmn_workflow_json) - last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) - personnel = extract_value(last_task, 'personnel') - training_val = extract_value(last_task, 'RequiredTraining') - pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] - review_complete = 'AllRequiredTraining' in training_val - pi_uid = workflow.study.primary_investigator_id - pi_details = LdapService.user_info(pi_uid) - details = [] - details.append(pi_details) - for person in personnel: - uid = person['PersonnelComputingID']['value'] - details.append(LdapService.user_info(uid)) + content = ApprovalService.get_not_really_csv_content() - for person in details: - record = { - "study_id": approval.study_id, - "pi_uid": pi_details.uid, - "pi": pi_details.display_name, - "name": person.display_name, - "uid": person.uid, - "email": person.email_address, - "supervisor": "", - "review_complete": review_complete, - } - # We only know the PI's supervisor. 
- if person.uid == pi_details.uid: - record["supervisor"] = pi_supervisor + return content - output.append(record) - - except Exception as e: - errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) - return {"results": output, "errors": errors } - - -def extract_value(task, key): - if key in task['data']: - return pickle.loads(b64decode(task['data'][key]['__bytes__'])) - else: - return "" - - -def find_task(uuid, task): - if task['id']['__uuid__'] == uuid: - return task - for child in task['children']: - task = find_task(uuid, child) - if task: - return task # ----- come back to the world of the living ---- # diff --git a/crc/api/tools.py b/crc/api/tools.py index d140e962..760d0d71 100644 --- a/crc/api/tools.py +++ b/crc/api/tools.py @@ -14,7 +14,7 @@ from crc.services.mails import send_test_email def render_markdown(data, template): """ - Provides a quick way to very that a Jinja markdown template will work properly on a given json + Provides a quick way to verify that a Jinja markdown template will work properly on a given json data structure. Useful for folks that are building these markdown templates. """ try: @@ -65,4 +65,4 @@ def send_email(address): """Just sends a quick test email to assure the system is working.""" if not address: address = "dan@sartography.com" - return send_test_email(address, [address]) \ No newline at end of file + return send_test_email(address, [address]) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 655a85e7..2e35dad2 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -1,7 +1,7 @@ import uuid +from SpiffWorkflow.util.deep_merge import DeepMerge from flask import g - from crc import session, app from crc.api.common import ApiError, ApiErrorSchema from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema @@ -96,59 +96,10 @@ def delete_workflow_specification(spec_id): session.commit() -def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None): - """Returns an API model representing the state of the current workflow, if requested, and - possible, next_task is set to the current_task.""" - - nav_dict = processor.bpmn_workflow.get_nav_list() - navigation = [] - for nav_item in nav_dict: - spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) - if 'description' in nav_item: - nav_item['title'] = nav_item.pop('description') - # fixme: duplicate code from the workflow_service. Should only do this in one place. - if ' ' in nav_item['title']: - nav_item['title'] = nav_item['title'].partition(' ')[2] - else: - nav_item['title'] = "" - if spiff_task: - nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) - nav_item['title'] = nav_item['task'].title # Prefer the task title.
- else: - nav_item['task'] = None - if not 'is_decision' in nav_item: - nav_item['is_decision'] = False - - navigation.append(NavigationItem(**nav_item)) - NavigationItemSchema().dump(nav_item) - - spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() - workflow_api = WorkflowApi( - id=processor.get_workflow_id(), - status=processor.get_status(), - next_task=None, - navigation=navigation, - workflow_spec_id=processor.workflow_spec_id, - spec_version=processor.get_version_string(), - is_latest_spec=processor.is_latest_spec, - total_tasks=len(navigation), - completed_tasks=processor.workflow_model.completed_tasks, - last_updated=processor.workflow_model.last_updated, - title=spec.display_name - ) - if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. - # This may or may not work, sometimes there is no next task to complete. - next_task = processor.next_task() - if next_task: - workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) - - return workflow_api - - def get_workflow(workflow_id, soft_reset=False, hard_reset=False): workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - workflow_api_model = __get_workflow_api_model(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) @@ -161,21 +112,24 @@ def set_current_task(workflow_id, task_id): user_uid = __get_user_uid(workflow_model.study.user_uid) processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.COMPLETED and task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not move the token to a task who's state is not " "currently set to COMPLETE or READY.") # Only reset the token if the task doesn't already have it. - if task.state == task.COMPLETED: - task.reset_token(reset_data=False) # we could optionally clear the previous data. + if spiff_task.state == spiff_task.COMPLETED: + spiff_task.reset_token(reset_data=True) # Don't try to copy the existing data back into this task. 
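A companion sketch for the token-reset path above (crc.api.workflow.set_current_task): again the route is not visible in this hunk, so the path and IDs are assumptions:

```python
import requests

# Assumed values -- the route and IDs are placeholders, not patch content.
BASE_URL = "http://localhost:5000/v1.0"
HEADERS = {"Authorization": "Bearer <jwt-token>"}

# Move the workflow token back to a previously COMPLETED (or READY) task.
# Per the change above, the server now calls reset_token(reset_data=True),
# so data captured on the task's earlier pass is discarded, not replayed.
response = requests.put(
    f"{BASE_URL}/workflow/42/task/<task-uuid>/set_token",  # hypothetical path
    headers=HEADERS,
)
response.raise_for_status()
```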
+ processor.save() - WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET) - workflow_api_model = __get_workflow_api_model(processor, task) + WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, + WorkflowService.TASK_ACTION_TOKEN_RESET, + version=processor.get_version_string()) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task) return WorkflowApiSchema().dump(workflow_api_model) -def update_task(workflow_id, task_id, body): +def update_task(workflow_id, task_id, body, terminate_loop=None): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() if workflow_model is None: @@ -187,17 +141,21 @@ def update_task(workflow_id, task_id, body): user_uid = __get_user_uid(workflow_model.study.user_uid) processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + if spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. " "Consider calling a token reset to make this task Ready.") - task.update_data(body) - processor.complete_task(task) + if terminate_loop: + spiff_task.terminate_loop() + + spiff_task.update_data(body) + processor.complete_task(spiff_task) processor.do_engine_steps() processor.save() - WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE) - workflow_api_model = __get_workflow_api_model(processor) + WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, WorkflowService.TASK_ACTION_COMPLETE, + version=processor.get_version_string()) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 53706a75..361b9183 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -36,6 +36,7 @@ class Task(object): PROP_OPTIONS_FILE = "spreadsheet.name" PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column" PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column" + PROP_OPTIONS_READ_ONLY = "read_only" PROP_LDAP_LOOKUP = "ldap.lookup" VALIDATION_REQUIRED = "required" FIELD_TYPE_AUTO_COMPLETE = "autocomplete" diff --git a/crc/models/email.py b/crc/models/email.py new file mode 100644 index 00000000..dc8c6834 --- /dev/null +++ b/crc/models/email.py @@ -0,0 +1,18 @@ +from flask_marshmallow.sqla import SQLAlchemyAutoSchema +from marshmallow import EXCLUDE +from sqlalchemy import func + +from crc import db +from crc.models.study import StudyModel + + +class EmailModel(db.Model): + __tablename__ = 'email' + id = db.Column(db.Integer, primary_key=True) + subject = db.Column(db.String) + sender = db.Column(db.String) + recipients = db.Column(db.String) + content = db.Column(db.String) + content_html = db.Column(db.String) + study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=True) + study = db.relationship(StudyModel) diff --git a/crc/models/ldap.py b/crc/models/ldap.py index 7e05eccd..802e0d36 100644 --- a/crc/models/ldap.py +++ b/crc/models/ldap.py @@ -29,6 +29,9 @@ class LdapModel(db.Model): affiliation=", ".join(entry.uvaPersonIAMAffiliation), sponsor_type=", ".join(entry.uvaPersonSponsoredType)) + def proper_name(self): + return f'{self.display_name} - ({self.uid})' + class LdapSchema(SQLAlchemyAutoSchema): class Meta: diff --git 
a/crc/models/stats.py b/crc/models/stats.py index c72df7d4..0a2e69b7 100644 --- a/crc/models/stats.py +++ b/crc/models/stats.py @@ -17,6 +17,7 @@ class TaskEventModel(db.Model): task_title = db.Column(db.String) task_type = db.Column(db.String) task_state = db.Column(db.String) + form_data = db.Column(db.JSON) # The form data submitted when the task was completed. mi_type = db.Column(db.String) mi_count = db.Column(db.Integer) mi_index = db.Column(db.Integer) diff --git a/crc/scripts/email.py b/crc/scripts/email.py new file mode 100644 index 00000000..6f8244dd --- /dev/null +++ b/crc/scripts/email.py @@ -0,0 +1,91 @@ +import markdown +from jinja2 import Template + +from crc import app +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService +from crc.services.mails import send_mail + + +class Email(Script): + """This script can be included in a workflow and called from there, specifying + recipients and content. """ + + def get_description(self): + return """ +Creates an email, using the provided arguments (a list of UIDs). +Each argument will be used to look up personal information needed for +the email creation. + +Example: +Email Subject ApprvlApprvr1 PIComputingID +""" + + def do_task_validate_only(self, task, *args, **kwargs): + self.get_subject(task, args) + self.get_users_info(task, args) + self.get_content(task) + + def do_task(self, task, *args, **kwargs): + args = [arg for arg in args if type(arg) == str] + subject = self.get_subject(task, args) + recipients = self.get_users_info(task, args) + content, content_html = self.get_content(task) + if recipients: + send_mail( + subject=subject, + sender=app.config['DEFAULT_SENDER'], + recipients=recipients, + content=content, + content_html=content_html + ) + + def get_users_info(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one argument: the " + "name of the variable in the task data that contains the user " + "id to process. Multiple arguments are accepted.") + emails = [] + for arg in args: + try: + uid = task.workflow.script_engine.evaluate_expression(task, arg) + except Exception as e: + app.logger.error(f'Workflow engine could not parse {arg}') + app.logger.error(str(e)) + continue + user_info = LdapService.user_info(uid) + email = user_info.email_address + emails.append(user_info.email_address) + if not isinstance(email, str): + raise ApiError(code="invalid_argument", + message="The Email script requires at least 1 UID argument. The " + "name of the variable in the task data that contains subject and" + " user ids to process. This must point to an array or a string, but " + "it currently points to a %s " % emails.__class__.__name__) + + return emails + + def get_subject(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one subject argument: the " + "name of the variable in the task data that contains the subject" + " to process. Multiple arguments are accepted.") + subject = args[0] + if not isinstance(subject, str): + raise ApiError(code="invalid_argument", + message="The Email script requires 1 argument: the " + "name of the variable in the task data that contains the subject" + " to process.
This must point to an array or a string, but " + "it currently points to a %s " % subject.__class__.__name__) + + return subject + + def get_content(self, task): + content = task.task_spec.documentation + template = Template(content) + rendered = template.render(task.data) + rendered_markdown = markdown.markdown(rendered).replace('\n', '
') + return rendered, rendered_markdown diff --git a/crc/scripts/fact_service.py b/crc/scripts/fact_service.py index c4468721..b3701312 100644 --- a/crc/scripts/fact_service.py +++ b/crc/scripts/fact_service.py @@ -5,7 +5,7 @@ from crc.scripts.script import Script class FactService(Script): def get_description(self): - return """Just your basic class that can pull in data from a few api endpoints and + return """Just your basic class that can pull in data from a few api endpoints and do a basic task.""" def get_cat(self): diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 1f6f56b3..eacac72c 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -1,6 +1,9 @@ -from datetime import datetime +import json +import pickle +from base64 import b64decode +from datetime import datetime, timedelta -from sqlalchemy import desc +from sqlalchemy import desc, func from crc import app, db, session from crc.api.common import ApiError @@ -109,16 +112,129 @@ class ApprovalService(object): db_approvals = query.all() return [Approval.from_model(approval_model) for approval_model in db_approvals] + @staticmethod + def get_approval_details(approval): + """Returns a list of packed approval details, obtained from + the task data sent during the workflow """ + def extract_value(task, key): + if key in task['data']: + return pickle.loads(b64decode(task['data'][key]['__bytes__'])) + else: + return "" + + def find_task(uuid, task): + if task['id']['__uuid__'] == uuid: + return task + for child in task['children']: + task = find_task(uuid, child) + if task: + return task + + if approval.status != ApprovalStatus.APPROVED.value: + return {} + for related_approval in approval.related_approvals: + if related_approval.status != ApprovalStatus.APPROVED.value: + continue + workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() + data = json.loads(workflow.bpmn_workflow_json) + last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) + personnel = extract_value(last_task, 'personnel') + training_val = extract_value(last_task, 'RequiredTraining') + pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] + review_complete = 'AllRequiredTraining' in training_val + pi_uid = workflow.study.primary_investigator_id + pi_details = LdapService.user_info(pi_uid) + details = { + 'Supervisor': pi_supervisor, + 'PI_Details': pi_details, + 'Review': review_complete + } + details['person_details'] = [] + details['person_details'].append(pi_details) + for person in personnel: + uid = person['PersonnelComputingID']['value'] + details['person_details'].append(LdapService.user_info(uid)) + + return details + + @staticmethod + def get_health_attesting_records(): + """Return a list with prepared information related to all approvals """ + + approvals = ApprovalService.get_all_approvals(include_cancelled=False) + + health_attesting_rows = [ + ['university_computing_id', + 'last_name', + 'first_name', + 'department', + 'job_title', + 'supervisor_university_computing_id'] + ] + + for approval in approvals: + try: + details = ApprovalService.get_approval_details(approval) + if not details: + continue + + for person in details['person_details']: + first_name = person.given_name + last_name = person.display_name.replace(first_name, '').strip() + record = [ + person.uid, + last_name, + first_name, + '', + 'Academic Researcher', + details['Supervisor'] if person.uid == details['person_details'][0].uid else 'askresearch' 
+ ] + + if record not in health_attesting_rows: + health_attesting_rows.append(record) + + except Exception as e: + app.logger.error("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) + + return health_attesting_rows + + @staticmethod + def get_not_really_csv_content(): + approvals = ApprovalService.get_all_approvals(include_cancelled=False) + output = [] + errors = [] + for approval in approvals: + try: + details = ApprovalService.get_approval_details(approval) + + for person in details['person_details']: + record = { + "study_id": approval.study_id, + "pi_uid": details['PI_Details'].uid, + "pi": details['PI_Details'].display_name, + "name": person.display_name, + "uid": person.uid, + "email": person.email_address, + "supervisor": details['Supervisor'] if person.uid == details['person_details'][0].uid else "", + "review_complete": details['Review'], + } + + output.append(record) + + except Exception as e: + errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) + return {"results": output, "errors": errors } @staticmethod def update_approval(approval_id, approver_uid): - """Update a specific approval""" + """Update a specific approval + NOTE: The actual update happens in the API layer; this + function is currently in charge of only sending the + corresponding emails + """ db_approval = session.query(ApprovalModel).get(approval_id) status = db_approval.status if db_approval: - # db_approval.status = status - # session.add(db_approval) - # session.commit() if status == ApprovalStatus.APPROVED.value: # second_approval = ApprovalModel().query.filter_by( # study_id=db_approval.study_id, workflow_id=db_approval.workflow_id, diff --git a/crc/services/email_service.py b/crc/services/email_service.py new file mode 100644 index 00000000..3d78eada --- /dev/null +++ b/crc/services/email_service.py @@ -0,0 +1,42 @@ +from datetime import datetime +from flask_mail import Message +from sqlalchemy import desc + +from crc import app, db, mail, session +from crc.api.common import ApiError + +from crc.models.study import StudyModel +from crc.models.email import EmailModel + + +class EmailService(object): + """Provides common tools for working with an Email""" + + @staticmethod + def add_email(subject, sender, recipients, content, content_html, study_id): + """We will receive all data related to an email and store it""" + + # Find corresponding study - if any + study = None + if isinstance(study_id, int): + study = db.session.query(StudyModel).get(study_id) + + # Create EmailModel + email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients), + content=content, content_html=content_html, study=study) + + # Send mail + try: + msg = Message(subject, + sender=sender, + recipients=recipients) + + msg.body = content + msg.html = content_html + + mail.send(msg) + except Exception as e: + app.logger.error(str(e)) + + db.session.add(email_model) + db.session.commit() diff --git a/crc/services/file_service.py b/crc/services/file_service.py index ff234a79..6ba2e1ad 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -3,7 +3,7 @@ import json import os from datetime import datetime from uuid import UUID -from xml.etree import ElementTree +from lxml import etree import flask from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException @@ -58,7 +58,7 @@ class FileService(object): "irb_docunents.xslx reference file.
This code is not found in that file '%s'" % irb_doc_code) """Assure this is unique to the workflow, task, and document code AND the Name - Because we will allow users to upload multiple files for the same form field + Because we will allow users to upload multiple files for the same form field in some cases """ file_model = session.query(FileModel)\ .filter(FileModel.workflow_id == workflow_id)\ @@ -151,7 +151,7 @@ class FileService(object): # If this is a BPMN, extract the process id. if file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(binary_data) + bpmn: etree.Element = etree.fromstring(binary_data) file_model.primary_process_id = FileService.get_process_id(bpmn) new_file_data_model = FileDataModel( @@ -165,7 +165,7 @@ class FileService(object): return file_model @staticmethod - def get_process_id(et_root: ElementTree.Element): + def get_process_id(et_root: etree.Element): process_elements = [] for child in et_root: if child.tag.endswith('process') and child.attrib.get('isExecutable', False): @@ -179,7 +179,7 @@ class FileService(object): # Look for the element that has the startEvent in it for e in process_elements: - this_element: ElementTree.Element = e + this_element: etree.Element = e for child_element in list(this_element): if child_element.tag.endswith('startEvent'): return this_element.attrib['id'] diff --git a/crc/services/mails.py b/crc/services/mails.py index bd825f69..a1570035 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -3,13 +3,15 @@ import os from flask import render_template, render_template_string from flask_mail import Message +from crc.services.email_service import EmailService + -# TODO: Extract common mailing code into its own function def send_test_email(sender, recipients): try: msg = Message('Research Ramp-up Plan test', sender=sender, - recipients=recipients) + recipients=recipients, + bcc=['rrt_emails@googlegroups.com']) from crc import env, mail template = env.get_template('ramp_up_approval_request_first_review.txt') template_vars = {'primary_investigator': "test"} @@ -20,109 +22,84 @@ def send_test_email(sender, recipients): except Exception as e: return str(e) - +def send_mail(subject, sender, recipients, content, content_html, study_id=None): + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, study_id=study_id) def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None): - try: - msg = Message('Research Ramp-up Plan Submitted', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_submission.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_submission.html') - msg.html = template.render(template_vars) + from crc import env + subject = 'Research Ramp-up Plan Submitted' - mail.send(msg) - except Exception as e: - return str(e) + template = env.get_template('ramp_up_submission.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_submission.html') + content_html = template.render(template_vars) + + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_approval_request_email(sender, recipients, primary_investigator): - try: - msg = Message('Research 
Ramp-up Plan Approval Request', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request.txt') - template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request.html') - msg.html = template.render(template_vars) + from crc import env + subject = 'Research Ramp-up Plan Approval Request' - mail.send(msg) - except Exception as e: - return str(e) + template = env.get_template('ramp_up_approval_request.txt') + template_vars = {'primary_investigator': primary_investigator} + content = template.render(template_vars) + template = env.get_template('ramp_up_approval_request.html') + content_html = template.render(template_vars) + + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator): - try: - msg = Message('Research Ramp-up Plan Approval Request', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request_first_review.txt') - template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request_first_review.html') - msg.html = template.render(template_vars) + from crc import env + subject = 'Research Ramp-up Plan Approval Request' - mail.send(msg) - except Exception as e: - return str(e) + template = env.get_template('ramp_up_approval_request_first_review.txt') + template_vars = {'primary_investigator': primary_investigator} + content = template.render(template_vars) + template = env.get_template('ramp_up_approval_request_first_review.html') + content_html = template.render(template_vars) + + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None): - try: - msg = Message('Research Ramp-up Plan Approved', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Approved' - from crc import env, mail - template = env.get_template('ramp_up_approved.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approved.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_approved.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_approved.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_denied_email(sender, recipients, approver): - try: - msg = Message('Research Ramp-up Plan Denied', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied.txt') - template_vars = {'approver': approver} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied.html') - msg.html = template.render(template_vars) 
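Each of the refactored senders in this file now follows the same template-pair pattern: render <name>.txt for the plain-text body and <name>.html for the HTML body, then hand both to send_mail. A minimal, self-contained sketch of that pattern using an in-memory Jinja2 environment; the template names and variables here are illustrative stand-ins, not the project's actual templates:

import jinja2

# Stand-in for the project's template environment; the real templates live on disk.
env = jinja2.Environment(loader=jinja2.DictLoader({
    'ramp_up_example.txt': 'Plan submitted to {{ approver_1 }}.',
    'ramp_up_example.html': '<p>Plan submitted to <b>{{ approver_1 }}</b>.</p>',
}))

def render_pair(base_name, **template_vars):
    """Render the .txt and .html variants of one logical email template."""
    content = env.get_template(base_name + '.txt').render(template_vars)
    content_html = env.get_template(base_name + '.html').render(template_vars)
    return content, content_html

content, content_html = render_pair('ramp_up_example', approver_1='Max Approver')
# ...then: send_mail(subject, sender, recipients, content, content_html)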
+ template = env.get_template('ramp_up_denied.txt') + template_vars = {'approver': approver} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2): - try: - msg = Message('Research Ramp-up Plan Denied', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied_first_approver.txt') - template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied_first_approver.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.txt') + template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result diff --git a/crc/services/study_service.py b/crc/services/study_service.py index dade7998..142d6166 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -181,8 +181,6 @@ class StudyService(object): documents[code] = doc return documents - - @staticmethod def get_investigators(study_id): @@ -224,7 +222,6 @@ class StudyService(object): return FileModelSchema().dump(file) - @staticmethod def synch_with_protocol_builder_if_enabled(user): """Assures that the studies we have locally for the given user are diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 93590d94..0af63ff9 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -1,5 +1,6 @@ import re -import xml.etree.ElementTree as ElementTree +from lxml import etree +import shlex from datetime import datetime from typing import List @@ -13,7 +14,6 @@ from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser from SpiffWorkflow.exceptions import WorkflowTaskExecException from SpiffWorkflow.specs import WorkflowSpec -from sqlalchemy import desc from crc import session from crc.api.common import ApiError @@ -36,7 +36,9 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): This allows us to reference custom code from the BPMN diagram. """ - commands = script.split(" ") + # Shlex splits the whole string while respecting double quoted strings within + commands = shlex.split(script) + printable_comms = commands path_and_command = commands[0].rsplit(".", 1) if len(path_and_command) == 1: module_name = "crc.scripts." 
+ self.camel_to_snake(path_and_command[0]) @@ -60,7 +62,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): "does not properly implement the CRC Script class.", task=task) if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]: - """If this is running a validation, and not a normal process, then we want to + """If this is running a validation, and not a normal process, then we want to mimic running the script, but not make any external calls or database changes.""" klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:]) else: @@ -102,14 +104,15 @@ class WorkflowProcessor(object): def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False): """Create a Workflow Processor based on the serialized information available in the workflow model. - If soft_reset is set to true, it will try to use the latest version of the workflow specification. - If hard_reset is set to true, it will create a new Workflow, but embed the data from the last - completed task in the previous workflow. + If soft_reset is set to true, it will try to use the latest version of the workflow specification + without resetting to the beginning of the workflow. This will work for some minor changes to the spec. + If hard_reset is set to true, it will use the latest spec, and start the workflow over from the beginning, + which should work in cases where a soft reset fails. If neither flag is set, it will use the same version of the specification that was used to originally create the workflow model. """ self.workflow_model = workflow_model - if soft_reset or len(workflow_model.dependencies) == 0: + if soft_reset or len(workflow_model.dependencies) == 0: # Dependencies of 0 means the workflow was never started. self.spec_data_files = FileService.get_spec_data_files( workflow_spec_id=workflow_model.workflow_spec_id) else: @@ -216,8 +219,6 @@ class WorkflowProcessor(object): full_version = "v%s (%s)" % (version, files) return full_version - - def update_dependencies(self, spec_data_files): existing_dependencies = FileService.get_spec_data_files( workflow_spec_id=self.workflow_model.workflow_spec_id, @@ -267,12 +268,12 @@ class WorkflowProcessor(object): for file_data in file_data_models: if file_data.file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + bpmn: etree.Element = etree.fromstring(file_data.data) if file_data.file_model.primary: process_id = FileService.get_process_id(bpmn) parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name) elif file_data.file_model.type == FileType.dmn: - dmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + dmn: etree.Element = etree.fromstring(file_data.data) parser.add_dmn_xml(dmn, filename=file_data.file_model.name) if process_id is None: raise (ApiError(code="no_primary_bpmn_error", @@ -299,25 +300,12 @@ class WorkflowProcessor(object): return WorkflowStatus.waiting def hard_reset(self): - """Recreate this workflow, but keep the data from the last completed task and add - it back into the first task. This may be useful when a workflow specification changes, - and users need to review all the prior steps, but they don't need to reenter all the previous data. - - Returns the new version. + """Recreate this workflow. This will be useful when a workflow specification changes. """ - - # Create a new workflow based on the latest specs.
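The script engine change above swaps a naive str.split(" ") for shlex.split, so a double-quoted argument survives as a single token, and then maps the CamelCase script name onto a snake_case module under crc.scripts. A quick illustration; camel_to_snake is shown here as one common regex implementation, which may differ in detail from the project's own helper:

import re
import shlex

def camel_to_snake(name):
    # One common way to do this conversion; for illustration only.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()

script = 'Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID'
print(script.split(' '))
# ['Email', '"Camunda', 'Email', 'Subject"', 'ApprvlApprvr1', 'PIComputingID']
print(shlex.split(script))
# ['Email', 'Camunda Email Subject', 'ApprvlApprvr1', 'PIComputingID']
print('crc.scripts.' + camel_to_snake(shlex.split(script)[0]))
# crc.scripts.email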
self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id) new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id) new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine) new_bpmn_workflow.data = self.bpmn_workflow.data - - # Reset the current workflow to the beginning - which we will consider to be the first task after the root - # element. This feels a little sketchy, but I think it is safe to assume root will have one child. - first_task = self.bpmn_workflow.task_tree.children[0] - first_task.reset_token(reset_data=False) - for task in new_bpmn_workflow.get_tasks(SpiffTask.READY): - task.data = first_task.data new_bpmn_workflow.do_engine_steps() self.bpmn_workflow = new_bpmn_workflow diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 310bd7fd..0faf3b76 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -1,3 +1,4 @@ +import copy import string from datetime import datetime import random @@ -5,25 +6,26 @@ import random import jinja2 from SpiffWorkflow import Task as SpiffTask, WorkflowException from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask +from SpiffWorkflow.bpmn.specs.MultiInstanceTask import MultiInstanceTask from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask from SpiffWorkflow.specs import CancelTask, StartTask -from flask import g +from SpiffWorkflow.util.deep_merge import DeepMerge from jinja2 import Template from crc import db, app from crc.api.common import ApiError -from crc.models.api_models import Task, MultiInstanceType +from crc.models.api_models import Task, MultiInstanceType, NavigationItem, NavigationItemSchema, WorkflowApi from crc.models.file import LookupDataModel from crc.models.stats import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowModel, WorkflowStatus +from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel from crc.services.file_service import FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService -from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine +from crc.services.workflow_processor import WorkflowProcessor class WorkflowService(object): @@ -37,7 +39,7 @@ class WorkflowService(object): the workflow Processor should be hidden behind this service. This will help maintain a structure that avoids circular dependencies. But for now, this contains tools for converting spiff-workflow models into our - own API models with additional information and capabilities and + own API models with additional information and capabilities and handles the testing of a workflow specification by completing it with random selections, attempting to mimic a front end as much as possible. 
""" @@ -180,13 +182,87 @@ class WorkflowService(object): def __get_options(self): pass - @staticmethod def _random_string(string_length=10): """Generate a random string of fixed length """ letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(string_length)) + @staticmethod + def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None): + """Returns an API model representing the state of the current workflow, if requested, and + possible, next_task is set to the current_task.""" + + nav_dict = processor.bpmn_workflow.get_nav_list() + navigation = [] + for nav_item in nav_dict: + spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) + if 'description' in nav_item: + nav_item['title'] = nav_item.pop('description') + # fixme: duplicate code from the workflow_service. Should only do this in one place. + if ' ' in nav_item['title']: + nav_item['title'] = nav_item['title'].partition(' ')[2] + else: + nav_item['title'] = "" + if spiff_task: + nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) + nav_item['title'] = nav_item['task'].title # Prefer the task title. + else: + nav_item['task'] = None + if not 'is_decision' in nav_item: + nav_item['is_decision'] = False + + navigation.append(NavigationItem(**nav_item)) + NavigationItemSchema().dump(nav_item) + + spec = db.session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() + workflow_api = WorkflowApi( + id=processor.get_workflow_id(), + status=processor.get_status(), + next_task=None, + navigation=navigation, + workflow_spec_id=processor.workflow_spec_id, + spec_version=processor.get_version_string(), + is_latest_spec=processor.is_latest_spec, + total_tasks=len(navigation), + completed_tasks=processor.workflow_model.completed_tasks, + last_updated=processor.workflow_model.last_updated, + title=spec.display_name + ) + if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. + # This may or may not work, sometimes there is no next task to complete. + next_task = processor.next_task() + if next_task: + previous_form_data = WorkflowService.get_previously_submitted_data(processor.workflow_model.id, next_task) + DeepMerge.merge(next_task.data, previous_form_data) + workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) + + return workflow_api + + @staticmethod + def get_previously_submitted_data(workflow_id, spiff_task): + """ If the user has completed this task previously, find the form data for the last submission.""" + query = db.session.query(TaskEventModel) \ + .filter_by(workflow_id=workflow_id) \ + .filter_by(task_name=spiff_task.task_spec.name) \ + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) + + if hasattr(spiff_task, 'internal_data') and 'runtimes' in spiff_task.internal_data: + query = query.filter_by(mi_index=spiff_task.internal_data['runtimes']) + + latest_event = query.order_by(TaskEventModel.date.desc()).first() + if latest_event: + if latest_event.form_data is not None: + return latest_event.form_data + else: + app.logger.error("missing_form_data", "We have lost data for workflow %i, " + "task %s, it is not in the task event model, " + "and it should be." 
% (workflow_id, spiff_task.task_spec.name)) + return {} + else: + return {} + + @staticmethod def spiff_task_to_api_task(spiff_task, add_docs_and_forms=False): task_type = spiff_task.task_spec.__class__.__name__ @@ -218,8 +294,8 @@ class WorkflowService(object): props = {} if hasattr(spiff_task.task_spec, 'extensions'): - for id, val in spiff_task.task_spec.extensions.items(): - props[id] = val + for key, val in spiff_task.task_spec.extensions.items(): + props[key] = val task = Task(spiff_task.id, spiff_task.task_spec.name, @@ -318,21 +394,22 @@ class WorkflowService(object): field.options.append({"id": d.value, "name": d.label}) @staticmethod - def log_task_action(user_uid, processor, spiff_task, action): + def log_task_action(user_uid, workflow_model, spiff_task, action, version): task = WorkflowService.spiff_task_to_api_task(spiff_task) - workflow_model = processor.workflow_model + form_data = WorkflowService.extract_form_data(spiff_task.data, spiff_task) task_event = TaskEventModel( study_id=workflow_model.study_id, user_uid=user_uid, workflow_id=workflow_model.id, workflow_spec_id=workflow_model.workflow_spec_id, - spec_version=processor.get_version_string(), + spec_version=version, action=action, task_id=task.id, task_name=task.name, task_title=task.title, task_type=str(task.type), task_state=task.state, + form_data=form_data, mi_type=task.multi_instance_type.value, # Some tasks have a repeat behavior. mi_count=task.multi_instance_count, # This is the number of times the task could repeat. mi_index=task.multi_instance_index, # And the index of the currently repeating task. @@ -342,3 +419,64 @@ class WorkflowService(object): db.session.add(task_event) db.session.commit() + @staticmethod + def fix_legacy_data_model_for_rrt(): + """ Remove this after use! This is just to fix RRT so the data is handled correctly. + + Utility that is likely called via the flask command line, it will loop through all the + workflows in the system and attempt to add the right data into the task action log so that + users do not have to re fill out all of the forms if they start over or go back in the workflow. + Viciously inefficient, but should only have to run one time for RRT""" + workflows = db.session.query(WorkflowModel).all() + for workflow_model in workflows: + task_logs = db.session.query(TaskEventModel) \ + .filter(TaskEventModel.workflow_id == workflow_model.id) \ + .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \ + .order_by(TaskEventModel.date.desc()).all() + + processor = WorkflowProcessor(workflow_model) + # Grab all the data from last task completed, which will be everything in this + # rrt situation because of how we were keeping all the data at the time. + latest_data = processor.next_task().data + + # Move forward in the task spec tree, dropping any data that would have been + # added in subsequent tasks, just looking at form data, will not track the automated + # task data additions, hopefully this doesn't hang us. + for log in task_logs: +# if log.task_data is not None: # Only do this if the task event does not have data populated in it. +# continue + data = copy.deepcopy(latest_data) # Or you end up with insane crazy issues. 
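The deepcopy in the loop above is load-bearing: without it, every iteration would trim the same shared dictionary rather than a fresh snapshot of latest_data. A small demonstration of the aliasing problem it avoids (the data values are made up):

import copy

latest_data = {'personnel': [{'uid': 'dhf8r'}]}
alias = latest_data                     # a second name for the same object
snapshot = copy.deepcopy(latest_data)   # a fully independent copy

latest_data['personnel'][0]['uid'] = 'lb3dp'
print(alias['personnel'][0]['uid'])     # 'lb3dp' -- mutated along with the original
print(snapshot['personnel'][0]['uid'])  # 'dhf8r' -- unaffected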
+ # In the simple case of RRT, there is exactly one task for the given task_spec + task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0] + data = WorkflowService.extract_form_data(data, task) + log.form_data = data + db.session.add(log) + + db.session.commit() + + @staticmethod + def extract_form_data(latest_data, task): + """Removes data from latest_data that would be added by the child task or any of its children.""" + data = {} + + if hasattr(task.task_spec, 'form'): + for field in task.task_spec.form.fields: + if field.has_property(Task.PROP_OPTIONS_READ_ONLY) and \ + field.get_property(Task.PROP_OPTIONS_READ_ONLY).lower().strip() == "true": + continue # Don't add read-only data + elif field.has_property(Task.PROP_OPTIONS_REPEAT): + group = field.get_property(Task.PROP_OPTIONS_REPEAT) + if group in latest_data: + data[group] = latest_data[group] + elif isinstance(task.task_spec, MultiInstanceTask): + group = task.task_spec.elementVar + if group in latest_data: + data[group] = latest_data[group] + else: + if field.id in latest_data: + data[field.id] = latest_data[field.id] + + return data + + + diff --git a/crc/static/templates/mails/ramp_up_denied.txt b/crc/static/templates/mails/ramp_up_denied.txt index 5fbaefda..120522b8 100644 --- a/crc/static/templates/mails/ramp_up_denied.txt +++ b/crc/static/templates/mails/ramp_up_denied.txt @@ -1 +1 @@ - Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and res-submit for approval. \ No newline at end of file + Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval. \ No newline at end of file diff --git a/docker_run.sh b/docker_run.sh index 6bc3c90b..8ad66274 100755 --- a/docker_run.sh +++ b/docker_run.sh @@ -23,8 +23,16 @@ if [ "$RESET_DB_RRT" = "true" ]; then pipenv run flask load-example-rrt-data fi +if [ "$FIX_RRT_DATA" = "true" ]; then + echo 'Fixing RRT data...' + pipenv run flask rrt-data-fix +fi + + +# THIS MUST BE THE LAST COMMAND! if [ "$APPLICATION_ROOT" = "/" ]; then pipenv run gunicorn --bind 0.0.0.0:$PORT0 wsgi:app else pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app fi + diff --git a/migrations/versions/1fdd1bdb600e_.py b/migrations/versions/1fdd1bdb600e_.py new file mode 100644 index 00000000..dff1fdae --- /dev/null +++ b/migrations/versions/1fdd1bdb600e_.py @@ -0,0 +1,28 @@ +"""empty message + +Revision ID: 1fdd1bdb600e +Revises: 17597692d0b0 +Create Date: 2020-06-17 16:44:16.427988 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '1fdd1bdb600e' +down_revision = '17597692d0b0' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust!
### + op.add_column('task_event', sa.Column('task_data', sa.JSON(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('task_event', 'task_data') + # ### end Alembic commands ### diff --git a/migrations/versions/5acd138e969c_.py b/migrations/versions/5acd138e969c_.py new file mode 100644 index 00000000..22b6b79a --- /dev/null +++ b/migrations/versions/5acd138e969c_.py @@ -0,0 +1,38 @@ +"""empty message + +Revision ID: 5acd138e969c +Revises: de30304ff5e6 +Create Date: 2020-06-24 21:36:15.128632 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '5acd138e969c' +down_revision = 'de30304ff5e6' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('email', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('subject', sa.String(), nullable=True), + sa.Column('sender', sa.String(), nullable=True), + sa.Column('recipients', sa.String(), nullable=True), + sa.Column('content', sa.String(), nullable=True), + sa.Column('content_html', sa.String(), nullable=True), + sa.Column('study_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['study_id'], ['study.id'], ), + sa.PrimaryKeyConstraint('id') + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('email') + # ### end Alembic commands ### diff --git a/migrations/versions/de30304ff5e6_.py b/migrations/versions/de30304ff5e6_.py new file mode 100644 index 00000000..46a43f18 --- /dev/null +++ b/migrations/versions/de30304ff5e6_.py @@ -0,0 +1,30 @@ +"""empty message + +Revision ID: de30304ff5e6 +Revises: 1fdd1bdb600e +Create Date: 2020-06-18 16:19:11.133665 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = 'de30304ff5e6' +down_revision = '1fdd1bdb600e' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('form_data', sa.JSON(), nullable=True)) + op.drop_column('task_event', 'task_data') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column('task_event', sa.Column('task_data', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) + op.drop_column('task_event', 'form_data') + # ### end Alembic commands ### diff --git a/tests/test_approvals_api.py b/tests/approval/test_approvals_api.py similarity index 100% rename from tests/test_approvals_api.py rename to tests/approval/test_approvals_api.py diff --git a/tests/test_approvals_service.py b/tests/approval/test_approvals_service.py similarity index 73% rename from tests/test_approvals_service.py rename to tests/approval/test_approvals_service.py index 26a26ef4..34871fec 100644 --- a/tests/test_approvals_service.py +++ b/tests/approval/test_approvals_service.py @@ -57,6 +57,32 @@ class TestApprovalsService(BaseTest): self.assertEqual(1, models[0].version) self.assertEqual(2, models[1].version) + def test_get_health_attesting_records(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + records = ApprovalService.get_health_attesting_records() + + self.assertEqual(len(records), 1) + + def test_get_not_really_csv_content(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + records = ApprovalService.get_not_really_csv_content() + + self.assertEqual(len(records), 2) + def test_new_approval_sends_proper_emails(self): self.assertEqual(1, 1) diff --git a/tests/test_request_approval_script.py b/tests/approval/test_request_approval_script.py similarity index 100% rename from tests/test_request_approval_script.py rename to tests/approval/test_request_approval_script.py diff --git a/tests/base_test.py b/tests/base_test.py index 93294193..3bdae053 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -290,7 +290,7 @@ class BaseTest(unittest.TestCase): self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id) return workflow_api - def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"): + def complete_form(self, workflow_in, task_in, dict_data, error_code=None, terminate_loop=None, user_uid="dhf8r"): prev_completed_task_count = workflow_in.completed_tasks if isinstance(task_in, dict): task_id = task_in["id"] @@ -299,11 +299,16 @@ class BaseTest(unittest.TestCase): user = session.query(UserModel).filter_by(uid=user_uid).first() self.assertIsNotNone(user) - - rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), - headers=self.logged_in_headers(user=user), - content_type="application/json", - data=json.dumps(dict_data)) + if terminate_loop: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data?terminate_loop=true' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + content_type="application/json", + data=json.dumps(dict_data)) + else: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + content_type="application/json", + 
data=json.dumps(dict_data)) if error_code: self.assert_failure(rv, error_code=error_code) return @@ -316,7 +321,9 @@ class BaseTest(unittest.TestCase): # The total number of tasks may change over time, as users move through gateways # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created... self.assertIsNotNone(workflow.total_tasks) - self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) + # presumably, we also need to deal with sequential items here too . . + if not task_in.multi_instance_type == 'looping': + self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) # Assure a record exists in the Task Events task_events = session.query(TaskEventModel) \ @@ -335,7 +342,8 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.name, event.task_name) self.assertEqual(task_in.title, event.task_title) self.assertEqual(task_in.type, event.task_type) - self.assertEqual("COMPLETED", event.task_state) + if not task_in.multi_instance_type == 'looping': + self.assertEqual("COMPLETED", event.task_state) # Not sure what voodoo is happening inside of marshmallow to get me in this state. if isinstance(task_in.multi_instance_type, MultiInstanceType): @@ -344,7 +352,10 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.multi_instance_type, event.mi_type) self.assertEqual(task_in.multi_instance_count, event.mi_count) - self.assertEqual(task_in.multi_instance_index, event.mi_index) + if task_in.multi_instance_type == 'looping' and not terminate_loop: + self.assertEqual(task_in.multi_instance_index+1, event.mi_index) + else: + self.assertEqual(task_in.multi_instance_index, event.mi_index) self.assertEqual(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn new file mode 100644 index 00000000..1b8d5252 --- /dev/null +++ b/tests/data/email/email.bpmn @@ -0,0 +1,67 @@ + + + + + Flow_1synsig + + + Flow_1xlrgne + + + # Dear Approver +## you have been requested for approval + + +--- +New request submitted by {{ PIComputingID }} + +Email content to be delivered to {{ ApprvlApprvr1 }} + +--- + Flow_08n2npe + Flow_1xlrgne + Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID + + + + + + + + + + + + Flow_1synsig + Flow_08n2npe + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/looping_task/looping_task.bpmn b/tests/data/looping_task/looping_task.bpmn new file mode 100644 index 00000000..96b1b32f --- /dev/null +++ b/tests/data/looping_task/looping_task.bpmn @@ -0,0 +1,45 @@ + + + + + Flow_0vlor2k + + + + + + + + + Flow_0vlor2k + Flow_1tvod7v + + + + Flow_1tvod7v + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/multi_instance/multi_instance.bpmn b/tests/data/multi_instance/multi_instance.bpmn index d53f7b17..28bda546 100644 --- a/tests/data/multi_instance/multi_instance.bpmn +++ b/tests/data/multi_instance/multi_instance.bpmn @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.NETBADGEID}} ## Role: {{investigator.INVESTIGATORTYPEFULL}} @@ -25,7 +25,7 @@ Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp @@ -58,7 +58,7 @@ - + diff --git a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn index ba1fd76b..dd6215ed 100644 --- a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn +++ 
b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.user_id}} ## Role: {{investigator.type_full}} @@ -22,7 +22,7 @@ Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp @@ -55,7 +55,7 @@ - + diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn index 628f1bd4..fc5e41bb 100644 --- a/tests/data/random_fact/random_fact.bpmn +++ b/tests/data/random_fact/random_fact.bpmn @@ -175,9 +175,6 @@ Your random fact is: - - - @@ -187,6 +184,9 @@ Your random fact is: + + + diff --git a/tests/emails/test_email_script.py b/tests/emails/test_email_script.py new file mode 100644 index 00000000..12a00fac --- /dev/null +++ b/tests/emails/test_email_script.py @@ -0,0 +1,39 @@ +from tests.base_test import BaseTest + +from crc.models.email import EmailModel +from crc.services.file_service import FileService +from crc.scripts.email import Email +from crc.services.workflow_processor import WorkflowProcessor +from crc.api.common import ApiError + +from crc import db, mail + + +class TestEmailScript(BaseTest): + + def test_do_task(self): + workflow = self.create_workflow('email') + + task_data = { + 'PIComputingID': 'dhf8r', + 'ApprvlApprvr1': 'lb3dp' + } + task = self.get_workflow_api(workflow).next_task + + with mail.record_messages() as outbox: + + self.complete_form(workflow, task, task_data) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Camunda Email Subject') + + # PI is present + self.assertIn(task_data['PIComputingID'], outbox[0].body) + self.assertIn(task_data['PIComputingID'], outbox[0].html) + + # Approver is present + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].body) + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) diff --git a/tests/emails/test_email_service.py b/tests/emails/test_email_service.py new file mode 100644 index 00000000..e2bcd139 --- /dev/null +++ b/tests/emails/test_email_service.py @@ -0,0 +1,34 @@ +from tests.base_test import BaseTest + +from crc import session +from crc.models.approval import ApprovalModel, ApprovalStatus +from crc.models.email import EmailModel +from crc.services.email_service import EmailService + + +class TestEmailService(BaseTest): + + def test_add_email(self): + self.load_example_data() + study = self.create_study() + workflow = self.create_workflow('random_fact') + + subject = 'Email Subject' + sender = 'sender@sartography.com' + recipients = ['recipient@sartography.com', 'back@sartography.com'] + content = 'Content for this email' + content_html = '
Hypertext Markup Language content for this email
' + + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, study_id=study.id) + + email_model = EmailModel.query.first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.study, study) + + # TODO: Create email model without study diff --git a/tests/emails/test_mails.py b/tests/emails/test_mails.py new file mode 100644 index 00000000..0710e02e --- /dev/null +++ b/tests/emails/test_mails.py @@ -0,0 +1,117 @@ + +from tests.base_test import BaseTest + +from crc import mail, session +from crc.models.approval import ApprovalModel, ApprovalStatus +from crc.models.email import EmailModel +from crc.services.mails import ( + send_ramp_up_submission_email, + send_ramp_up_approval_request_email, + send_ramp_up_approval_request_first_review_email, + send_ramp_up_approved_email, + send_ramp_up_denied_email, + send_ramp_up_denied_email_to_approver +) + + +class TestMails(BaseTest): + + def setUp(self): + """Initial setup shared by all TestApprovals tests""" + self.load_example_data() + self.study = self.create_study() + self.workflow = self.create_workflow('random_fact') + + self.sender = 'sender@sartography.com' + self.recipients = ['recipient@sartography.com'] + self.primary_investigator = 'Dr. Bartlett' + self.approver_1 = 'Max Approver' + self.approver_2 = 'Close Reviewer' + + def test_send_ramp_up_submission_email(self): + with mail.record_messages() as outbox: + + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_approval_request_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approval_request_first_review_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_first_review_email( + self.sender, self.recipients, self.primary_investigator + ) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approved_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approved_email(self.sender, 
self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_denied_email(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_send_ramp_up_denied_email_to_approver(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email_to_approver( + self.sender, self.recipients, self.primary_investigator, self.approver_2 + ) + + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + self.assertIn(self.approver_2, outbox[0].body) + self.assertIn(self.approver_2, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) diff --git a/tests/test_file_service.py b/tests/files/test_file_service.py similarity index 98% rename from tests/test_file_service.py rename to tests/files/test_file_service.py index 1dea810c..dd95e458 100644 --- a/tests/test_file_service.py +++ b/tests/files/test_file_service.py @@ -61,14 +61,14 @@ class TestFileService(BaseTest): # Archive the file file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(1, len(file_models)) + self.assertEqual(1, len(file_models)) file_model = file_models[0] file_model.archived = True db.session.add(file_model) # Assure that the file no longer comes back. 
file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(0, len(file_models)) + self.assertEqual(0, len(file_models)) # Add the file again with different data FileService.add_workflow_file(workflow_id=workflow.id, diff --git a/tests/test_files_api.py b/tests/files/test_files_api.py similarity index 98% rename from tests/test_files_api.py rename to tests/files/test_files_api.py index 2d14a8b5..59e6c1f6 100644 --- a/tests/test_files_api.py +++ b/tests/files/test_files_api.py @@ -91,7 +91,6 @@ class TestFilesApi(BaseTest): content_type='multipart/form-data', headers=self.logged_in_headers()) self.assert_success(rv) - def test_archive_file_no_longer_shows_up(self): self.load_example_data() self.create_reference_document() @@ -109,21 +108,16 @@ class TestFilesApi(BaseTest): self.assert_success(rv) rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(1, len(json.loads(rv.get_data(as_text=True)))) + self.assertEqual(1, len(json.loads(rv.get_data(as_text=True)))) file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all() - self.assertEquals(1, len(file_model)) + self.assertEqual(1, len(file_model)) file_model[0].archived = True db.session.commit() rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(0, len(json.loads(rv.get_data(as_text=True)))) - - - - - + self.assertEqual(0, len(json.loads(rv.get_data(as_text=True)))) def test_set_reference_file(self): file_name = "irb_document_types.xls" @@ -285,8 +279,8 @@ class TestFilesApi(BaseTest): .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\ .filter(ApprovalModel.study_id == workflow.study_id).all() - self.assertEquals(1, len(approvals)) - self.assertEquals(1, len(approvals[0].approval_files)) + self.assertEqual(1, len(approvals)) + self.assertEqual(1, len(approvals[0].approval_files)) def test_change_primary_bpmn(self): diff --git a/tests/test_study_api.py b/tests/study/test_study_api.py similarity index 100% rename from tests/test_study_api.py rename to tests/study/test_study_api.py diff --git a/tests/test_study_details_documents.py b/tests/study/test_study_details_documents.py similarity index 100% rename from tests/test_study_details_documents.py rename to tests/study/test_study_details_documents.py diff --git a/tests/test_study_service.py b/tests/study/test_study_service.py similarity index 100% rename from tests/test_study_service.py rename to tests/study/test_study_service.py diff --git a/tests/test_update_study_script.py b/tests/study/test_update_study_script.py similarity index 100% rename from tests/test_update_study_script.py rename to tests/study/test_update_study_script.py diff --git a/tests/test_looping_task.py b/tests/test_looping_task.py new file mode 100644 index 00000000..e56e0877 --- /dev/null +++ b/tests/test_looping_task.py @@ -0,0 +1,54 @@ +from unittest.mock import patch + +from crc import session +from crc.models.api_models import MultiInstanceType +from crc.models.study import StudyModel +from crc.models.workflow import WorkflowStatus +from crc.services.study_service import StudyService +from crc.services.workflow_processor import WorkflowProcessor +from crc.services.workflow_service import WorkflowService +from tests.base_test import BaseTest + + +class TestWorkflowProcessorLoopingTask(BaseTest): + """Tests the Workflow Processor as it deals with a Looping task""" + + def 
_populate_form_with_random_data(self, task): + api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) + WorkflowService.populate_form_with_random_data(task, api_task, required_only=False) + + def get_processor(self, study_model, spec_model): + workflow_model = StudyService._create_workflow_model(study_model, spec_model) + return WorkflowProcessor(workflow_model) + + def test_create_and_complete_workflow(self): + # Walks a looping user task from its first iteration through loop termination. + + workflow = self.create_workflow('looping_task') + task = self.get_workflow_api(workflow).next_task + + self.assertEqual("GetNames", task.name) + + self.assertEqual(task.multi_instance_type, 'looping') + self.assertEqual(1, task.multi_instance_index) + self.complete_form(workflow, task, {'GetNames_CurrentVar': {'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) + task = self.get_workflow_api(workflow).next_task + + self.assertEqual(task.multi_instance_type, 'looping') + self.assertEqual(2, task.multi_instance_index) + self.complete_form(workflow, + task, + {'GetNames_CurrentVar': {'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, + terminate_loop=True) + + task = self.get_workflow_api(workflow).next_task + self.assertEqual(task.name, 'Event_End') + self.assertEqual(workflow.completed_tasks, workflow.total_tasks) + self.assertEqual(task.data, {'GetNames_CurrentVar': 2, + 'GetNames': {'1': {'Name': 'Peter Norvig', + 'Nickname': 'Pete'}, + '2': {'Name': 'Stuart Russell', + 'Nickname': 'Stu'}}}) + + + diff --git a/tests/test_mails.py b/tests/test_mails.py deleted file mode 100644 index 15a01583..00000000 --- a/tests/test_mails.py +++ /dev/null @@ -1,55 +0,0 @@ - -from tests.base_test import BaseTest - -from crc.services.mails import ( - send_ramp_up_submission_email, - send_ramp_up_approval_request_email, - send_ramp_up_approval_request_first_review_email, - send_ramp_up_approved_email, - send_ramp_up_denied_email, - send_ramp_up_denied_email_to_approver -) - - -class TestMails(BaseTest): - - def setUp(self): - self.sender = 'sender@sartography.com' - self.recipients = ['recipient@sartography.com'] - self.primary_investigator = 'Dr. 
Bartlett' - self.approver_1 = 'Max Approver' - self.approver_2 = 'Close Reviewer' - - def test_send_ramp_up_submission_email(self): - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_email(self): - send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_first_review_email(self): - send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.primary_investigator - ) - self.assertTrue(True) - - def test_send_ramp_up_approved_email(self): - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_denied_email(self): - send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - def test_send_send_ramp_up_denied_email_to_approver(self): - send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.primary_investigator, self.approver_2 - ) - self.assertTrue(True) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 654b777e..c6b09dae 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -4,14 +4,86 @@ import random from unittest.mock import patch from tests.base_test import BaseTest + from crc import session, app from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema from crc.models.file import FileModelSchema from crc.models.workflow import WorkflowStatus - +from crc.services.workflow_service import WorkflowService +from crc.models.stats import TaskEventModel class TestTasksApi(BaseTest): + def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False): + rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' % + (workflow.id, str(soft_reset), str(hard_reset)), + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + workflow_api = WorkflowApiSchema().load(json_data) + self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id) + return workflow_api + + def complete_form(self, workflow_in, task_in, dict_data, error_code = None): + prev_completed_task_count = workflow_in.completed_tasks + if isinstance(task_in, dict): + task_id = task_in["id"] + else: + task_id = task_in.id + rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), + headers=self.logged_in_headers(), + content_type="application/json", + data=json.dumps(dict_data)) + if error_code: + self.assert_failure(rv, error_code=error_code) + return + + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + + # Assure stats are updated on the model + workflow = WorkflowApiSchema().load(json_data) + # The total number of tasks may change over time, as users move through gateways + # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created... 
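Both get_previously_submitted_data earlier in this diff and the test helper below lean on the same "latest event wins" query shape: narrow the TaskEventModel rows down to one task, order by date descending, and take the first row. A self-contained sketch of that pattern; the model and rows are stand-ins, written against SQLAlchemy 1.3-era APIs:

from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class TaskEvent(Base):
    # Stand-in for the project's TaskEventModel.
    __tablename__ = 'task_event'
    id = Column(Integer, primary_key=True)
    task_name = Column(String)
    date = Column(DateTime)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([TaskEvent(task_name='GetNames', date=datetime(2020, 6, 1)),
                 TaskEvent(task_name='GetNames', date=datetime(2020, 6, 20))])
session.commit()

latest = session.query(TaskEvent) \
    .filter_by(task_name='GetNames') \
    .order_by(TaskEvent.date.desc()) \
    .first()
print(latest.date)  # 2020-06-20 00:00:00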
+        self.assertIsNotNone(workflow.total_tasks)
+        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+        # Assure a record exists in the Task Events
+        task_events = session.query(TaskEventModel) \
+            .filter_by(workflow_id=workflow.id) \
+            .filter_by(task_id=task_id) \
+            .order_by(TaskEventModel.date.desc()).all()
+        self.assertGreater(len(task_events), 0)
+        event = task_events[0]
+        self.assertIsNotNone(event.study_id)
+        self.assertEqual("dhf8r", event.user_uid)
+        self.assertEqual(workflow.id, event.workflow_id)
+        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+        self.assertEqual(workflow.spec_version, event.spec_version)
+        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+        self.assertEqual(task_in.id, task_id)
+        self.assertEqual(task_in.name, event.task_name)
+        self.assertEqual(task_in.title, event.task_title)
+        self.assertEqual(task_in.type, event.task_type)
+        self.assertEqual("COMPLETED", event.task_state)
+        # Not sure what voodoo is happening inside of marshmallow to get us into this state.
+        if isinstance(task_in.multi_instance_type, MultiInstanceType):
+            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+        else:
+            self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+        self.assertEqual(task_in.multi_instance_count, event.mi_count)
+        self.assertEqual(task_in.multi_instance_index, event.mi_index)
+        self.assertEqual(task_in.process_name, event.process_name)
+        self.assertIsNotNone(event.date)
+
+        # Assure that there is data in the form_data
+        self.assertIsNotNone(event.form_data)
+
+        workflow = WorkflowApiSchema().load(json_data)
+        return workflow
+
+
     def test_get_current_user_tasks(self):
         self.load_example_data()
         workflow = self.create_workflow('random_fact')
@@ -299,13 +371,13 @@ class TestTasksApi(BaseTest):
         self.assertEqual("UserTask", task.type)
         self.assertEqual("Activity_A", task.name)
         self.assertEqual("My Sub Process", task.process_name)
-        workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
+        workflow_api = self.complete_form(workflow, task, {"FieldA": "Dan"})
         task = workflow_api.next_task
         self.assertIsNotNone(task)
         self.assertEqual("Activity_B", task.name)
         self.assertEqual("Sub Workflow Example", task.process_name)
-        workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
+        workflow_api = self.complete_form(workflow, task, {"FieldB": "Dan"})
         self.assertEqual(WorkflowStatus.complete, workflow_api.status)
 
     def test_update_task_resets_token(self):
@@ -368,12 +440,14 @@ class TestTasksApi(BaseTest):
         self.assertEqual(9, len(ready_items))
         self.assertEqual("UserTask", workflow_api.next_task.type)
-        self.assertEqual("MutiInstanceTask",workflow_api.next_task.name)
+        self.assertEqual("MultiInstanceTask", workflow_api.next_task.name)
         self.assertEqual("more information", workflow_api.next_task.title)
 
         for i in random.sample(range(9), 9):
             task = TaskSchema().load(ready_items[i]['task'])
-            self.complete_form(workflow, task, {"investigator":{"email": "dhf8r@virginia.edu"}})
+            data = workflow_api.next_task.data
+            data['investigator']['email'] = "dhf8r@virginia.edu"
+            self.complete_form(workflow, task, data)
             #tasks = self.get_workflow_api(workflow).user_tasks
 
         workflow = self.get_workflow_api(workflow)
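The hunk above switches the multi-instance API test from posting a minimal dict to round-tripping the task's own data. A hedged sketch of the reasoning, in plain Python with the engine-provided payload simulated:

    # Posting only {'investigator': {'email': ...}} would drop fields the engine
    # has already attached to the task; updating the engine-provided data in
    # place and submitting the whole payload keeps them.
    engine_data = {'investigator': {'user_id': 'abc1d', 'email': None}}  # simulated
    engine_data['investigator']['email'] = 'dhf8r@virginia.edu'
    assert engine_data['investigator']['user_id'] == 'abc1d'  # untouched fields survive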
diff --git a/tests/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py
similarity index 87%
rename from tests/test_workflow_processor.py
rename to tests/workflow/test_workflow_processor.py
index b3f6c374..30f9150b 100644
--- a/tests/test_workflow_processor.py
+++ b/tests/workflow/test_workflow_processor.py
@@ -270,53 +270,6 @@ class TestWorkflowProcessor(BaseTest):
         processor = self.get_processor(study, workflow_spec_model)
         self.assertTrue(processor.get_version_string().startswith('v2.1.1'))
 
-    def test_restart_workflow(self):
-        self.load_example_data()
-        study = session.query(StudyModel).first()
-        workflow_spec_model = self.load_test_spec("two_forms")
-        processor = self.get_processor(study, workflow_spec_model)
-        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
-        task = processor.next_task()
-        task.data = {"key": "Value"}
-        processor.complete_task(task)
-        task_before_restart = processor.next_task()
-        processor.hard_reset()
-        task_after_restart = processor.next_task()
-
-        self.assertNotEqual(task.get_name(), task_before_restart.get_name())
-        self.assertEqual(task.get_name(), task_after_restart.get_name())
-        self.assertEqual(task.data, task_after_restart.data)
-
-    def test_soft_reset(self):
-        self.load_example_data()
-
-        # Start the two_forms workflow, and enter some data in the first form.
-        study = session.query(StudyModel).first()
-        workflow_spec_model = self.load_test_spec("two_forms")
-        processor = self.get_processor(study, workflow_spec_model)
-        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
-        task = processor.next_task()
-        task.data = {"color": "blue"}
-        processor.complete_task(task)
-
-        # Modify the specification, with a minor text change.
-        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn')
-        self.replace_file("two_forms.bpmn", file_path)
-
-        # Setting up another processor should not error out, but doesn't pick up the update.
-        processor.workflow_model.bpmn_workflow_json = processor.serialize()
-        processor2 = WorkflowProcessor(processor.workflow_model)
-        self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
-        self.assertNotEqual("# This is some documentation I wanted to add.",
-                            processor2.bpmn_workflow.last_task.task_spec.documentation)
-
-        # You can do a soft update and get the right response.
-        processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True)
-        self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
-        self.assertEqual("# This is some documentation I wanted to add.",
-                            processor3.bpmn_workflow.last_task.task_spec.documentation)
-
-
     def test_hard_reset(self):
         self.load_example_data()
 
@@ -344,8 +297,10 @@ class TestWorkflowProcessor(BaseTest):
         # Do a hard reset, which should bring us back to the beginning, but retain the data.
         processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True)
         self.assertEqual("Step 1", processor3.next_task().task_spec.description)
-        self.assertEqual({"color": "blue"}, processor3.next_task().data)
-        processor3.complete_task(processor3.next_task())
+        self.assertTrue(processor3.is_latest_spec)  # Now at version 2.
+        task = processor3.next_task()
+        task.data = {"color": "blue"}
+        processor3.complete_task(task)
         self.assertEqual("New Step", processor3.next_task().task_spec.description)
         self.assertEqual("blue", processor3.next_task().data["color"])
 
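For orientation before the next file: a hedged sketch (plain Python; the real API is WorkflowProcessor, simulated here) of the hard-reset contract the amended test exercises — the reset returns the token to the first task and adopts the latest spec version, after which the form data is entered again:

    def hard_reset(state):
        # Simulated: discard progress and adopt the newest specification.
        return {'current_task': 'Step 1', 'is_latest_spec': True, 'data': {}}

    state = {'current_task': 'New Step', 'is_latest_spec': False,
             'data': {'color': 'blue'}}
    state = hard_reset(state)
    assert state['current_task'] == 'Step 1' and state['is_latest_spec']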
diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py
similarity index 64%
rename from tests/test_workflow_processor_multi_instance.py
rename to tests/workflow/test_workflow_processor_multi_instance.py
index aefb73f1..76821fed 100644
--- a/tests/test_workflow_processor_multi_instance.py
+++ b/tests/workflow/test_workflow_processor_multi_instance.py
@@ -1,13 +1,13 @@
 from unittest.mock import patch
+from tests.base_test import BaseTest
 
-from crc import session
+from crc import session, db
 from crc.models.api_models import MultiInstanceType
 from crc.models.study import StudyModel
-from crc.models.workflow import WorkflowStatus
+from crc.models.workflow import WorkflowStatus, WorkflowModel
 from crc.services.study_service import StudyService
 from crc.services.workflow_processor import WorkflowProcessor
 from crc.services.workflow_service import WorkflowService
-from tests.base_test import BaseTest
 
 
 class TestWorkflowProcessorMultiInstance(BaseTest):
@@ -32,7 +32,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
             'error': 'Unable to locate a user with id asd3v in LDAP'}}
 
     def _populate_form_with_random_data(self, task):
-        WorkflowProcessor.populate_form_with_random_data(task)
+        WorkflowService.populate_form_with_random_data(task)
 
     def get_processor(self, study_model, spec_model):
         workflow_model = StudyService._create_workflow_model(study_model, spec_model)
@@ -51,51 +51,72 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         self.assertIsNotNone(processor)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
         processor.bpmn_workflow.do_engine_steps()
-        next_user_tasks = processor.next_user_tasks()
-        self.assertEqual(1, len(next_user_tasks))
-
-        task = next_user_tasks[0]
+        workflow_api = WorkflowService.processor_to_workflow_api(processor)
+        self.assertIsNotNone(workflow_api)
+        self.assertIsNotNone(workflow_api.next_task)
 
+        # 1st investigator
+        api_task = workflow_api.next_task
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
-        self.assertEqual("dhf8r", task.data["investigator"]["user_id"])
-
-        self.assertEqual("MutiInstanceTask", task.get_name())
-        api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type)
+        self.assertEqual("dhf8r", api_task.data["investigator"]["user_id"])
+        self.assertEqual("MultiInstanceTask", api_task.name)
         self.assertEqual(3, api_task.multi_instance_count)
         self.assertEqual(1, api_task.multi_instance_index)
-        task.update_data({"investigator":{"email":"asd3v@virginia.edu"}})
+
+        task = processor.get_current_user_tasks()[0]
+        self.assertEqual(task.id, api_task.id)
+        task.update_data({"investigator": {"email": "asd3v@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
+        workflow_api = WorkflowService.processor_to_workflow_api(processor)
 
-        task = next_user_tasks[0]
-        api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEqual("MutiInstanceTask", api_task.name)
-        task.update_data({"investigator":{"email":"asdf32@virginia.edu"}})
+        # 2nd investigator
+        api_task = workflow_api.next_task
+        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
+        self.assertIsNone(api_task.data["investigator"]["user_id"])
+        self.assertEqual("MultiInstanceTask", api_task.name)
         self.assertEqual(3, api_task.multi_instance_count)
         self.assertEqual(2, api_task.multi_instance_index)
+
+        task = processor.get_current_user_tasks()[0]
+        self.assertEqual(task.id, api_task.id)
+        task.update_data({"investigator": {"email": "asdf32@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
+        workflow_api = WorkflowService.processor_to_workflow_api(processor)
 
-        task = next_user_tasks[0]
-        api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEqual("MutiInstanceTask", task.get_name())
-        task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
+        # 3rd investigator
+        api_task = workflow_api.next_task
+        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
+        self.assertEqual("asd3v", api_task.data["investigator"]["user_id"])
+        self.assertEqual("MultiInstanceTask", api_task.name)
         self.assertEqual(3, api_task.multi_instance_count)
         self.assertEqual(3, api_task.multi_instance_index)
+
+        task = processor.get_current_user_tasks()[0]
+        self.assertEqual(task.id, api_task.id)
+        task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
 
-        task = processor.bpmn_workflow.last_task
+        workflow_api = WorkflowService.processor_to_workflow_api(processor)
+
+        # Last task
+        api_task = workflow_api.next_task
         expected = self.mock_investigator_response
         expected['PI']['email'] = "asd3v@virginia.edu"
         expected['SC_I']['email'] = "asdf32@virginia.edu"
         expected['DC']['email'] = "dhf8r@virginia.edu"
-        self.assertEqual(expected,
-                         task.data['StudyInfo']['investigators'])
+        self.assertEqual(expected, api_task.data['StudyInfo']['investigators'])
         self.assertEqual(WorkflowStatus.complete, processor.get_status())
 
+    def refresh_processor(self, processor):
+        """Saves the processor and returns a new one read in from the database."""
+        processor.save()
+        processor = WorkflowProcessor(processor.workflow_model)
+        return processor
+
     @patch('crc.services.study_service.StudyService.get_investigators')
     def test_create_and_complete_workflow_parallel(self, mock_study_service):
         """Unlike the test above, the parallel task allows us to complete the items in any order."""
@@ -107,11 +128,15 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         workflow_spec_model = self.load_test_spec("multi_instance_parallel")
         study = session.query(StudyModel).first()
         processor = self.get_processor(study, workflow_spec_model)
+        processor = self.refresh_processor(processor)
         processor.bpmn_workflow.do_engine_steps()
 
         # In the Parallel instance, there should be three tasks, all of them in the ready state.
         next_user_tasks = processor.next_user_tasks()
         self.assertEqual(3, len(next_user_tasks))
+        # There should be six tasks in the navigation: the start event, the script task,
+        # the end event, and three tasks for the three executions of the multi-instance.
+        self.assertEqual(6, len(processor.bpmn_workflow.get_nav_list()))
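As a hedged illustration of the new navigation assertion (the entry names below are guesses; only the count of six is what the test checks):

    nav_list = [
        'StartEvent',            # 1
        'LoadInvestigators',     # 2: the script task (name assumed)
        'MultiInstanceTask #1',  # 3
        'MultiInstanceTask #2',  # 4: one entry per parallel execution
        'MultiInstanceTask #3',  # 5
        'EndEvent',              # 6
    ]
    assert len(nav_list) == 6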
 
         # We can complete the tasks out of order.
         task = next_user_tasks[2]
@@ -121,23 +146,26 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         api_task = WorkflowService.spiff_task_to_api_task(task)
         self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type)
-        task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
+        task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
+        self.assertEqual(6, len(processor.bpmn_workflow.get_nav_list()))
 
         task = next_user_tasks[0]
         api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEqual("MutiInstanceTask", api_task.name)
+        self.assertEqual("MultiInstanceTask", api_task.name)
         task.update_data({"investigator":{"email":"asd3v@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
+        self.assertEqual(6, len(processor.bpmn_workflow.get_nav_list()))
 
         task = next_user_tasks[1]
         api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEqual("MutiInstanceTask", task.get_name())
+        self.assertEqual("MultiInstanceTask", task.get_name())
         task.update_data({"investigator":{"email":"asdf32@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
+        self.assertEqual(6, len(processor.bpmn_workflow.get_nav_list()))
 
         # Completing the tasks out of order still provides the correct information.
         expected = self.mock_investigator_response
@@ -148,3 +176,4 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
                          task.data['StudyInfo']['investigators'])
         self.assertEqual(WorkflowStatus.complete, processor.get_status())
+        self.assertEqual(6, len(processor.bpmn_workflow.get_nav_list()))
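Before the service-test diff below, a small runnable sketch (plain Python) of why out-of-order completion still aggregates correctly: each parallel instance writes into its own slot, so the final mapping does not depend on completion order:

    emails = ['asd3v@virginia.edu', 'asdf32@virginia.edu', 'dhf8r@virginia.edu']
    slots = {}
    for index in (2, 0, 1):  # completed out of order, as in the test above
        slots[index] = emails[index]
    assert [slots[i] for i in sorted(slots)] == emails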
diff --git a/tests/test_workflow_service.py b/tests/workflow/test_workflow_service.py
similarity index 52%
rename from tests/test_workflow_service.py
rename to tests/workflow/test_workflow_service.py
index 9f3ceda1..6b1b5c58 100644
--- a/tests/test_workflow_service.py
+++ b/tests/workflow/test_workflow_service.py
@@ -1,7 +1,14 @@
+import json
+
 from tests.base_test import BaseTest
 
 from crc.services.workflow_processor import WorkflowProcessor
 from crc.services.workflow_service import WorkflowService
+from SpiffWorkflow import Task as SpiffTask, WorkflowException
+from example_data import ExampleDataLoader
+from crc import db
+from crc.models.stats import TaskEventModel
+from crc.models.api_models import Task
 
 
 class TestWorkflowService(BaseTest):
@@ -78,4 +85,50 @@ class TestWorkflowService(BaseTest):
         task = processor.next_task()
         task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
         WorkflowService.populate_form_with_random_data(task, task_api, required_only=False)
-        self.assertTrue(isinstance(task.data["sponsor"], dict))
\ No newline at end of file
+        self.assertTrue(isinstance(task.data["sponsor"], dict))
+
+    def test_fix_legacy_data_model_for_rrt(self):
+        ExampleDataLoader().load_rrt()  # Make sure the research_rampup is loaded, as it's not a test spec.
+        workflow = self.create_workflow('research_rampup')
+        processor = WorkflowProcessor(workflow, validate_only=True)
+
+        # Use the test spec code to complete the workflow of research rampup.
+        while not processor.bpmn_workflow.is_completed():
+            processor.bpmn_workflow.do_engine_steps()
+            tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
+            for task in tasks:
+                task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
+                WorkflowService.populate_form_with_random_data(task, task_api, False)
+                task.complete()
+                # Create the task events.
+                WorkflowService.log_task_action('dhf8r', workflow, task,
+                                                WorkflowService.TASK_ACTION_COMPLETE,
+                                                version=processor.get_version_string())
+        processor.save()
+        db.session.commit()
+
+        WorkflowService.fix_legacy_data_model_for_rrt()
+
+        # All tasks should now have data associated with them.
+        task_logs = db.session.query(TaskEventModel) \
+            .filter(TaskEventModel.workflow_id == workflow.id) \
+            .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \
+            .order_by(TaskEventModel.date).all()  # Get them back in order.
+
+        self.assertEqual(17, len(task_logs))
+        for log in task_logs:
+            task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0]
+            self.assertIsNotNone(log.form_data)
+            # Each task event should hold the data for that task's form.
+            if hasattr(task.task_spec, 'form'):
+                for field in task.task_spec.form.fields:
+                    if field.has_property(Task.PROP_OPTIONS_REPEAT):
+                        self.assertIn(field.get_property(Task.PROP_OPTIONS_REPEAT), log.form_data)
+                    else:
+                        self.assertIn(field.id, log.form_data)
+
+        # Some spot checks:
+        # The first task should be empty, with all the data removed.
+        self.assertEqual({}, task_logs[0].form_data)
+
+
diff --git a/tests/test_workflow_spec_api.py b/tests/workflow/test_workflow_spec_api.py
similarity index 100%
rename from tests/test_workflow_spec_api.py
rename to tests/workflow/test_workflow_spec_api.py
diff --git a/tests/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py
similarity index 100%
rename from tests/test_workflow_spec_validation_api.py
rename to tests/workflow/test_workflow_spec_validation_api.py
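Closing with a hedged sketch (plain dicts; my reading of the test above, not the service's actual code) of what fix_legacy_data_model_for_rrt is asserted to do — narrow each task event's stored form_data down to just the fields belonging to that task's form:

    full_task_data = {'FieldA': 'x', 'FieldB': 'y', 'unrelated_key': 'z'}
    form_field_ids = ['FieldA', 'FieldB']  # hypothetical field ids
    form_data = {k: v for k, v in full_task_data.items() if k in form_field_ids}
    assert form_data == {'FieldA': 'x', 'FieldB': 'y'}
    # A task with no form keeps nothing, hence the {} spot check on the first task.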