diff --git a/Pipfile b/Pipfile index 5ecbde1f..6b28197a 100644 --- a/Pipfile +++ b/Pipfile @@ -9,38 +9,42 @@ pbr = "*" coverage = "*" [packages] +alembic = "*" connexion = {extras = ["swagger-ui"],version = "*"} -swagger-ui-bundle = "*" +coverage = "*" +docxtpl = "*" flask = "*" +flask-admin = "*" flask-bcrypt = "*" flask-cors = "*" +flask-mail = "*" flask-marshmallow = "*" flask-migrate = "*" flask-restful = "*" +gunicorn = "*" httpretty = "*" +ldap3 = "*" +lxml = "*" +markdown = "*" marshmallow = "*" marshmallow-enum = "*" marshmallow-sqlalchemy = "*" openpyxl = "*" -pyjwt = "*" -requests = "*" -xlsxwriter = "*" -webtest = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"} -alembic = "*" -coverage = "*" -sphinx = "*" -recommonmark = "*" -psycopg2-binary = "*" -docxtpl = "*" -python-dateutil = "*" pandas = "*" -xlrd = "*" -ldap3 = "*" -gunicorn = "*" -werkzeug = "*" +psycopg2-binary = "*" +pyjwt = "*" +python-dateutil = "*" +recommonmark = "*" +requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} -flask-mail = "*" +sphinx = "*" +spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} +#spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"} +swagger-ui-bundle = "*" +webtest = "*" +werkzeug = "*" +xlrd = "*" +xlsxwriter = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 2f99c84f..bd8581a5 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "faaf0e1f31f4bf99df366e52df20bb148a05996a0e6467767660665c514af2d7" + "sha256": "97a15c4ade88db2b384d52436633889a4d9b0bdcaeea86b8a679ebda6f73fb59" }, "pipfile-spec": 6, "requires": { @@ -104,17 +104,17 @@ }, "celery": { "hashes": [ - "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647", - "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b" + "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", + "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "version": "==4.4.5" + "version": "==4.4.6" }, "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" ], - "version": "==2020.4.5.2" + "version": "==2020.6.20" }, "cffi": { "hashes": [ @@ -197,40 +197,43 @@ }, "coverage": { "hashes": [ - "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a", - "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355", - "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65", - "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7", - "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9", - "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1", - "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0", - "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55", - "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c", - "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6", - "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef", - 
"sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019", - "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e", - "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0", - "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf", - "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24", - "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2", - "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c", - "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4", - "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0", - "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd", - "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04", - "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e", - "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730", - "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2", - "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768", - "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796", - "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7", - "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a", - "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489", - "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052" + "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d", + "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2", + "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703", + "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404", + "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7", + "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405", + "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d", + "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c", + "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6", + "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70", + "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40", + "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4", + "sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613", + "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10", + "sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b", + "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0", + "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec", + "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1", + "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d", + "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913", + "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e", + "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62", + "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e", + "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a", + "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d", + "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f", + 
"sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e", + "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b", + "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c", + "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032", + "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a", + "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee", + "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c", + "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b" ], "index": "pypi", - "version": "==5.1" + "version": "==5.2" }, "docutils": { "hashes": [ @@ -261,6 +264,13 @@ "index": "pypi", "version": "==1.1.2" }, + "flask-admin": { + "hashes": [ + "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8" + ], + "index": "pypi", + "version": "==1.5.6" + }, "flask-bcrypt": { "hashes": [ "sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f" @@ -309,10 +319,10 @@ }, "flask-sqlalchemy": { "hashes": [ - "sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5", - "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e" + "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e", + "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5" ], - "version": "==2.4.3" + "version": "==2.4.4" }, "future": { "hashes": [ @@ -337,10 +347,10 @@ }, "idna": { "hashes": [ - "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", - "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "version": "==2.9" + "version": "==2.10" }, "imagesize": { "hashes": [ @@ -351,11 +361,11 @@ }, "importlib-metadata": { "hashes": [ - "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", - "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" + "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83", + "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070" ], "markers": "python_version < '3.8'", - "version": "==1.6.1" + "version": "==1.7.0" }, "inflection": { "hashes": [ @@ -394,10 +404,10 @@ }, "kombu": { "hashes": [ - "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a", - "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3" + "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", + "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "version": "==4.6.10" + "version": "==4.6.11" }, "ldap3": { "hashes": [ @@ -409,35 +419,40 @@ }, "lxml": { "hashes": [ - "sha256:06748c7192eab0f48e3d35a7adae609a329c6257495d5e53878003660dc0fec6", - "sha256:0790ddca3f825dd914978c94c2545dbea5f56f008b050e835403714babe62a5f", - "sha256:1aa7a6197c1cdd65d974f3e4953764eee3d9c7b67e3966616b41fab7f8f516b7", - "sha256:22c6d34fdb0e65d5f782a4d1a1edb52e0a8365858dafb1c08cb1d16546cf0786", - "sha256:2754d4406438c83144f9ffd3628bbe2dcc6d62b20dbc5c1ec4bc4385e5d44b42", - "sha256:27ee0faf8077c7c1a589573b1450743011117f1aa1a91d5ae776bbc5ca6070f2", - "sha256:2b02c106709466a93ed424454ce4c970791c486d5fcdf52b0d822a7e29789626", - "sha256:2d1ddce96cf15f1254a68dba6935e6e0f1fe39247de631c115e84dd404a6f031", - 
"sha256:4f282737d187ae723b2633856085c31ae5d4d432968b7f3f478a48a54835f5c4", - "sha256:51bb4edeb36d24ec97eb3e6a6007be128b720114f9a875d6b370317d62ac80b9", - "sha256:7eee37c1b9815e6505847aa5e68f192e8a1b730c5c7ead39ff317fde9ce29448", - "sha256:7fd88cb91a470b383aafad554c3fe1ccf6dfb2456ff0e84b95335d582a799804", - "sha256:9144ce36ca0824b29ebc2e02ca186e54040ebb224292072250467190fb613b96", - "sha256:925baf6ff1ef2c45169f548cc85204433e061360bfa7d01e1be7ae38bef73194", - "sha256:a636346c6c0e1092ffc202d97ec1843a75937d8c98aaf6771348ad6422e44bb0", - "sha256:a87dbee7ad9dce3aaefada2081843caf08a44a8f52e03e0a4cc5819f8398f2f4", - "sha256:a9e3b8011388e7e373565daa5e92f6c9cb844790dc18e43073212bb3e76f7007", - "sha256:afb53edf1046599991fb4a7d03e601ab5f5422a5435c47ee6ba91ec3b61416a6", - "sha256:b26719890c79a1dae7d53acac5f089d66fd8cc68a81f4e4bd355e45470dc25e1", - "sha256:b7462cdab6fffcda853338e1741ce99706cdf880d921b5a769202ea7b94e8528", - "sha256:b77975465234ff49fdad871c08aa747aae06f5e5be62866595057c43f8d2f62c", - "sha256:c47a8a5d00060122ca5908909478abce7bbf62d812e3fc35c6c802df8fb01fe7", - "sha256:c79e5debbe092e3c93ca4aee44c9a7631bdd407b2871cb541b979fd350bbbc29", - "sha256:d8d40e0121ca1606aa9e78c28a3a7d88a05c06b3ca61630242cded87d8ce55fa", - "sha256:ee2be8b8f72a2772e72ab926a3bccebf47bb727bda41ae070dc91d1fb759b726", - "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9", - "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529" + "sha256:05a444b207901a68a6526948c7cc8f9fe6d6f24c70781488e32fd74ff5996e3f", + "sha256:08fc93257dcfe9542c0a6883a25ba4971d78297f63d7a5a26ffa34861ca78730", + "sha256:107781b213cf7201ec3806555657ccda67b1fccc4261fb889ef7fc56976db81f", + "sha256:121b665b04083a1e85ff1f5243d4a93aa1aaba281bc12ea334d5a187278ceaf1", + "sha256:1fa21263c3aba2b76fd7c45713d4428dbcc7644d73dcf0650e9d344e433741b3", + "sha256:2b30aa2bcff8e958cd85d907d5109820b01ac511eae5b460803430a7404e34d7", + "sha256:4b4a111bcf4b9c948e020fd207f915c24a6de3f1adc7682a2d92660eb4e84f1a", + "sha256:5591c4164755778e29e69b86e425880f852464a21c7bb53c7ea453bbe2633bbe", + "sha256:59daa84aef650b11bccd18f99f64bfe44b9f14a08a28259959d33676554065a1", + "sha256:5a9c8d11aa2c8f8b6043d845927a51eb9102eb558e3f936df494e96393f5fd3e", + "sha256:5dd20538a60c4cc9a077d3b715bb42307239fcd25ef1ca7286775f95e9e9a46d", + "sha256:74f48ec98430e06c1fa8949b49ebdd8d27ceb9df8d3d1c92e1fdc2773f003f20", + "sha256:786aad2aa20de3dbff21aab86b2fb6a7be68064cbbc0219bde414d3a30aa47ae", + "sha256:7ad7906e098ccd30d8f7068030a0b16668ab8aa5cda6fcd5146d8d20cbaa71b5", + "sha256:80a38b188d20c0524fe8959c8ce770a8fdf0e617c6912d23fc97c68301bb9aba", + "sha256:8f0ec6b9b3832e0bd1d57af41f9238ea7709bbd7271f639024f2fc9d3bb01293", + "sha256:92282c83547a9add85ad658143c76a64a8d339028926d7dc1998ca029c88ea6a", + "sha256:94150231f1e90c9595ccc80d7d2006c61f90a5995db82bccbca7944fd457f0f6", + "sha256:9dc9006dcc47e00a8a6a029eb035c8f696ad38e40a27d073a003d7d1443f5d88", + "sha256:a76979f728dd845655026ab991df25d26379a1a8fc1e9e68e25c7eda43004bed", + "sha256:aa8eba3db3d8761db161003e2d0586608092e217151d7458206e243be5a43843", + "sha256:bea760a63ce9bba566c23f726d72b3c0250e2fa2569909e2d83cda1534c79443", + "sha256:c3f511a3c58676147c277eff0224c061dd5a6a8e1373572ac817ac6324f1b1e0", + "sha256:c9d317efde4bafbc1561509bfa8a23c5cab66c44d49ab5b63ff690f5159b2304", + "sha256:cc411ad324a4486b142c41d9b2b6a722c534096963688d879ea6fa8a35028258", + "sha256:cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6", + "sha256:cfd7c5dd3c35c19cec59c63df9571c67c6d6e5c92e0fe63517920e97f61106d1", + 
"sha256:e1cacf4796b20865789083252186ce9dc6cc59eca0c2e79cca332bdff24ac481", + "sha256:e70d4e467e243455492f5de463b72151cc400710ac03a0678206a5f27e79ddef", + "sha256:ecc930ae559ea8a43377e8b60ca6f8d61ac532fc57efb915d899de4a67928efd", + "sha256:f161af26f596131b63b236372e4ce40f3167c1b5b5d459b29d2514bd8c9dc9ee" ], - "version": "==4.5.1" + "index": "pypi", + "version": "==4.5.2" }, "mako": { "hashes": [ @@ -446,6 +461,14 @@ ], "version": "==1.1.3" }, + "markdown": { + "hashes": [ + "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17", + "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59" + ], + "index": "pypi", + "version": "==3.2.2" + }, "markupsafe": { "hashes": [ "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", @@ -486,11 +509,11 @@ }, "marshmallow": { "hashes": [ - "sha256:35ee2fb188f0bd9fc1cf9ac35e45fd394bd1c153cee430745a465ea435514bd5", - "sha256:9aa20f9b71c992b4782dad07c51d92884fd0f7c5cb9d3c737bea17ec1bad765f" + "sha256:67bf4cae9d3275b3fc74bd7ff88a7c98ee8c57c94b251a67b031dc293ecc4b76", + "sha256:a2a5eefb4b75a3b43f05be1cca0b6686adf56af7465c3ca629e5ad8d1e1fe13d" ], "index": "pypi", - "version": "==3.6.1" + "version": "==3.7.1" }, "marshmallow-enum": { "hashes": [ @@ -510,29 +533,34 @@ }, "numpy": { "hashes": [ - "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233", - "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b", - "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7", - "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f", - "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5", - "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb", - "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583", - "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1", - "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a", - "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271", - "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824", - "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3", - "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc", - "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161", - "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f", - "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f", - "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf", - "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b", - "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0", - "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675", - "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8" + "sha256:082f8d4dd69b6b688f64f509b91d482362124986d98dc7dc5f5e9f9b9c3bb983", + "sha256:1bc0145999e8cb8aed9d4e65dd8b139adf1919e521177f198529687dbf613065", + "sha256:309cbcfaa103fc9a33ec16d2d62569d541b79f828c382556ff072442226d1968", + "sha256:3673c8b2b29077f1b7b3a848794f8e11f401ba0b71c49fbd26fb40b71788b132", + "sha256:480fdd4dbda4dd6b638d3863da3be82873bba6d32d1fc12ea1b8486ac7b8d129", + "sha256:56ef7f56470c24bb67fb43dae442e946a6ce172f97c69f8d067ff8550cf782ff", + "sha256:5a936fd51049541d86ccdeef2833cc89a18e4d3808fe58a8abeb802665c5af93", + "sha256:5b6885c12784a27e957294b60f97e8b5b4174c7504665333c5e94fbf41ae5d6a", + 
"sha256:667c07063940e934287993366ad5f56766bc009017b4a0fe91dbd07960d0aba7", + "sha256:7ed448ff4eaffeb01094959b19cbaf998ecdee9ef9932381420d514e446601cd", + "sha256:8343bf67c72e09cfabfab55ad4a43ce3f6bf6e6ced7acf70f45ded9ebb425055", + "sha256:92feb989b47f83ebef246adabc7ff3b9a59ac30601c3f6819f8913458610bdcc", + "sha256:935c27ae2760c21cd7354402546f6be21d3d0c806fffe967f745d5f2de5005a7", + "sha256:aaf42a04b472d12515debc621c31cf16c215e332242e7a9f56403d814c744624", + "sha256:b12e639378c741add21fbffd16ba5ad25c0a1a17cf2b6fe4288feeb65144f35b", + "sha256:b1cca51512299841bf69add3b75361779962f9cee7d9ee3bb446d5982e925b69", + "sha256:b8456987b637232602ceb4d663cb34106f7eb780e247d51a260b84760fd8f491", + "sha256:b9792b0ac0130b277536ab8944e7b754c69560dac0415dd4b2dbd16b902c8954", + "sha256:c9591886fc9cbe5532d5df85cb8e0cc3b44ba8ce4367bd4cf1b93dc19713da72", + "sha256:cf1347450c0b7644ea142712619533553f02ef23f92f781312f6a3553d031fc7", + "sha256:de8b4a9b56255797cbddb93281ed92acbc510fb7b15df3f01bd28f46ebc4edae", + "sha256:e1b1dc0372f530f26a03578ac75d5e51b3868b9b76cd2facba4c9ee0eb252ab1", + "sha256:e45f8e981a0ab47103181773cc0a54e650b2aef8c7b6cd07405d0fa8d869444a", + "sha256:e4f6d3c53911a9d103d8ec9518190e52a8b945bab021745af4939cfc7c0d4a9e", + "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e", + "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc" ], - "version": "==1.18.5" + "version": "==1.19.1" }, "openapi-spec-validator": { "hashes": [ @@ -544,10 +572,11 @@ }, "openpyxl": { "hashes": [ - "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64" + "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f", + "sha256:d88dd1480668019684c66cfff3e52a5de4ed41e9df5dd52e008cbf27af0dbf87" ], "index": "pypi", - "version": "==3.0.3" + "version": "==3.0.4" }, "packaging": { "hashes": [ @@ -558,25 +587,25 @@ }, "pandas": { "hashes": [ - "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46", - "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5", - "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa", - "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc", - "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678", - "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc", - "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31", - "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8", - "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6", - "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa", - "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4", - "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874", - "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd", - "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4", - "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126", - "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648" + "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096", + "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0", + "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453", + "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc", + "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7", + 
"sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b", + "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8", + "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8", + "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef", + "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d", + "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705", + "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9", + "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91", + "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3", + "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107", + "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e" ], "index": "pypi", - "version": "==1.0.4" + "version": "==1.0.5" }, "psycopg2-binary": { "hashes": [ @@ -656,6 +685,13 @@ ], "version": "==0.16.0" }, + "python-box": { + "hashes": [ + "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", + "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" + ], + "version": "==5.0.1" + }, "python-dateutil": { "hashes": [ "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", @@ -678,6 +714,61 @@ ], "version": "==1.0.4" }, + "python-levenshtein-wheels": { + "hashes": [ + "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d", + "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92", + "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc", + "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb", + "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b", + "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc", + "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1", + "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40", + "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31", + "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d", + "sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10", + "sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103", + "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4", + "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6", + "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f", + "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0", + "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399", + "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0", + "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f", + "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5", + "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2", + "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c", + "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7", + "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3", + "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55", + "sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e", + "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362", + 
"sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662", + "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8", + "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1", + "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0", + "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884", + "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021", + "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2", + "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa", + "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f", + "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab", + "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263", + "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610", + "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa", + "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d", + "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4", + "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92", + "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a", + "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26", + "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99", + "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e", + "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8", + "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244", + "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c" + ], + "version": "==0.13.1" + }, "pytz": { "hashes": [ "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", @@ -711,11 +802,11 @@ }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "sentry-sdk": { "extras": [ @@ -751,11 +842,11 @@ }, "sphinx": { "hashes": [ - "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258", - "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5" + "sha256:97dbf2e31fc5684bb805104b8ad34434ed70e6c588f6896991b2fdfd2bef8c00", + "sha256:b9daeb9b39aa1ffefc2809b43604109825300300b987a24f45976c001ba1a8fd" ], "index": "pypi", - "version": "==3.1.1" + "version": "==3.1.2" }, "sphinxcontrib-applehelp": { "hashes": [ @@ -802,49 +893,48 @@ "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0" + "ref": "74529738b4e16be5aadd846669a201560f81a6d4" }, "sqlalchemy": { "hashes": [ - "sha256:128bc917ed20d78143a45024455ff0aed7d3b96772eba13d5dbaf9cc57e5c41b", - "sha256:156a27548ba4e1fed944ff9fcdc150633e61d350d673ae7baaf6c25c04ac1f71", - "sha256:27e2efc8f77661c9af2681755974205e7462f1ae126f498f4fe12a8b24761d15", - "sha256:2a12f8be25b9ea3d1d5b165202181f2b7da4b3395289000284e5bb86154ce87c", - "sha256:31c043d5211aa0e0773821fcc318eb5cbe2ec916dfbc4c6eea0c5188971988eb", - "sha256:65eb3b03229f684af0cf0ad3bcc771970c1260a82a791a8d07bffb63d8c95bcc", - 
"sha256:6cd157ce74a911325e164441ff2d9b4e244659a25b3146310518d83202f15f7a", - "sha256:703c002277f0fbc3c04d0ae4989a174753a7554b2963c584ce2ec0cddcf2bc53", - "sha256:869bbb637de58ab0a912b7f20e9192132f9fbc47fc6b5111cd1e0f6cdf5cf9b0", - "sha256:8a0e0cd21da047ea10267c37caf12add400a92f0620c8bc09e4a6531a765d6d7", - "sha256:8d01e949a5d22e5c4800d59b50617c56125fc187fbeb8fa423e99858546de616", - "sha256:925b4fe5e7c03ed76912b75a9a41dfd682d59c0be43bce88d3b27f7f5ba028fb", - "sha256:9cb1819008f0225a7c066cac8bb0cf90847b2c4a6eb9ebb7431dbd00c56c06c5", - "sha256:a87d496884f40c94c85a647c385f4fd5887941d2609f71043e2b73f2436d9c65", - "sha256:a9030cd30caf848a13a192c5e45367e3c6f363726569a56e75dc1151ee26d859", - "sha256:a9e75e49a0f1583eee0ce93270232b8e7bb4b1edc89cc70b07600d525aef4f43", - "sha256:b50f45d0e82b4562f59f0e0ca511f65e412f2a97d790eea5f60e34e5f1aabc9a", - "sha256:b7878e59ec31f12d54b3797689402ee3b5cfcb5598f2ebf26491732758751908", - "sha256:ce1ddaadee913543ff0154021d31b134551f63428065168e756d90bdc4c686f5", - "sha256:ce2646e4c0807f3461be0653502bb48c6e91a5171d6e450367082c79e12868bf", - "sha256:ce6c3d18b2a8ce364013d47b9cad71db815df31d55918403f8db7d890c9d07ae", - "sha256:e4e2664232005bd306f878b0f167a31f944a07c4de0152c444f8c61bbe3cfb38", - "sha256:e8aa395482728de8bdcca9cc0faf3765ab483e81e01923aaa736b42f0294f570", - "sha256:eb4fcf7105bf071c71068c6eee47499ab8d4b8f5a11fc35147c934f0faa60f23", - "sha256:ed375a79f06cad285166e5be74745df1ed6845c5624aafadec4b7a29c25866ef", - "sha256:f35248f7e0d63b234a109dd72fbfb4b5cb6cb6840b221d0df0ecbf54ab087654", - "sha256:f502ef245c492b391e0e23e94cba030ab91722dcc56963c85bfd7f3441ea2bbe", - "sha256:fe01bac7226499aedf472c62fa3b85b2c619365f3f14dd222ffe4f3aa91e5f98" + "sha256:0942a3a0df3f6131580eddd26d99071b48cfe5aaf3eab2783076fbc5a1c1882e", + "sha256:0ec575db1b54909750332c2e335c2bb11257883914a03bc5a3306a4488ecc772", + "sha256:109581ccc8915001e8037b73c29590e78ce74be49ca0a3630a23831f9e3ed6c7", + "sha256:16593fd748944726540cd20f7e83afec816c2ac96b082e26ae226e8f7e9688cf", + "sha256:427273b08efc16a85aa2b39892817e78e3ed074fcb89b2a51c4979bae7e7ba98", + "sha256:50c4ee32f0e1581828843267d8de35c3298e86ceecd5e9017dc45788be70a864", + "sha256:512a85c3c8c3995cc91af3e90f38f460da5d3cade8dc3a229c8e0879037547c9", + "sha256:57aa843b783179ab72e863512e14bdcba186641daf69e4e3a5761d705dcc35b1", + "sha256:621f58cd921cd71ba6215c42954ffaa8a918eecd8c535d97befa1a8acad986dd", + "sha256:6ac2558631a81b85e7fb7a44e5035347938b0a73f5fdc27a8566777d0792a6a4", + "sha256:716754d0b5490bdcf68e1e4925edc02ac07209883314ad01a137642ddb2056f1", + "sha256:736d41cfebedecc6f159fc4ac0769dc89528a989471dc1d378ba07d29a60ba1c", + "sha256:8619b86cb68b185a778635be5b3e6018623c0761dde4df2f112896424aa27bd8", + "sha256:87fad64529cde4f1914a5b9c383628e1a8f9e3930304c09cf22c2ae118a1280e", + "sha256:89494df7f93b1836cae210c42864b292f9b31eeabca4810193761990dc689cce", + "sha256:8cac7bb373a5f1423e28de3fd5fc8063b9c8ffe8957dc1b1a59cb90453db6da1", + "sha256:8fd452dc3d49b3cc54483e033de6c006c304432e6f84b74d7b2c68afa2569ae5", + "sha256:adad60eea2c4c2a1875eb6305a0b6e61a83163f8e233586a4d6a55221ef984fe", + "sha256:c26f95e7609b821b5f08a72dab929baa0d685406b953efd7c89423a511d5c413", + "sha256:cbe1324ef52ff26ccde2cb84b8593c8bf930069dfc06c1e616f1bfd4e47f48a3", + "sha256:d05c4adae06bd0c7f696ae3ec8d993ed8ffcc4e11a76b1b35a5af8a099bd2284", + "sha256:d98bc827a1293ae767c8f2f18be3bb5151fd37ddcd7da2a5f9581baeeb7a3fa1", + "sha256:da2fb75f64792c1fc64c82313a00c728a7c301efe6a60b7a9fe35b16b4368ce7", + "sha256:e4624d7edb2576cd72bb83636cd71c8ce544d8e272f308bd80885056972ca299", + 
"sha256:e89e0d9e106f8a9180a4ca92a6adde60c58b1b0299e1b43bd5e0312f535fbf33", + "sha256:f11c2437fb5f812d020932119ba02d9e2bc29a6eca01a055233a8b449e3e1e7d", + "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", + "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], - "version": "==1.3.17" + "version": "==1.3.18" }, "swagger-ui-bundle": { "hashes": [ - "sha256:49d2e12d60a6499e9d37ea37953b5d700f4e114edc7520fe918bae5eb693a20e", - "sha256:c5373b683487b1b914dccd23bcd9a3016afa2c2d1cda10f8713c0a9af0f91dd3", - "sha256:f776811855092c086dbb08216c8810a84accef8c76c796a135caa13645c5cc68" + "sha256:f5255f786cde67a2638111f4a7d04355836743198a83c4ecbe815d9fc384b0c8", + "sha256:f5691167f2e9f73ecbe8229a89454ae5ea958f90bb0d4583ed7adaae598c4122" ], "index": "pypi", - "version": "==0.0.6" + "version": "==0.0.8" }, "urllib3": { "hashes": [ @@ -890,6 +980,13 @@ "index": "pypi", "version": "==1.0.1" }, + "wtforms": { + "hashes": [ + "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b", + "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972" + ], + "version": "==2.3.1" + }, "xlrd": { "hashes": [ "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2", @@ -924,48 +1021,51 @@ }, "coverage": { "hashes": [ - "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a", - "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355", - "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65", - "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7", - "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9", - "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1", - "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0", - "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55", - "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c", - "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6", - "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef", - "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019", - "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e", - "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0", - "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf", - "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24", - "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2", - "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c", - "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4", - "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0", - "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd", - "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04", - "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e", - "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730", - "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2", - "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768", - "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796", - "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7", - "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a", - 
"sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489", - "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052" + "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d", + "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2", + "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703", + "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404", + "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7", + "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405", + "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d", + "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c", + "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6", + "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70", + "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40", + "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4", + "sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613", + "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10", + "sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b", + "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0", + "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec", + "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1", + "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d", + "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913", + "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e", + "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62", + "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e", + "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a", + "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d", + "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f", + "sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e", + "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b", + "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c", + "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032", + "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a", + "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee", + "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c", + "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b" ], "index": "pypi", - "version": "==5.1" + "version": "==5.2" }, "importlib-metadata": { "hashes": [ - "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", - "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" + "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83", + "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070" ], "markers": "python_version < '3.8'", - "version": "==1.6.1" + "version": "==1.7.0" }, "more-itertools": { "hashes": [ @@ -998,10 +1098,10 @@ }, "py": { "hashes": [ - "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44", - "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b" + 
"sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", + "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], - "version": "==1.8.2" + "version": "==1.9.0" }, "pyparsing": { "hashes": [ @@ -1027,10 +1127,10 @@ }, "wcwidth": { "hashes": [ - "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f", - "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f" + "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", + "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" ], - "version": "==0.2.4" + "version": "==0.2.5" }, "zipp": { "hashes": [ diff --git a/README.md b/README.md index a8191a67..6bd7dd67 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# CrConnectFrontend +# sartography/cr-connect-workflow [![Build Status](https://travis-ci.com/sartography/cr-connect-workflow.svg?branch=master)](https://travis-ci.com/sartography/cr-connect-workflow) @@ -27,7 +27,7 @@ Make sure all of the following are properly installed on your system: - Select the directory where you cloned this repository and click `Ok`. - Expand the `Project Interpreter` section. - Select the `New environment using` radio button and choose `Pipenv` in the dropdown. - - Under `Base interpreter`, select `Python 3.6` + - Under `Base interpreter`, select `Python 3.7` - In the `Pipenv executable` field, enter `/home/your_username_goes_here/.local/bin/pipenv` - Click `Create` ![Project Interpreter](readme_images/new_project.png) @@ -47,22 +47,15 @@ run configuration so it doesn't go away.) : Just click the "Play" button next to RUN in the top right corner of the screen. The Swagger based view of the API will be avialable at http://0.0.0.0:5000/v1.0/ui/ -### Testing from the Shell -This app includes a command line interface that will read in BPMN files and let you -play with it at the command line. To run it right click on app/command_line/joke.py and -click run. Type "?" to get a list of commands. -So far the joke system will work a little, when you file it up try these commands -in this order: -```bash -> engine (this will run all tasks up to first user task and should print a joke) -> answer clock (this is the correct answer) -> next (this completes the user task) -> engine (this runs the rest of the tasks, and should tell you that you got the question right) +### Running Tests +We use pytest to execute tests. You can run this from the command line with: ``` +pipenv run coverage run -m pytest +``` +To run the tests within PyCharm set up a run configuration using pytest (Go to Run, configurations, click the +plus icon, select Python Tests, and under this select pytest, defaults should work good-a-plenty with no +additional edits required.) -You can try re-running this and getting the question wrong. -You might open up the Joke bpmn diagram so you can see what this looks like to -draw out. 
## Documentation Additional Documentation is available on [ReadTheDocs](https://cr-connect-workflow.readthedocs.io/en/latest/#) diff --git a/config/default.py b/config/default.py index bee6f968..b295bf4b 100644 --- a/config/default.py +++ b/config/default.py @@ -15,7 +15,8 @@ TEST_UID = environ.get('TEST_UID', default="dhf8r") ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah3us,cl3wf")) # Sentry flag -ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true" +ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true" # To be removed soon +SENTRY_ENVIRONMENT = environ.get('SENTRY_ENVIRONMENT', None) # Add trailing slash to base path APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/')) @@ -30,7 +31,7 @@ SQLALCHEMY_DATABASE_URI = environ.get( default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME) ) TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24)) -TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.") +SECRET_KEY = environ.get('SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.") FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session") SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER") @@ -46,6 +47,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1)) # Email configuration +DEFAULT_SENDER = 'askresearch@virginia.edu' FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com'] MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True) MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io') diff --git a/config/testing.py b/config/testing.py index c7a777ad..5b03cc41 100644 --- a/config/testing.py +++ b/config/testing.py @@ -5,7 +5,7 @@ basedir = os.path.abspath(os.path.dirname(__file__)) NAME = "CR Connect Workflow" TESTING = True -TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod." +SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod." # This is here, for when we are running the E2E Tests in the frontend code bases. 
# which will set the TESTING environment to true, causing this to execute, but we need
diff --git a/crc/__init__.py b/crc/__init__.py
index 1ac2678f..9081f739 100644
--- a/crc/__init__.py
+++ b/crc/__init__.py
@@ -4,6 +4,8 @@ import sentry_sdk
 import connexion
 from jinja2 import Environment, FileSystemLoader
+from flask_admin import Admin
+from flask_admin.contrib.sqla import ModelView
 from flask_cors import CORS
 from flask_marshmallow import Marshmallow
 from flask_mail import Mail
@@ -32,30 +34,31 @@ db = SQLAlchemy(app)
 session = db.session
 """:type: sqlalchemy.orm.Session"""

+# Mail settings
+mail = Mail(app)
+
 migrate = Migrate(app, db)
 ma = Marshmallow(app)

 from crc import models
 from crc import api
+from crc.api import admin
 connexion_app.add_api('api.yml', base_path='/v1.0')

+
 # Convert list of allowed origins to list of regexes
 origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
 cors = CORS(connexion_app.app, origins=origins_re)

-if app.config['ENABLE_SENTRY']:
+# Sentry error handling
+if app.config['SENTRY_ENVIRONMENT']:
     sentry_sdk.init(
+        environment=app.config['SENTRY_ENVIRONMENT'],
         dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915",
         integrations=[FlaskIntegration()]
     )

-# Jinja environment definition, used to render mail templates
-template_dir = os.getcwd() + '/crc/static/templates/mails'
-env = Environment(loader=FileSystemLoader(template_dir))
-
-# Mail settings
-mail = Mail(app)
-
 print('=== USING THESE CONFIG SETTINGS: ===')
 print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
 print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
@@ -88,3 +91,4 @@ def clear_db():
     """Load example data into the database."""
     from example_data import ExampleDataLoader
     ExampleDataLoader.clean_db()
+
diff --git a/crc/api.yml b/crc/api.yml
index 64f6086a..4c6ebd1b 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -502,7 +502,6 @@ paths:
             application/json:
               schema:
                 $ref: "#/components/schemas/File"
-  # /v1.0/workflow/0
   /reference_file:
     get:
       operationId: crc.api.file.get_reference_files
@@ -565,6 +564,26 @@
                 type: string
                 format: binary
                 example: ''
+  /task_events:
+    parameters:
+      - name: action
+        in: query
+        required: false
+        description: The type of action the event documents, options include "ASSIGNMENT" for tasks that are waiting on you, and "COMPLETE" for tasks that have completed.
+        schema:
+          type: string
+    get:
+      operationId: crc.api.workflow.get_task_events
+      summary: Returns a list of task events related to the current user. Can be filtered by type.
+      tags:
+        - Workflows and Tasks
+      responses:
+        '200':
+          description: Returns details about tasks that are waiting on the current user.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/TaskEvent"
   # /v1.0/workflow/0
   /workflow/{workflow_id}:
     parameters:
@@ -626,6 +645,12 @@
         schema:
           type: string
           format: uuid
+      - name: terminate_loop
+        in: query
+        required: false
+        description: Terminate the loop on a looping task
+        schema:
+          type: boolean
     put:
       operationId: crc.api.workflow.update_task
       summary: Exclusively for User Tasks, submits form data as a flat set of key/values.
@@ -697,12 +722,19 @@
         description: The string to search for in the Value column of the lookup table.
         schema:
          type: string
+      - name: value
+        in: query
+        required: false
+        description: An alternative to query, this accepts the specific value or id selected in a dropdown list or auto-complete, and will return the one matching record. Useful for getting additional details about an item selected in a dropdown.
+        schema:
+          type: string
       - name: limit
         in: query
         required: false
         description: The total number of records to return, defaults to 10.
         schema:
           type: integer
+
     get:
       operationId: crc.api.workflow.lookup
       summary: Provides type-ahead search against a lookup table associated with a form field.
@@ -806,6 +838,33 @@
               type: array
               items:
                 $ref: "#/components/schemas/Script"
+  /eval:
+    parameters:
+      - name: expression
+        in: query
+        required: true
+        description: The python expression to execute.
+        schema:
+          type: string
+    put:
+      operationId: crc.api.tools.evaluate_python_expression
+      summary: Executes the given python expression, using the given json data structure as local variables, and returns the result of the evaluation.
+      tags:
+        - Configurator Tools
+      requestBody:
+        description: The json data to use as local variables when evaluating the expression.
+        required: true
+        content:
+          application/json:
+            schema:
+              type: object
+      responses:
+        '200':
+          description: Returns the result of executing the given python script.
+          content:
+            text/plain:
+              schema:
+                type: string
   /approval-counts:
     parameters:
       - name: as_user
@@ -917,6 +976,21 @@
             application/json:
               schema:
                 type: object
+  /health_attesting:
+    get:
+      operationId: crc.api.approval.get_health_attesting_csv
+      summary: Returns a CSV file with health attesting records
+      tags:
+        - Approvals
+      responses:
+        '200':
+          description: A CSV file
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/Approval"
 components:
   securitySchemes:
     jwt:
@@ -1164,6 +1238,36 @@ components:
                 value: "model.my_boolean_field_id && model.my_enum_field_value !== 'something'"
               - id: "hide_expression"
                 value: "model.my_enum_field_value === 'something'"
+    TaskEvent:
+      properties:
+        workflow:
+          $ref: "#/components/schemas/Workflow"
+        study:
+          $ref: "#/components/schemas/Study"
+        workflow_sec:
+          $ref: "#/components/schemas/WorkflowSpec"
+        spec_version:
+          type: string
+        action:
+          type: string
+        task_id:
+          type: string
+        task_type:
+          type: string
+        task_lane:
+          type: string
+        form_data:
+          type: object
+        mi_type:
+          type: string
+        mi_count:
+          type: integer
+        mi_index:
+          type: integer
+        process_name:
+          type: string
+        date:
+          type: string
     Form:
       properties:
         key:
diff --git a/crc/api/admin.py b/crc/api/admin.py
new file mode 100644
index 00000000..4e96fcd8
--- /dev/null
+++ b/crc/api/admin.py
@@ -0,0 +1,72 @@
+# Admin app
+import json
+
+from flask import url_for
+from flask_admin import Admin
+from flask_admin.contrib import sqla
+from flask_admin.contrib.sqla import ModelView
+from werkzeug.utils import redirect
+from jinja2 import Markup
+
+from crc import db, app
+from crc.api.user import verify_token, verify_token_admin
+from crc.models.approval import ApprovalModel
+from crc.models.file import FileModel
+from crc.models.task_event import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowModel
+
+
+class AdminModelView(sqla.ModelView):
+    can_create = False
+    can_edit = False
+    can_delete = False
+    page_size = 50  # the number of entries to display on the list view
+    column_exclude_list = ['bpmn_workflow_json', ]
+    column_display_pk = True
+    can_export = True
+
+    def is_accessible(self):
+        return verify_token_admin()
+
+    def inaccessible_callback(self, name, **kwargs):
+        # redirect to login page if user doesn't have access
+        return redirect(url_for('home'))
+
+class UserView(AdminModelView):
+    column_filters = ['uid']
+
+class StudyView(AdminModelView):
+    column_filters = ['id', 'primary_investigator_id']
+    column_searchable_list = ['title']
+
+class ApprovalView(AdminModelView):
+    column_filters = ['study_id', 'approver_uid']
+
+class WorkflowView(AdminModelView):
+    column_filters = ['study_id', 'id']
+
+class FileView(AdminModelView):
+    column_filters = ['workflow_id']
+
+def json_formatter(view, context, model, name):
+    value = getattr(model, name)
+    json_value = json.dumps(value, ensure_ascii=False, indent=2)
+    return Markup('<pre>{}</pre>
'.format(json_value)) + +class TaskEventView(AdminModelView): + column_filters = ['workflow_id', 'action'] + column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'form_data', 'date'] + column_formatters = { + 'form_data': json_formatter, + } + +admin = Admin(app) + +admin.add_view(StudyView(StudyModel, db.session)) +admin.add_view(ApprovalView(ApprovalModel, db.session)) +admin.add_view(UserView(UserModel, db.session)) +admin.add_view(WorkflowView(WorkflowModel, db.session)) +admin.add_view(FileView(FileModel, db.session)) +admin.add_view(TaskEventView(TaskEventModel, db.session)) diff --git a/crc/api/approval.py b/crc/api/approval.py index b3ee0fed..fd01e221 100644 --- a/crc/api/approval.py +++ b/crc/api/approval.py @@ -1,9 +1,11 @@ +import csv +import io import json import pickle from base64 import b64decode from datetime import datetime -from flask import g +from flask import g, make_response from crc import db, session from crc.api.common import ApiError @@ -88,71 +90,25 @@ def get_approvals_for_study(study_id=None): return results +def get_health_attesting_csv(): + records = ApprovalService.get_health_attesting_records() + si = io.StringIO() + cw = csv.writer(si) + cw.writerows(records) + output = make_response(si.getvalue()) + output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv" + output.headers["Content-type"] = "text/csv" + return output + + # ----- Begin descent into madness ---- # def get_csv(): """A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a man to do just about anything""" - approvals = ApprovalService.get_all_approvals(include_cancelled=False) - output = [] - errors = [] - for approval in approvals: - try: - if approval.status != ApprovalStatus.APPROVED.value: - continue - for related_approval in approval.related_approvals: - if related_approval.status != ApprovalStatus.APPROVED.value: - continue - workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() - data = json.loads(workflow.bpmn_workflow_json) - last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) - personnel = extract_value(last_task, 'personnel') - training_val = extract_value(last_task, 'RequiredTraining') - pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] - review_complete = 'AllRequiredTraining' in training_val - pi_uid = workflow.study.primary_investigator_id - pi_details = LdapService.user_info(pi_uid) - details = [] - details.append(pi_details) - for person in personnel: - uid = person['PersonnelComputingID']['value'] - details.append(LdapService.user_info(uid)) + content = ApprovalService.get_not_really_csv_content() - for person in details: - record = { - "study_id": approval.study_id, - "pi_uid": pi_details.uid, - "pi": pi_details.display_name, - "name": person.display_name, - "uid": person.uid, - "email": person.email_address, - "supervisor": "", - "review_complete": review_complete, - } - # We only know the PI's supervisor. 
-                if person.uid == pi_details.uid:
-                    record["supervisor"] = pi_supervisor
+    return content

-                output.append(record)
-
-        except Exception as e:
-            errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))
-    return {"results": output, "errors": errors }
-
-
-def extract_value(task, key):
-    if key in task['data']:
-        return pickle.loads(b64decode(task['data'][key]['__bytes__']))
-    else:
-        return ""
-
-
-def find_task(uuid, task):
-    if task['id']['__uuid__'] == uuid:
-        return task
-    for child in task['children']:
-        task = find_task(uuid, child)
-        if task:
-            return task

 # ----- come back to the world of the living ---- #
diff --git a/crc/api/common.py b/crc/api/common.py
index f8673a5b..cb527c73 100644
--- a/crc/api/common.py
+++ b/crc/api/common.py
@@ -25,6 +25,7 @@ class ApiError(Exception):
         instance.task_name = task.task_spec.description or ""
         instance.file_name = task.workflow.spec.file or ""
         instance.task_data = task.data
+        app.logger.error(message, exc_info=True)
         return instance

     @classmethod
@@ -35,6 +36,7 @@ class ApiError(Exception):
         instance.task_name = task_spec.description or ""
         if task_spec._wf_spec:
             instance.file_name = task_spec._wf_spec.file
+        app.logger.error(message, exc_info=True)
         return instance

     @classmethod
diff --git a/crc/api/tools.py b/crc/api/tools.py
index d140e962..de30d10d 100644
--- a/crc/api/tools.py
+++ b/crc/api/tools.py
@@ -2,6 +2,7 @@ import io
 import json

 import connexion
+from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine
 from flask import send_file
 from jinja2 import Template, UndefinedError

@@ -10,11 +11,12 @@ from crc.scripts.complete_template import CompleteTemplate
 from crc.scripts.script import Script
 import crc.scripts
 from crc.services.mails import send_test_email
+from crc.services.workflow_processor import WorkflowProcessor


 def render_markdown(data, template):
     """
-    Provides a quick way to very that a Jinja markdown template will work properly on a given json
+    Provides a quick way to verify that a Jinja markdown template will work properly on a given json
     data structure.
     Useful for folks that are building these markdown templates.
     """
     try:
@@ -61,8 +63,22 @@ def list_scripts():
         })
     return script_meta

+
 def send_email(address):
     """Just sends a quick test email to assure the system is working."""
     if not address:
         address = "dan@sartography.com"
-    return send_test_email(address, [address])
\ No newline at end of file
+    return send_test_email(address, [address])
+
+
+def evaluate_python_expression(expression, body):
+    """Evaluate the given python expression, returning its result. This is useful if the
+    front end application needs to do real-time processing on task data, for instance when
+    a hide expression is based on a previous value in the same form."""
+    try:
+        # fixme: The script engine should be pulled from Workflow Processor,
+        # but the one it returns overwrites the evaluate expression making it uncallable.
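+        # A hypothetical illustration: evaluating the expression "a + b" with a request
+        # body of {"a": 1, "b": 2} returns 3; each key in the body becomes a local variable.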
+ script_engine = PythonScriptEngine() + return script_engine.evaluate(expression, **body) + except Exception as e: + raise ApiError("expression_error", str(e)) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 655a85e7..a290d340 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -1,12 +1,13 @@ import uuid +from SpiffWorkflow.util.deep_merge import DeepMerge from flask import g - from crc import session, app from crc.api.common import ApiError, ApiErrorSchema from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema from crc.models.file import FileModel, LookupDataSchema -from crc.models.stats import TaskEventModel +from crc.models.study import StudyModel, WorkflowMetadata +from crc.models.task_event import TaskEventModel, TaskEventModelSchema, TaskEvent, TaskEventSchema from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \ WorkflowSpecCategoryModelSchema from crc.services.file_service import FileService @@ -41,7 +42,6 @@ def get_workflow_specification(spec_id): def validate_workflow_specification(spec_id): - errors = [] try: WorkflowService.test_spec(spec_id) @@ -57,7 +57,6 @@ def validate_workflow_specification(spec_id): return ApiErrorSchema(many=True).dump(errors) - def update_workflow_specification(spec_id, body): if spec_id is None: raise ApiError('unknown_spec', 'Please provide a valid Workflow Spec ID.') @@ -89,115 +88,95 @@ def delete_workflow_specification(spec_id): session.query(TaskEventModel).filter(TaskEventModel.workflow_spec_id == spec_id).delete() - # Delete all stats and workflow models related to this specification + # Delete all events and workflow models related to this specification for workflow in session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id): StudyService.delete_workflow(workflow) session.query(WorkflowSpecModel).filter_by(id=spec_id).delete() session.commit() -def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None): - """Returns an API model representing the state of the current workflow, if requested, and - possible, next_task is set to the current_task.""" - - nav_dict = processor.bpmn_workflow.get_nav_list() - navigation = [] - for nav_item in nav_dict: - spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) - if 'description' in nav_item: - nav_item['title'] = nav_item.pop('description') - # fixme: duplicate code from the workflow_service. Should only do this in one place. - if ' ' in nav_item['title']: - nav_item['title'] = nav_item['title'].partition(' ')[2] - else: - nav_item['title'] = "" - if spiff_task: - nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) - nav_item['title'] = nav_item['task'].title # Prefer the task title. 
- else: - nav_item['task'] = None - if not 'is_decision' in nav_item: - nav_item['is_decision'] = False - - navigation.append(NavigationItem(**nav_item)) - NavigationItemSchema().dump(nav_item) - - spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() - workflow_api = WorkflowApi( - id=processor.get_workflow_id(), - status=processor.get_status(), - next_task=None, - navigation=navigation, - workflow_spec_id=processor.workflow_spec_id, - spec_version=processor.get_version_string(), - is_latest_spec=processor.is_latest_spec, - total_tasks=len(navigation), - completed_tasks=processor.workflow_model.completed_tasks, - last_updated=processor.workflow_model.last_updated, - title=spec.display_name - ) - if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. - # This may or may not work, sometimes there is no next task to complete. - next_task = processor.next_task() - if next_task: - workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) - - return workflow_api - - def get_workflow(workflow_id, soft_reset=False, hard_reset=False): workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - workflow_api_model = __get_workflow_api_model(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) + WorkflowService.update_task_assignments(processor) return WorkflowApiSchema().dump(workflow_api_model) +def get_task_events(action): + """Provides a way to see a history of what has happened, or get a list of tasks that need your attention.""" + query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == g.user.uid) + if action: + query = query.filter(TaskEventModel.action == action) + events = query.all() + + # Turn the database records into something a little richer for the UI to use. + task_events = [] + for event in events: + study = session.query(StudyModel).filter(StudyModel.id == event.study_id).first() + workflow = session.query(WorkflowModel).filter(WorkflowModel.id == event.workflow_id).first() + workflow_meta = WorkflowMetadata.from_workflow(workflow) + task_events.append(TaskEvent(event, study, workflow_meta)) + return TaskEventSchema(many=True).dump(task_events) + + def delete_workflow(workflow_id): StudyService.delete_workflow(workflow_id) def set_current_task(workflow_id, task_id): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() - user_uid = __get_user_uid(workflow_model.study.user_uid) processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.COMPLETED and task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + _verify_user_and_role(processor, spiff_task) + user_uid = g.user.uid + if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not move the token to a task who's state is not " "currently set to COMPLETE or READY.") # Only reset the token if the task doesn't already have it. - if task.state == task.COMPLETED: - task.reset_token(reset_data=False) # we could optionally clear the previous data. + if spiff_task.state == spiff_task.COMPLETED: + spiff_task.reset_token(reset_data=True) # Don't try to copy the existing data back into this task. 
+    processor.save()
 
-    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
-    workflow_api_model = __get_workflow_api_model(processor, task)
+    WorkflowService.log_task_action(user_uid, processor, spiff_task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+    WorkflowService.update_task_assignments(processor)
+
+    workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task)
     return WorkflowApiSchema().dump(workflow_api_model)
 
 
-def update_task(workflow_id, task_id, body):
+def update_task(workflow_id, task_id, body, terminate_loop=None):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
-
     if workflow_model is None:
         raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
 
     elif workflow_model.study is None:
         raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
 
-    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
-    task = processor.bpmn_workflow.get_task(task_id)
-    if task.state != task.READY:
+    spiff_task = processor.bpmn_workflow.get_task(task_id)
+    if not spiff_task:
+        raise ApiError("empty_task", "Processor failed to obtain task.", status_code=404)
+    _verify_user_and_role(processor, spiff_task)
+    if spiff_task.state != spiff_task.READY:
         raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. "
                                         "Consider calling a token reset to make this task Ready.")
-    task.update_data(body)
-    processor.complete_task(task)
+
+    if terminate_loop:
+        spiff_task.terminate_loop()
+    spiff_task.update_data(body)
+    processor.complete_task(spiff_task)
     processor.do_engine_steps()
     processor.save()
-    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)
-    workflow_api_model = __get_workflow_api_model(processor)
+
+    # Log the action, and record any pending task assignments created by lanes in the workflow.
+    WorkflowService.log_task_action(g.user.uid, processor, spiff_task, WorkflowService.TASK_ACTION_COMPLETE)
+    WorkflowService.update_task_assignments(processor)
+
+    workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
     return WorkflowApiSchema().dump(workflow_api_model)
 
@@ -240,7 +219,7 @@ def delete_workflow_spec_category(cat_id):
     session.commit()
 
 
-def lookup(workflow_id, field_id, query, limit):
+def lookup(workflow_id, field_id, query=None, value=None, limit=10):
     """
     given a field in a task, attempts to find the lookup table or function associated
     with that field and runs a full-text query against it to locate the values and
@@ -248,16 +227,25 @@ def lookup(workflow_id, field_id, query, limit):
     Tries to be fast, but first runs will be very slow.
     """
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
-    lookup_data = LookupService.lookup(workflow, field_id, query, limit)
+    lookup_data = LookupService.lookup(workflow, field_id, query, value, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
 
 
-def __get_user_uid(user_uid):
-    if 'user' in g:
-        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
-            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
-        else:
-            return g.user.uid
+def _verify_user_and_role(processor, spiff_task):
+    """Assures the currently logged-in user can access the given workflow and task, or
+    raises an error.
+ Allow administrators to modify tasks, otherwise assure that the current user + is allowed to edit or update the task. Will raise the appropriate error if user + is not authorized. """ - else: + if 'user' not in g: raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + + if g.user.uid in app.config['ADMIN_UIDS']: + return g.user.uid + + allowed_users = WorkflowService.get_users_assigned_to_task(processor, spiff_task) + if g.user.uid not in allowed_users: + raise ApiError.from_task("permission_denied", + f"This task must be completed by '{allowed_users}', " + f"but you are {g.user.uid}", spiff_task) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 53706a75..843609e0 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -29,20 +29,44 @@ class NavigationItem(object): self.state = state self.is_decision = is_decision self.task = task + self.lane = lane class Task(object): + ########################################################################## + # Custom properties and validations defined in Camunda form fields # + ########################################################################## + + # Repeating form section PROP_OPTIONS_REPEAT = "repeat" - PROP_OPTIONS_FILE = "spreadsheet.name" - PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column" - PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column" + + # Read-only field + PROP_OPTIONS_READ_ONLY = "read_only" + + # LDAP lookup PROP_LDAP_LOOKUP = "ldap.lookup" - VALIDATION_REQUIRED = "required" + + # Autocomplete field FIELD_TYPE_AUTO_COMPLETE = "autocomplete" + # Required field + VALIDATION_REQUIRED = "required" - def __init__(self, id, name, title, type, state, form, documentation, data, - multi_instance_type, multi_instance_count, multi_instance_index, process_name, properties): + # Enum field options values pulled from a spreadsheet + PROP_OPTIONS_FILE_NAME = "spreadsheet.name" + PROP_OPTIONS_FILE_VALUE_COLUMN = "spreadsheet.value.column" + PROP_OPTIONS_FILE_LABEL_COLUMN = "spreadsheet.label.column" + + # Enum field options values pulled from task data + PROP_OPTIONS_DATA_NAME = "data.name" + PROP_OPTIONS_DATA_VALUE_COLUMN = "data.value.column" + PROP_OPTIONS_DATA_LABEL_COLUMN = "data.label.column" + + ########################################################################## + + def __init__(self, id, name, title, type, state, lane, form, documentation, data, + multi_instance_type, multi_instance_count, multi_instance_index, + process_name, properties): self.id = id self.name = name self.title = title @@ -51,6 +75,7 @@ class Task(object): self.form = form self.documentation = documentation self.data = data + self.lane = lane self.multi_instance_type = multi_instance_type # Some tasks have a repeat behavior. self.multi_instance_count = multi_instance_count # This is the number of times the task could repeat. self.multi_instance_index = multi_instance_index # And the index of the currently repeating task. 
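For reference, a minimal sketch of how the constants above are meant to line up with a form field's extension properties. Everything here is illustrative: the file name and column names are made up, and the read side (`field.has_property(...)` / `field.get_property(...)`) appears in the crc/services/lookup_service.py changes further down this diff.

```python
# Hypothetical properties for an enum field whose options come from a
# spreadsheet; the keys match the Task.PROP_OPTIONS_FILE_* constants above.
spreadsheet_backed_field = {
    "spreadsheet.name": "sponsors.xlsx",        # made-up file name
    "spreadsheet.value.column": "SPONSOR_ID",   # made-up column names
    "spreadsheet.label.column": "SPONSOR_NAME",
}

# The equivalent triple for options pulled from task data instead,
# matching the Task.PROP_OPTIONS_DATA_* constants.
data_backed_field = {
    "data.name": "sponsors",
    "data.value.column": "id",
    "data.label.column": "name",
}
```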
@@ -60,7 +85,7 @@ class Task(object): class OptionSchema(ma.Schema): class Meta: - fields = ["id", "name"] + fields = ["id", "name", "data"] class ValidationSchema(ma.Schema): @@ -70,15 +95,11 @@ class ValidationSchema(ma.Schema): class FormFieldPropertySchema(ma.Schema): class Meta: - fields = [ - "id", "value" - ] + fields = ["id", "value"] class FormFieldSchema(ma.Schema): class Meta: - fields = [ - "id", "type", "label", "default_value", "options", "validation", "properties", "value" - ] + fields = ["id", "type", "label", "default_value", "options", "validation", "properties", "value"] default_value = marshmallow.fields.String(required=False, allow_none=True) options = marshmallow.fields.List(marshmallow.fields.Nested(OptionSchema)) @@ -93,7 +114,7 @@ class FormSchema(ma.Schema): class TaskSchema(ma.Schema): class Meta: - fields = ["id", "name", "title", "type", "state", "form", "documentation", "data", "multi_instance_type", + fields = ["id", "name", "title", "type", "state", "lane", "form", "documentation", "data", "multi_instance_type", "multi_instance_count", "multi_instance_index", "process_name", "properties"] multi_instance_type = EnumField(MultiInstanceType) @@ -101,6 +122,7 @@ class TaskSchema(ma.Schema): form = marshmallow.fields.Nested(FormSchema, required=False, allow_none=True) title = marshmallow.fields.String(required=False, allow_none=True) process_name = marshmallow.fields.String(required=False, allow_none=True) + lane = marshmallow.fields.String(required=False, allow_none=True) @marshmallow.post_load def make_task(self, data, **kwargs): @@ -110,10 +132,11 @@ class TaskSchema(ma.Schema): class NavigationItemSchema(ma.Schema): class Meta: fields = ["id", "task_id", "name", "title", "backtracks", "level", "indent", "child_count", "state", - "is_decision", "task"] + "is_decision", "task", "lane"] unknown = INCLUDE task = marshmallow.fields.Nested(TaskSchema, dump_only=True, required=False, allow_none=True) backtracks = marshmallow.fields.String(required=False, allow_none=True) + lane = marshmallow.fields.String(required=False, allow_none=True) title = marshmallow.fields.String(required=False, allow_none=True) task_id = marshmallow.fields.String(required=False, allow_none=True) diff --git a/crc/models/approval.py b/crc/models/approval.py index 0592fbd1..df433fac 100644 --- a/crc/models/approval.py +++ b/crc/models/approval.py @@ -57,28 +57,16 @@ class Approval(object): @classmethod def from_model(cls, model: ApprovalModel): - # TODO: Reduce the code by iterating over model's dict keys - instance = cls() - instance.id = model.id - instance.study_id = model.study_id - instance.workflow_id = model.workflow_id - instance.version = model.version - instance.approver_uid = model.approver_uid - instance.status = model.status - instance.message = model.message - instance.date_created = model.date_created - instance.date_approved = model.date_approved - instance.version = model.version - instance.title = '' + args = dict((k, v) for k, v in model.__dict__.items() if not k.startswith('_')) + instance = cls(**args) instance.related_approvals = [] + instance.title = model.study.title if model.study else '' - if model.study: - instance.title = model.study.title try: instance.approver = LdapService.user_info(model.approver_uid) instance.primary_investigator = LdapService.user_info(model.study.primary_investigator_id) except ApiError as ae: - app.logger.error("Ldap lookup failed for approval record %i" % model.id) + app.logger.error(f'Ldap lookup failed for approval record {model.id}', 
exc_info=True) doc_dictionary = FileService.get_doc_dictionary() instance.associated_files = [] diff --git a/crc/models/email.py b/crc/models/email.py new file mode 100644 index 00000000..dc8c6834 --- /dev/null +++ b/crc/models/email.py @@ -0,0 +1,18 @@ +from flask_marshmallow.sqla import SQLAlchemyAutoSchema +from marshmallow import EXCLUDE +from sqlalchemy import func + +from crc import db +from crc.models.study import StudyModel + + +class EmailModel(db.Model): + __tablename__ = 'email' + id = db.Column(db.Integer, primary_key=True) + subject = db.Column(db.String) + sender = db.Column(db.String) + recipients = db.Column(db.String) + content = db.Column(db.String) + content_html = db.Column(db.String) + study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=True) + study = db.relationship(StudyModel) diff --git a/crc/models/file.py b/crc/models/file.py index 15a48709..8afed6cd 100644 --- a/crc/models/file.py +++ b/crc/models/file.py @@ -144,7 +144,6 @@ class LookupFileModel(db.Model): """Gives us a quick way to tell what kind of lookup is set on a form field. Connected to the file data model, so that if a new version of the same file is created, we can update the listing.""" - #fixme: What happens if they change the file associated with a lookup field? __tablename__ = 'lookup_file' id = db.Column(db.Integer, primary_key=True) workflow_spec_id = db.Column(db.String) @@ -153,6 +152,7 @@ class LookupFileModel(db.Model): file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id')) dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan") + class LookupDataModel(db.Model): __tablename__ = 'lookup_data' id = db.Column(db.Integer, primary_key=True) @@ -181,6 +181,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema): load_instance = True include_relationships = False include_fk = False # Includes foreign keys + exclude = ['id'] # Do not include the id field, it should never be used via the API. 
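Since LookupDataSchema now excludes the database id, a lookup result is just a value/label/data triple. Below is a hedged sketch of the two retrieval modes the reworked lookup endpoint supports; the URL shape and ids are assumptions for illustration only:

```python
import requests

# Hypothetical endpoint path; the real route is defined by the API spec.
url = "http://localhost:5000/v1.0/workflow/42/lookup/sponsor"

# Type-ahead mode: full-text match against the label column.
matches = requests.get(url, params={"query": "cancer center", "limit": 10}).json()

# Exact mode: resolve a previously stored value back to its label and source row.
exact = requests.get(url, params={"value": "SP001"}).json()

# Each item looks like {"value": ..., "label": ..., "data": {...}}, with no "id".
```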
class SimpleFileSchema(ma.Schema): diff --git a/crc/models/ldap.py b/crc/models/ldap.py index 7e05eccd..802e0d36 100644 --- a/crc/models/ldap.py +++ b/crc/models/ldap.py @@ -29,6 +29,9 @@ class LdapModel(db.Model): affiliation=", ".join(entry.uvaPersonIAMAffiliation), sponsor_type=", ".join(entry.uvaPersonSponsoredType)) + def proper_name(self): + return f'{self.display_name} - ({self.uid})' + class LdapSchema(SQLAlchemyAutoSchema): class Meta: diff --git a/crc/models/stats.py b/crc/models/stats.py deleted file mode 100644 index c72df7d4..00000000 --- a/crc/models/stats.py +++ /dev/null @@ -1,32 +0,0 @@ -from marshmallow_sqlalchemy import SQLAlchemyAutoSchema - -from crc import db - - -class TaskEventModel(db.Model): - __tablename__ = 'task_event' - id = db.Column(db.Integer, primary_key=True) - study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False) - user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False) - workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False) - workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id')) - spec_version = db.Column(db.String) - action = db.Column(db.String) - task_id = db.Column(db.String) - task_name = db.Column(db.String) - task_title = db.Column(db.String) - task_type = db.Column(db.String) - task_state = db.Column(db.String) - mi_type = db.Column(db.String) - mi_count = db.Column(db.Integer) - mi_index = db.Column(db.Integer) - process_name = db.Column(db.String) - date = db.Column(db.DateTime) - - -class TaskEventModelSchema(SQLAlchemyAutoSchema): - class Meta: - model = TaskEventModel - load_instance = True - include_relationships = True - include_fk = True # Includes foreign keys diff --git a/crc/models/study.py b/crc/models/study.py index 540ee018..47d4eb8f 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -31,10 +31,8 @@ class StudyModel(db.Model): self.title = pbs.TITLE self.user_uid = pbs.NETBADGEID self.last_updated = pbs.DATE_MODIFIED - self.protocol_builder_status = ProtocolBuilderStatus.INCOMPLETE - if pbs.Q_COMPLETE: - self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE + self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE if pbs.HSRNUMBER: self.protocol_builder_status = ProtocolBuilderStatus.OPEN if self.on_hold: diff --git a/crc/models/task_event.py b/crc/models/task_event.py new file mode 100644 index 00000000..a6cb1a2d --- /dev/null +++ b/crc/models/task_event.py @@ -0,0 +1,64 @@ +from marshmallow import INCLUDE, fields +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema + +from crc import db, ma +from crc.models.study import StudyModel, StudySchema, WorkflowMetadataSchema, WorkflowMetadata +from crc.models.workflow import WorkflowModel + + +class TaskEventModel(db.Model): + __tablename__ = 'task_event' + id = db.Column(db.Integer, primary_key=True) + study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False) + user_uid = db.Column(db.String, nullable=False) # In some cases the unique user id may not exist in the db yet. 
+    workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False)
+    workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
+    spec_version = db.Column(db.String)
+    action = db.Column(db.String)
+    task_id = db.Column(db.String)
+    task_name = db.Column(db.String)
+    task_title = db.Column(db.String)
+    task_type = db.Column(db.String)
+    task_state = db.Column(db.String)
+    task_lane = db.Column(db.String)
+    form_data = db.Column(db.JSON)  # Any form data submitted when the task was completed.
+    mi_type = db.Column(db.String)
+    mi_count = db.Column(db.Integer)
+    mi_index = db.Column(db.Integer)
+    process_name = db.Column(db.String)
+    date = db.Column(db.DateTime)
+
+
+class TaskEventModelSchema(SQLAlchemyAutoSchema):
+    class Meta:
+        model = TaskEventModel
+        load_instance = True
+        include_relationships = True
+        include_fk = True  # Includes foreign keys
+
+
+class TaskEvent(object):
+    def __init__(self, model: TaskEventModel, study: StudyModel, workflow: WorkflowMetadata):
+        self.id = model.id
+        self.study = study
+        self.workflow = workflow
+        self.user_uid = model.user_uid
+        self.action = model.action
+        self.task_id = model.task_id
+        self.task_title = model.task_title
+        self.task_name = model.task_name
+        self.task_type = model.task_type
+        self.task_state = model.task_state
+        self.task_lane = model.task_lane
+
+
+class TaskEventSchema(ma.Schema):
+
+    study = fields.Nested(StudySchema, dump_only=True)
+    workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True)
+
+    class Meta:
+        model = TaskEvent
+        additional = ["id", "user_uid", "action", "task_id", "task_title",
+                      "task_name", "task_type", "task_state", "task_lane"]
+        unknown = INCLUDE
diff --git a/crc/models/user.py b/crc/models/user.py
index 55bba35f..221176bc 100644
--- a/crc/models/user.py
+++ b/crc/models/user.py
@@ -35,7 +35,7 @@ class UserModel(db.Model):
         }
         return jwt.encode(
             payload,
-            app.config.get('TOKEN_AUTH_SECRET_KEY'),
+            app.config.get('SECRET_KEY'),
             algorithm='HS256',
         )
 
@@ -47,7 +47,7 @@ class UserModel(db.Model):
         :return: integer|string
         """
         try:
-            payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256')
+            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'), algorithms='HS256')
             return payload
         except jwt.ExpiredSignatureError:
             raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.')
diff --git a/crc/scripts/email.py b/crc/scripts/email.py
new file mode 100644
index 00000000..855ec8a4
--- /dev/null
+++ b/crc/scripts/email.py
@@ -0,0 +1,90 @@
+import markdown
+from jinja2 import Template
+
+from crc import app
+from crc.api.common import ApiError
+from crc.scripts.script import Script
+from crc.services.ldap_service import LdapService
+from crc.services.mails import send_mail
+
+
+class Email(Script):
+    """This script can be included in a workflow and called from there, specifying
+    recipients and content."""
+
+    def get_description(self):
+        return """
+Creates an email, using the provided arguments (a list of UIDs).
+Each argument will be used to look up personal information needed for
+the email creation.
+
+Example:
+Email Subject ApprvlApprvr1 PIComputingID
+"""
+
+    def do_task_validate_only(self, task, *args, **kwargs):
+        self.get_subject(task, args)
+        self.get_users_info(task, args)
+        self.get_content(task)
+
+    def do_task(self, task, *args, **kwargs):
+        args = [arg for arg in args if type(arg) == str]
+        subject = self.get_subject(task, args)
+        recipients = self.get_users_info(task, args)
+        content, content_html = self.get_content(task)
+        if recipients:
+            send_mail(
+                subject=subject,
+                sender=app.config['DEFAULT_SENDER'],
+                recipients=recipients,
+                content=content,
+                content_html=content_html
+            )
+
+    def get_users_info(self, task, args):
+        if len(args) < 1:
+            raise ApiError(code="missing_argument",
+                           message="The Email script requires at least one argument: the "
+                                   "name of the variable in the task data that contains the "
+                                   "user id to process.  Multiple arguments are accepted.")
+        emails = []
+        for arg in args:
+            try:
+                uid = task.workflow.script_engine.evaluate_expression(task, arg)
+            except Exception as e:
+                app.logger.error(f'Workflow engine could not parse {arg}', exc_info=True)
+                continue
+            user_info = LdapService.user_info(uid)
+            email = user_info.email_address
+            emails.append(user_info.email_address)
+            if not isinstance(email, str):
+                raise ApiError(code="invalid_argument",
+                               message="The Email script requires UID arguments that resolve to "
+                                       "email addresses.  Each address must be a string, but "
+                                       "this one points to a %s " % email.__class__.__name__)
+
+        return emails
+
+    def get_subject(self, task, args):
+        if len(args) < 1:
+            raise ApiError(code="missing_argument",
+                           message="The Email script requires at least one subject argument: the "
+                                   "name of the variable in the task data that contains the "
+                                   "subject to process.  Multiple arguments are accepted.")
+        subject = args[0]
+        if not isinstance(subject, str):
+            raise ApiError(code="invalid_argument",
+                           message="The Email script requires a subject argument.  The variable "
+                                   "in the task data that contains the subject must point to "
+                                   "a string, but it currently points to a %s " % subject.__class__.__name__)
+
+        return subject
+
+    def get_content(self, task):
+        content = task.task_spec.documentation
+        template = Template(content)
+        rendered = template.render(task.data)
+        rendered_markdown = markdown.markdown(rendered).replace('\n', '<br>')
+        return rendered, rendered_markdown
diff --git a/crc/scripts/fact_service.py b/crc/scripts/fact_service.py
index c4468721..b3701312 100644
--- a/crc/scripts/fact_service.py
+++ b/crc/scripts/fact_service.py
@@ -5,7 +5,7 @@ from crc.scripts.script import Script
 
 class FactService(Script):
     def get_description(self):
-        return """Just your basic class that can pull in data from a few api endpoints and
+        return """Just your basic class that can pull in data from a few api endpoints and
         do a basic task."""
 
     def get_cat(self):
diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py
index e336685d..f274b899 100644
--- a/crc/scripts/study_info.py
+++ b/crc/scripts/study_info.py
@@ -14,7 +14,7 @@ class StudyInfo(Script):
     """Please see the detailed description that is provided below. """
 
     pb = ProtocolBuilderService()
-    type_options = ['info', 'investigators', 'details', 'approvals', 'documents', 'protocol']
+    type_options = ['info', 'investigators', 'roles', 'details', 'approvals', 'documents', 'protocol']
 
     # This is used for test/workflow validation, as well as documentation.
    example_data = {
@@ -106,11 +106,20 @@ Returns the basic information such as the id and title
 ### Investigators ###
 Returns detailed information about related personnel.
 The order returned is guaranteed to match the order provided in the investigators.xslx reference file.
-If possible, detailed information is added in from LDAP about each personnel based on their user_id.
+Detailed information is added in from LDAP about each person, based on their user_id.
 ```
 {investigators_example}
 ```
 
+### Investigator Roles ###
+Returns a list of all investigator roles, populating any roles with additional information available from
+the Protocol Builder and LDAP.  It's basically just like Investigators, but it includes all the roles, rather
+than just those that were set in Protocol Builder.
+```
+{investigators_example}
+```
+
+
 ### Details ###
 Returns detailed information about variable keys read in from the Protocol Builder.
 
@@ -161,6 +170,12 @@ Returns information specific to the protocol.
             "INVESTIGATORTYPEFULL": "Primary Investigator",
             "NETBADGEID": "dhf8r"
         },
+        "roles":
+            {
+                "INVESTIGATORTYPE": "PI",
+                "INVESTIGATORTYPEFULL": "Primary Investigator",
+                "NETBADGEID": "dhf8r"
+            },
         "details":
             {
                 "IS_IND": 0,
@@ -177,7 +192,7 @@ Returns information specific to the protocol.
                 "workflow_spec_id": "irb_api_details",
             },
             'protocol': {
-                id: 0,
+                'id': 0,
             }
         }
     }
@@ -198,6 +213,8 @@ Returns information specific to the protocol.
self.add_data_to_task(task, {cmd: schema.dump(study)}) if cmd == 'investigators': self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id)}) + if cmd == 'roles': + self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id, all=True)}) if cmd == 'details': self.add_data_to_task(task, {cmd: self.pb.get_study_details(study_id)}) if cmd == 'approvals': diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 1f6f56b3..28b97b6b 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -1,6 +1,10 @@ -from datetime import datetime +import json +import pickle +import sys +from base64 import b64decode +from datetime import datetime, timedelta -from sqlalchemy import desc +from sqlalchemy import desc, func from crc import app, db, session from crc.api.common import ApiError @@ -109,16 +113,135 @@ class ApprovalService(object): db_approvals = query.all() return [Approval.from_model(approval_model) for approval_model in db_approvals] + @staticmethod + def get_approval_details(approval): + """Returns a list of packed approval details, obtained from + the task data sent during the workflow """ + def extract_value(task, key): + if key in task['data']: + return pickle.loads(b64decode(task['data'][key]['__bytes__'])) + else: + return "" + + def find_task(uuid, task): + if task['id']['__uuid__'] == uuid: + return task + for child in task['children']: + task = find_task(uuid, child) + if task: + return task + + if approval.status != ApprovalStatus.APPROVED.value: + return {} + for related_approval in approval.related_approvals: + if related_approval.status != ApprovalStatus.APPROVED.value: + continue + workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() + data = json.loads(workflow.bpmn_workflow_json) + last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) + personnel = extract_value(last_task, 'personnel') + training_val = extract_value(last_task, 'RequiredTraining') + pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] + review_complete = 'AllRequiredTraining' in training_val + pi_uid = workflow.study.primary_investigator_id + pi_details = LdapService.user_info(pi_uid) + details = { + 'Supervisor': pi_supervisor, + 'PI_Details': pi_details, + 'Review': review_complete + } + details['person_details'] = [] + details['person_details'].append(pi_details) + for person in personnel: + uid = person['PersonnelComputingID']['value'] + details['person_details'].append(LdapService.user_info(uid)) + + return details + + @staticmethod + def get_health_attesting_records(): + """Return a list with prepared information related to all approvals """ + + approvals = ApprovalService.get_all_approvals(include_cancelled=False) + + health_attesting_rows = [ + ['university_computing_id', + 'last_name', + 'first_name', + 'department', + 'job_title', + 'supervisor_university_computing_id'] + ] + + for approval in approvals: + try: + details = ApprovalService.get_approval_details(approval) + if not details: + continue + + for person in details['person_details']: + first_name = person.given_name + last_name = person.display_name.replace(first_name, '').strip() + record = [ + person.uid, + last_name, + first_name, + '', + 'Academic Researcher', + details['Supervisor'] if person.uid == details['person_details'][0].uid else 'askresearch' + ] + + if record not in health_attesting_rows: + health_attesting_rows.append(record) + + except Exception as e: + 
+                app.logger.error(f'Error pulling data for workflow {approval.workflow_id}', exc_info=True)
+
+        return health_attesting_rows
+
+    @staticmethod
+    def get_not_really_csv_content():
+        approvals = ApprovalService.get_all_approvals(include_cancelled=False)
+        output = []
+        errors = []
+        for approval in approvals:
+            try:
+                details = ApprovalService.get_approval_details(approval)
+
+                for person in details['person_details']:
+                    record = {
+                        "study_id": approval.study_id,
+                        "pi_uid": details['PI_Details'].uid,
+                        "pi": details['PI_Details'].display_name,
+                        "name": person.display_name,
+                        "uid": person.uid,
+                        "email": person.email_address,
+                        "supervisor": details['Supervisor'] if person.uid == details['person_details'][0].uid else "",
+                        "review_complete": details['Review'],
+                    }
+
+                    output.append(record)
+
+            except Exception as e:
+                errors.append(
+                    f'Error pulling data for workflow #{approval.workflow_id} '
+                    f'(Approval status: {approval.status} - '
+                    f'More details in Sentry): {str(e)}'
+                )
+                # Detailed information sent to Sentry
+                app.logger.error(f'Error pulling data for workflow {approval.workflow_id}', exc_info=True)
+        return {"results": output, "errors": errors }
 
     @staticmethod
     def update_approval(approval_id, approver_uid):
-        """Update a specific approval"""
+        """Update a specific approval.
+        NOTE: The actual update happens in the API layer; this
+        function is currently only in charge of sending the
+        corresponding emails.
+        """
         db_approval = session.query(ApprovalModel).get(approval_id)
         status = db_approval.status
         if db_approval:
-            # db_approval.status = status
-            # session.add(db_approval)
-            # session.commit()
             if status == ApprovalStatus.APPROVED.value:
                 # second_approval = ApprovalModel().query.filter_by(
                 #     study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
@@ -135,7 +258,7 @@ class ApprovalService(object):
             f'{approver_info.display_name} - ({approver_info.uid})'
         )
         if mail_result:
-            app.logger.error(mail_result)
+            app.logger.error(mail_result, exc_info=True)
     elif status == ApprovalStatus.DECLINED.value:
         ldap_service = LdapService()
         pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
@@ -147,7 +270,7 @@ class ApprovalService(object):
             f'{approver_info.display_name} - ({approver_info.uid})'
         )
         if mail_result:
-            app.logger.error(mail_result)
+            app.logger.error(mail_result, exc_info=True)
         first_approval = ApprovalModel().query.filter_by(
             study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
             status=ApprovalStatus.APPROVED.value, version=db_approval.version).first()
@@ -163,8 +286,8 @@ class ApprovalService(object):
             f'{approver_info.display_name} - ({approver_info.uid})'
         )
         if mail_result:
-            app.logger.error(mail_result)
-        # TODO: Log update action by approver_uid - maybe ?
+            app.logger.error(mail_result, exc_info=True)
+
         return db_approval
 
     @staticmethod
@@ -176,11 +299,12 @@ class ApprovalService(object):
         pending approvals and create a new approval for the latest
         version of the workflow."""
 
-        # Find any existing approvals for this workflow and approver.
-        latest_approval_request = db.session.query(ApprovalModel). \
+        # Find any existing approvals for this workflow.
+        latest_approval_requests = db.session.query(ApprovalModel). \
             filter(ApprovalModel.workflow_id == workflow_id). \
-            filter(ApprovalModel.approver_uid == approver_uid).
\ - order_by(desc(ApprovalModel.version)).first() + order_by(desc(ApprovalModel.version)) + + latest_approver_request = latest_approval_requests.filter(ApprovalModel.approver_uid == approver_uid).first() # Construct as hash of the latest files to see if things have changed since # the last approval. @@ -195,16 +319,20 @@ class ApprovalService(object): # If an existing approval request exists and no changes were made, do nothing. # If there is an existing approval request for a previous version of the workflow # then add a new request, and cancel any waiting/pending requests. - if latest_approval_request: - request_file_ids = list(file.file_data_id for file in latest_approval_request.approval_files) + if latest_approver_request: + request_file_ids = list(file.file_data_id for file in latest_approver_request.approval_files) current_data_file_ids.sort() request_file_ids.sort() + other_approver = latest_approval_requests.filter(ApprovalModel.approver_uid != approver_uid).first() if current_data_file_ids == request_file_ids: - return # This approval already exists. + return # This approval already exists or we're updating other approver. else: - latest_approval_request.status = ApprovalStatus.CANCELED.value - db.session.add(latest_approval_request) - version = latest_approval_request.version + 1 + for approval_request in latest_approval_requests: + if (approval_request.version == latest_approver_request.version and + approval_request.status != ApprovalStatus.CANCELED.value): + approval_request.status = ApprovalStatus.CANCELED.value + db.session.add(approval_request) + version = latest_approver_request.version + 1 else: version = 1 @@ -234,7 +362,7 @@ class ApprovalService(object): f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: - app.logger.error(mail_result) + app.logger.error(mail_result, exc_info=True) # send rrp approval request for first approver # enhance the second part in case it bombs approver_email = [approver_info.email_address] if approver_info.email_address else app.config['FALLBACK_EMAILS'] @@ -244,7 +372,7 @@ class ApprovalService(object): f'{pi_user_info.display_name} - ({pi_user_info.uid})' ) if mail_result: - app.logger.error(mail_result) + app.logger.error(mail_result, exc_info=True) @staticmethod def _create_approval_files(workflow_data_files, approval): diff --git a/crc/services/email_service.py b/crc/services/email_service.py new file mode 100644 index 00000000..f800d900 --- /dev/null +++ b/crc/services/email_service.py @@ -0,0 +1,43 @@ +from datetime import datetime +from flask_mail import Message +from sqlalchemy import desc + +from crc import app, db, mail, session +from crc.api.common import ApiError + +from crc.models.study import StudyModel +from crc.models.email import EmailModel + + +class EmailService(object): + """Provides common tools for working with an Email""" + + @staticmethod + def add_email(subject, sender, recipients, content, content_html, study_id=None): + """We will receive all data related to an email and store it""" + + # Find corresponding study - if any + study = None + if type(study_id) == int: + study = db.session.query(StudyModel).get(study_id) + + # Create EmailModel + email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients), + content=content, content_html=content_html, study=study) + + # Send mail + try: + msg = Message(subject, + sender=sender, + recipients=recipients) + + msg.body = content + msg.html = content_html + + mail.send(msg) + except Exception as e: + app.logger.error('An 
exception happened in EmailService', exc_info=True) + app.logger.error(str(e)) + + db.session.add(email_model) + db.session.commit() diff --git a/crc/services/file_service.py b/crc/services/file_service.py index ff234a79..6ba2e1ad 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -3,7 +3,7 @@ import json import os from datetime import datetime from uuid import UUID -from xml.etree import ElementTree +from lxml import etree import flask from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException @@ -58,7 +58,7 @@ class FileService(object): "irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code) """Assure this is unique to the workflow, task, and document code AND the Name - Because we will allow users to upload multiple files for the same form field + Because we will allow users to upload multiple files for the same form field in some cases """ file_model = session.query(FileModel)\ .filter(FileModel.workflow_id == workflow_id)\ @@ -151,7 +151,7 @@ class FileService(object): # If this is a BPMN, extract the process id. if file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(binary_data) + bpmn: etree.Element = etree.fromstring(binary_data) file_model.primary_process_id = FileService.get_process_id(bpmn) new_file_data_model = FileDataModel( @@ -165,7 +165,7 @@ class FileService(object): return file_model @staticmethod - def get_process_id(et_root: ElementTree.Element): + def get_process_id(et_root: etree.Element): process_elements = [] for child in et_root: if child.tag.endswith('process') and child.attrib.get('isExecutable', False): @@ -179,7 +179,7 @@ class FileService(object): # Look for the element that has the startEvent in it for e in process_elements: - this_element: ElementTree.Element = e + this_element: etree.Element = e for child_element in list(this_element): if child_element.tag.endswith('startEvent'): return this_element.attrib['id'] diff --git a/crc/services/lookup_service.py b/crc/services/lookup_service.py index b3e0bddc..c9eb1dd8 100644 --- a/crc/services/lookup_service.py +++ b/crc/services/lookup_service.py @@ -1,8 +1,10 @@ import logging import re +from collections import OrderedDict -from pandas import ExcelFile -from sqlalchemy import func, desc +import pandas as pd +from pandas import ExcelFile, np +from sqlalchemy import desc from sqlalchemy.sql.functions import GenericFunction from crc import db @@ -19,8 +21,8 @@ class TSRank(GenericFunction): package = 'full_text' name = 'ts_rank' -class LookupService(object): +class LookupService(object): """Provides tools for doing lookups for auto-complete fields. This can currently take two forms: 1) Lookup from spreadsheet data associated with a workflow specification. @@ -44,61 +46,68 @@ class LookupService(object): def __get_lookup_model(workflow, field_id): lookup_model = db.session.query(LookupFileModel) \ .filter(LookupFileModel.workflow_spec_id == workflow.workflow_spec_id) \ - .filter(LookupFileModel.field_id == field_id).first() + .filter(LookupFileModel.field_id == field_id) \ + .order_by(desc(LookupFileModel.id)).first() # one more quick query, to see if the lookup file is still related to this workflow. # if not, we need to rebuild the lookup table. is_current = False if lookup_model: - is_current = db.session.query(WorkflowSpecDependencyFile).\ - filter(WorkflowSpecDependencyFile.file_data_id == lookup_model.file_data_model_id).count() + is_current = db.session.query(WorkflowSpecDependencyFile). 
\
+                filter(WorkflowSpecDependencyFile.file_data_id == lookup_model.file_data_model_id).\
+                filter(WorkflowSpecDependencyFile.workflow_id == workflow.id).count()
 
         if not is_current:
-            if lookup_model:
-                db.session.delete(lookup_model)  # Very very very expensive, but we don't know need this till we do.
             lookup_model = LookupService.create_lookup_model(workflow, field_id)
 
         return lookup_model
 
     @staticmethod
-    def lookup(workflow, field_id, query, limit):
+    def lookup(workflow, field_id, query, value=None, limit=10):
         lookup_model = LookupService.__get_lookup_model(workflow, field_id)
 
         if lookup_model.is_ldap:
             return LookupService._run_ldap_query(query, limit)
         else:
-            return LookupService._run_lookup_query(lookup_model, query, limit)
-
-
+            return LookupService._run_lookup_query(lookup_model, query, value, limit)
 
     @staticmethod
     def create_lookup_model(workflow_model, field_id):
         """
-        This is all really expensive, but should happen just once (per file change).
-        Checks to see if the options are provided in a separate lookup table associated with the
-        workflow, and if so, assures that data exists in the database, and return a model than can be used
-        to locate that data.
-        Returns:  an array of LookupData, suitable for returning to the api.
+        This is all really expensive, but should happen just once (per file change).
+
+        Checks to see if the options are provided in a separate lookup table associated with the workflow, and if so,
+        assures that data exists in the database, and returns a model that can be used to locate that data.
+
+        Returns: an array of LookupData, suitable for returning to the API.
         """
         processor = WorkflowProcessor(workflow_model)  # VERY expensive, Ludicrous for lookup / type ahead
         spiff_task, field = processor.find_task_and_field_by_field_id(field_id)
 
-        if field.has_property(Task.PROP_OPTIONS_FILE):
-            if not field.has_property(Task.PROP_OPTIONS_VALUE_COLUMN) or \
-                    not field.has_property(Task.PROP_OPTIONS_LABEL_COL):
-                raise ApiError.from_task("invalid_emum",
+        # Clear out all existing lookup models for this workflow and field.
+        existing_models = db.session.query(LookupFileModel) \
+            .filter(LookupFileModel.workflow_spec_id == workflow_model.workflow_spec_id) \
+            .filter(LookupFileModel.field_id == field_id).all()
+        for model in existing_models:  # Do it one at a time to cause the required cascade of deletes.
+            db.session.delete(model)
+
+        # Use the contents of a file to populate enum field options
+        if field.has_property(Task.PROP_OPTIONS_FILE_NAME):
+            if not (field.has_property(Task.PROP_OPTIONS_FILE_VALUE_COLUMN) and
+                    field.has_property(Task.PROP_OPTIONS_FILE_LABEL_COLUMN)):
+                raise ApiError.from_task("invalid_enum",
                                          "For enumerations based on an xls file, you must include 3 properties: %s, "
-                                         "%s, and %s" % (Task.PROP_OPTIONS_FILE,
-                                                         Task.PROP_OPTIONS_VALUE_COLUMN,
-                                                         Task.PROP_OPTIONS_LABEL_COL),
+                                         "%s, and %s" % (Task.PROP_OPTIONS_FILE_NAME,
+                                                         Task.PROP_OPTIONS_FILE_VALUE_COLUMN,
+                                                         Task.PROP_OPTIONS_FILE_LABEL_COLUMN),
                                          task=spiff_task)
 
             # Get the file data from the File Service
-            file_name = field.get_property(Task.PROP_OPTIONS_FILE)
-            value_column = field.get_property(Task.PROP_OPTIONS_VALUE_COLUMN)
-            label_column = field.get_property(Task.PROP_OPTIONS_LABEL_COL)
+            file_name = field.get_property(Task.PROP_OPTIONS_FILE_NAME)
+            value_column = field.get_property(Task.PROP_OPTIONS_FILE_VALUE_COLUMN)
+            label_column = field.get_property(Task.PROP_OPTIONS_FILE_LABEL_COLUMN)
             latest_files = FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id,
                                                            workflow_id=workflow_model.id,
                                                            name=file_name)
@@ -110,14 +119,15 @@ class LookupService(object):
             lookup_model = LookupService.build_lookup_table(data_model, value_column, label_column,
                                                             workflow_model.workflow_spec_id, field_id)
 
+        # Use the results of an LDAP request to populate enum field options
         elif field.has_property(Task.PROP_LDAP_LOOKUP):
             lookup_model = LookupFileModel(workflow_spec_id=workflow_model.workflow_spec_id,
                                            field_id=field_id,
                                            is_ldap=True)
         else:
             raise ApiError("unknown_lookup_option",
-                           "Lookup supports using spreadsheet options or ldap options, and neither "
-                           "was provided.")
+                           "Lookup supports using spreadsheet or LDAP options, "
+                           "and neither of those was provided.")
         db.session.add(lookup_model)
         db.session.commit()
         return lookup_model
@@ -130,12 +140,13 @@ class LookupService(object):
         changed.  """
         xls = ExcelFile(data_model.data)
         df = xls.parse(xls.sheet_names[0])  # Currently we only look at the first sheet.
+        df = pd.DataFrame(df).replace({np.nan: None})
         if value_column not in df:
-            raise ApiError("invalid_emum",
+            raise ApiError("invalid_enum",
                            "The file %s does not contain a column named % s" % (data_model.file_model.name,
                                                                                 value_column))
         if label_column not in df:
-            raise ApiError("invalid_emum",
+            raise ApiError("invalid_enum",
                            "The file %s does not contain a column named % s" % (data_model.file_model.name,
                                                                                 label_column))
 
@@ -149,39 +160,40 @@ class LookupService(object):
             lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                           value=row[value_column],
                                           label=row[label_column],
-                                          data=row.to_json())
+                                          data=row.to_dict(OrderedDict))
             db.session.add(lookup_data)
         db.session.commit()
         return lookup_model
 
     @staticmethod
-    def _run_lookup_query(lookup_file_model, query, limit):
+    def _run_lookup_query(lookup_file_model, query, value, limit):
         db_query = LookupDataModel.query.filter(LookupDataModel.lookup_file_model == lookup_file_model)
+        if value is not None:  # Then just find the model with that value
+            db_query = db_query.filter(LookupDataModel.value == value)
+        else:
+            # Build a full text query that takes all the terms provided and executes each term as a prefix query, and
+            # OR's those queries together. The order of the results is handled as a standard "Like" on the original
+            # string which seems to work intuitively for most entries.
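+            # For example, the search "cancer stu" becomes the tsquery
+            # "'cancer stu' | cancer:* | stu:*" (the exact phrase OR'd with per-term prefix matches).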
+            query = re.sub('[^A-Za-z0-9 ]+', '', query)  # Strip out everything except letters, numbers and spaces.
+            query = re.sub(r'\s+', ' ', query)  # Collapse runs of whitespace to a single space, as we split on spaces.
+            print("Query: " + query)
+            query = query.strip()
+            if len(query) > 0:
+                if ' ' in query:
+                    terms = query.split(' ')
+                    new_terms = ["'%s'" % query]
+                    for t in terms:
+                        new_terms.append("%s:*" % t)
+                    new_query = ' | '.join(new_terms)
+                else:
+                    new_query = "%s:*" % query
 
-    query = re.sub('[^A-Za-z0-9 ]+', '', query)
-    print("Query: " + query)
-    query = query.strip()
-    if len(query) > 0:
-        if ' ' in query:
-            terms = query.split(' ')
-            new_terms = ["'%s'" % query]
-            for t in terms:
-                new_terms.append("%s:*" % t)
-            new_query = ' | '.join(new_terms)
-        else:
-            new_query = "%s:*" % query
+
+                # Run the full text query
+                db_query = db_query.filter(LookupDataModel.label.match(new_query))
+
+                # But hackishly order by like, which does a good job of
+                # pulling more relevant matches to the top.
+                db_query = db_query.order_by(desc(LookupDataModel.label.like("%" + query + "%")))
 
-    # Run the full text query
-    db_query = db_query.filter(LookupDataModel.label.match(new_query))
-    # But hackishly order by like, which does a good job of
-    # pulling more relevant matches to the top.
-    db_query = db_query.order_by(desc(LookupDataModel.label.like("%" + query + "%")))
-    #ORDER BY name LIKE concat('%', ticker, '%') desc, rank DESC
-
-#    db_query = db_query.order_by(desc(func.full_text.ts_rank(
-#        func.to_tsvector(LookupDataModel.label),
-#        func.to_tsquery(query))))
-    from sqlalchemy.dialects import postgresql
 
         logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
         result = db_query.limit(limit).all()
         logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
@@ -196,8 +208,9 @@ class LookupService(object):
         we return a lookup data model."""
         user_list = []
         for user in users:
-            user_list.append( {"value": user['uid'],
-                               "label": user['display_name'] + " (" + user['uid'] + ")",
-                               "data": user
-                               })
-        return user_list
\ No newline at end of file
+            user_list.append({"value": user['uid'],
+                              "label": user['display_name'] + " (" + user['uid'] + ")",
+                              "data": user
+                              })
+        return user_list
+
diff --git a/crc/services/mails.py b/crc/services/mails.py
index bd825f69..7bffacee 100644
--- a/crc/services/mails.py
+++ b/crc/services/mails.py
@@ -1,16 +1,20 @@
-import os
-
-from flask import render_template, render_template_string
 from flask_mail import Message
+from jinja2 import Environment, FileSystemLoader
+
+from crc import app, mail
+from crc.services.email_service import EmailService
+
+# Jinja environment definition, used to render mail templates
+template_dir = app.root_path + '/static/templates/mails'
+env = Environment(loader=FileSystemLoader(template_dir))
 
-# TODO: Extract common mailing code into its own function
 def send_test_email(sender, recipients):
     try:
         msg = Message('Research Ramp-up Plan test',
-                  sender=sender,
-                  recipients=recipients)
-        from crc import env, mail
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
         template = env.get_template('ramp_up_approval_request_first_review.txt')
         template_vars = {'primary_investigator': "test"}
         msg.body = template.render(template_vars)
@@ -21,108 +25,78 @@
         return str(e)
 
 
+def send_mail(subject, sender, recipients, content, content_html, study_id=None):
+    EmailService.add_email(subject=subject, sender=sender, recipients=recipients,
+                           content=content, content_html=content_html, study_id=study_id)
+
 
 def
send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None): - try: - msg = Message('Research Ramp-up Plan Submitted', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_submission.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_submission.html') - msg.html = template.render(template_vars) + subject = 'Research Ramp-up Plan Submitted' + + template = env.get_template('ramp_up_submission.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_submission.html') + content_html = template.render(template_vars) + + send_mail(subject, sender, recipients, content, content_html) - mail.send(msg) - except Exception as e: - return str(e) def send_ramp_up_approval_request_email(sender, recipients, primary_investigator): - try: - msg = Message('Research Ramp-up Plan Approval Request', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request.txt') - template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request.html') - msg.html = template.render(template_vars) + subject = 'Research Ramp-up Plan Approval Request' + + template = env.get_template('ramp_up_approval_request.txt') + template_vars = {'primary_investigator': primary_investigator} + content = template.render(template_vars) + template = env.get_template('ramp_up_approval_request.html') + content_html = template.render(template_vars) + + send_mail(subject, sender, recipients, content, content_html) - mail.send(msg) - except Exception as e: - return str(e) def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator): - try: - msg = Message('Research Ramp-up Plan Approval Request', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request_first_review.txt') - template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request_first_review.html') - msg.html = template.render(template_vars) + subject = 'Research Ramp-up Plan Approval Request' + + template = env.get_template('ramp_up_approval_request_first_review.txt') + template_vars = {'primary_investigator': primary_investigator} + content = template.render(template_vars) + template = env.get_template('ramp_up_approval_request_first_review.html') + content_html = template.render(template_vars) + + send_mail(subject, sender, recipients, content, content_html) - mail.send(msg) - except Exception as e: - return str(e) def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None): - try: - msg = Message('Research Ramp-up Plan Approved', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + subject = 'Research Ramp-up Plan Approved' - from crc import env, mail - template = env.get_template('ramp_up_approved.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approved.html') - msg.html = 
template.render(template_vars) + template = env.get_template('ramp_up_approved.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_approved.html') + content_html = template.render(template_vars) + + send_mail(subject, sender, recipients, content, content_html) - mail.send(msg) - except Exception as e: - return str(e) def send_ramp_up_denied_email(sender, recipients, approver): - try: - msg = Message('Research Ramp-up Plan Denied', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied.txt') - template_vars = {'approver': approver} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_denied.txt') + template_vars = {'approver': approver} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied.html') + content_html = template.render(template_vars) + + send_mail(subject, sender, recipients, content, content_html) - mail.send(msg) - except Exception as e: - return str(e) def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2): - try: - msg = Message('Research Ramp-up Plan Denied', - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied_first_approver.txt') - template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied_first_approver.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.txt') + template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + send_mail(subject, sender, recipients, content, content_html) diff --git a/crc/services/study_service.py b/crc/services/study_service.py index dade7998..fbc62d01 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -1,3 +1,4 @@ +from copy import copy from datetime import datetime import json from typing import List @@ -12,7 +13,7 @@ from crc.api.common import ApiError from crc.models.file import FileModel, FileModelSchema, File from crc.models.ldap import LdapSchema from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus -from crc.models.stats import TaskEventModel +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel, Study, Category, WorkflowMetadata from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \ WorkflowStatus @@ -64,13 +65,15 @@ class StudyService(object): study.files = list(files) # Calling this line repeatedly is very very slow. It creates the - # master spec and runs it. - status = StudyService.__get_study_status(study_model) - study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status) + # master spec and runs it. 
Don't execute this for Abandoned studies, as + # we don't have the information to process them. + if study.protocol_builder_status != ProtocolBuilderStatus.ABANDONED: + status = StudyService.__get_study_status(study_model) + study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status) - # Group the workflows into their categories. - for category in study.categories: - category.workflows = {w for w in workflow_metas if w.category_id == category.id} + # Group the workflows into their categories. + for category in study.categories: + category.workflows = {w for w in workflow_metas if w.category_id == category.id} return study @@ -137,7 +140,7 @@ class StudyService(object): try: pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id) except requests.exceptions.ConnectionError as ce: - app.logger.error("Failed to connect to the Protocol Builder - %s" % str(ce)) + app.logger.error(f'Failed to connect to the Protocol Builder - {str(ce)}', exc_info=True) pb_docs = [] else: pb_docs = [] @@ -181,10 +184,9 @@ class StudyService(object): documents[code] = doc return documents - - @staticmethod - def get_investigators(study_id): + def get_investigators(study_id, all=False): + """Convert array of investigators from protocol builder into a dictionary keyed on the type. """ # Loop through all known investigator types as set in the reference file inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code') @@ -192,16 +194,26 @@ class StudyService(object): # Get PB required docs pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id) - """Convert array of investigators from protocol builder into a dictionary keyed on the type""" + # It is possible for the same type to show up more than once in some circumstances, in those events + # append a counter to the name. 
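To make that keying scheme concrete, here is a hypothetical result (type codes abbreviated, uids invented for illustration) for a study whose Protocol Builder data contains one PI and two Sub-Investigators:

# get_investigators(study_id) would then return something shaped like:
#   {
#       'PI':   {..., 'user_id': 'dhf8r'},    # first entry keeps its bare type code
#       'SI':   {..., 'user_id': 'lb3dp'},    # first Sub-Investigator
#       'SI_2': {..., 'user_id': 'ajl2j'},    # duplicate type gets a counter suffix
#   }
# and, unless all=True is passed, entries whose user_id is None are filtered out.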
+        investigators = {}
         for i_type in inv_dictionary:
-            pb_data = next((item for item in pb_investigators if item['INVESTIGATORTYPE'] == i_type), None)
-            if pb_data:
-                inv_dictionary[i_type]['user_id'] = pb_data["NETBADGEID"]
-                inv_dictionary[i_type].update(StudyService.get_ldap_dict_if_available(pb_data["NETBADGEID"]))
-            else:
-                inv_dictionary[i_type]['user_id'] = None
-
-        return inv_dictionary
+            pb_data_entries = list(item for item in pb_investigators if item['INVESTIGATORTYPE'] == i_type)
+            entry_count = 0
+            investigators[i_type] = copy(inv_dictionary[i_type])
+            investigators[i_type]['user_id'] = None
+            for pb_data in pb_data_entries:
+                entry_count += 1
+                if entry_count == 1:
+                    t = i_type
+                else:
+                    t = i_type + "_" + str(entry_count)
+                investigators[t] = copy(inv_dictionary[i_type])
+                investigators[t]['user_id'] = pb_data["NETBADGEID"]
+                investigators[t].update(StudyService.get_ldap_dict_if_available(pb_data["NETBADGEID"]))
+        if not all:
+            investigators = dict(filter(lambda elem: elem[1]['user_id'] is not None, investigators.items()))
+        return investigators

     @staticmethod
     def get_ldap_dict_if_available(user_id):
@@ -224,7 +236,6 @@ class StudyService(object):

         return FileModelSchema().dump(file)

-
     @staticmethod
     def synch_with_protocol_builder_if_enabled(user):
         """Assures that the studies we have locally for the given user are
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index 93590d94..165d3313 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -1,5 +1,8 @@
 import re
-import xml.etree.ElementTree as ElementTree
+
+from SpiffWorkflow.serializer.exceptions import MissingSpecError
+from lxml import etree
+import shlex
 from datetime import datetime
 from typing import List
@@ -13,14 +16,14 @@ from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
 from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
 from SpiffWorkflow.exceptions import WorkflowTaskExecException
 from SpiffWorkflow.specs import WorkflowSpec
-from sqlalchemy import desc
-from crc import session
+from crc import session, app
 from crc.api.common import ApiError
 from crc.models.file import FileDataModel, FileModel, FileType
 from crc.models.workflow import WorkflowStatus, WorkflowModel, WorkflowSpecDependencyFile
 from crc.scripts.script import Script
 from crc.services.file_service import FileService
+from crc import app


 class CustomBpmnScriptEngine(BpmnScriptEngine):
@@ -28,15 +31,29 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
     Rather than execute arbitrary code, this assumes the script references a fully
     qualified python class such as myapp.RandomFact.
     """
-    def execute(self, task: SpiffTask, script, **kwargs):
+    def execute(self, task: SpiffTask, script, data):
         """
-        Assume that the script read in from the BPMN file is a fully qualified python class. Instantiate
-        that class, pass in any data available to the current task so that it might act on it.
-        Assume that the class implements the "do_task" method.
+        Functions in two modes.
+        1. If the command is preceded by #! then this is assumed to be a python script, and will
+        attempt to load that python module and execute the do_task method on that script.  Scripts
+        must be located in the scripts package and they must extend the script.py class.
+        2. If not preceded by the #! this will attempt to execute the script directly and assumes it is
+        valid Python.
+        """
+        # Shlex splits the whole string while respecting double quoted strings within
+        if not script.startswith('#!'):
+            try:
+                super().execute(task, script, data)
+            except SyntaxError as e:
+                raise ApiError.from_task('syntax_error',
+                                         f'If you are running a pre-defined script, please'
+                                         f' precede the script with "#!", otherwise this is assumed to be'
+                                         f' pure python: {script}, {e.msg}', task=task)
+        else:
+            self.run_predefined_script(task, script[2:], data)  # strip off the first two characters.
-        This allows us to reference custom code from the BPMN diagram.
-        """
-        commands = script.split(" ")
+
+    def run_predefined_script(self, task: SpiffTask, script, data):
+        commands = shlex.split(script)
         path_and_command = commands[0].rsplit(".", 1)
         if len(path_and_command) == 1:
             module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0])
@@ -55,20 +72,20 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
             if not isinstance(klass(), Script):
                 raise ApiError.from_task("invalid_script",
-                                "This is an internal error. The script '%s:%s' you called " %
-                                (module_name, class_name) +
-                                "does not properly implement the CRC Script class.",
-                                task=task)
+                                         "This is an internal error. The script '%s:%s' you called " %
+                                         (module_name, class_name) +
+                                         "does not properly implement the CRC Script class.",
+                                         task=task)
             if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
-                """If this is running a validation, and not a normal process, then we want to
+                """If this is running a validation, and not a normal process, then we want to
                 mimic running the script, but not make any external calls or database changes."""
                 klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:])
             else:
                 klass().do_task(task, study_id, workflow_id, *commands[1:])
         except ModuleNotFoundError:
             raise ApiError.from_task("invalid_script",
-                              "Unable to locate Script: '%s:%s'" % (module_name, class_name),
-                              task=task)
+                                     "Unable to locate Script: '%s:%s'" % (module_name, class_name),
+                                     task=task)

     def evaluate_expression(self, task, expression):
         """
@@ -102,14 +119,15 @@ class WorkflowProcessor(object):
     def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False):
         """Create a Workflow Processor based on the serialized information available in the workflow model.
-        If soft_reset is set to true, it will try to use the latest version of the workflow specification.
-        If hard_reset is set to true, it will create a new Workflow, but embed the data from the last
-        completed task in the previous workflow.
+        If soft_reset is set to true, it will try to use the latest version of the workflow specification
+        without resetting to the beginning of the workflow.  This will work for some minor changes to the spec.
+        If hard_reset is set to true, it will use the latest spec, and start the workflow over from the beginning,
+        which should work in cases where a soft reset fails.
         If neither flag is set, it will use the same version of the specification that was used to originally
         create the workflow model. """
         self.workflow_model = workflow_model
-        if soft_reset or len(workflow_model.dependencies) == 0:
+        if soft_reset or len(workflow_model.dependencies) == 0:  # Dependencies of 0 means the workflow was never started.
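To illustrate the two script modes introduced in CustomBpmnScriptEngine.execute above (the script bodies here are examples, not part of the patch):

import shlex

# Mode 1 - predefined script: a BPMN body such as
#   #! CompleteTemplate NEW_DSP_template.docx Study_DataSecurityPlan
# is stripped of its "#!" and handed to shlex, which also keeps quoted arguments intact:
shlex.split('CompleteTemplate "My Template.docx" Study_DataSecurityPlan')
# -> ['CompleteTemplate', 'My Template.docx', 'Study_DataSecurityPlan']

# Mode 2 - plain Python: a body with no "#!" prefix, e.g.
#   my_title = StudyInfo['details']['TITLE']      (hypothetical task data)
# is executed directly by the base script engine against the task data.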
self.spec_data_files = FileService.get_spec_data_files( workflow_spec_id=workflow_model.workflow_spec_id) else: @@ -135,7 +153,7 @@ class WorkflowProcessor(object): workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(self.bpmn_workflow) self.save() - except KeyError as ke: + except MissingSpecError as ke: raise ApiError(code="unexpected_workflow_structure", message="Failed to deserialize workflow" " '%s' version %s, due to a mis-placed or missing task '%s'" % @@ -162,7 +180,10 @@ class WorkflowProcessor(object): bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine) bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only - bpmn_workflow.do_engine_steps() + try: + bpmn_workflow.do_engine_steps() + except WorkflowException as we: + raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) return bpmn_workflow def save(self): @@ -216,8 +237,6 @@ class WorkflowProcessor(object): full_version = "v%s (%s)" % (version, files) return full_version - - def update_dependencies(self, spec_data_files): existing_dependencies = FileService.get_spec_data_files( workflow_spec_id=self.workflow_model.workflow_spec_id, @@ -267,12 +286,12 @@ class WorkflowProcessor(object): for file_data in file_data_models: if file_data.file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + bpmn: etree.Element = etree.fromstring(file_data.data) if file_data.file_model.primary: process_id = FileService.get_process_id(bpmn) parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name) elif file_data.file_model.type == FileType.dmn: - dmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + dmn: etree.Element = etree.fromstring(file_data.data) parser.add_dmn_xml(dmn, filename=file_data.file_model.name) if process_id is None: raise (ApiError(code="no_primary_bpmn_error", @@ -299,26 +318,16 @@ class WorkflowProcessor(object): return WorkflowStatus.waiting def hard_reset(self): - """Recreate this workflow, but keep the data from the last completed task and add - it back into the first task. This may be useful when a workflow specification changes, - and users need to review all the prior steps, but they don't need to reenter all the previous data. - - Returns the new version. + """Recreate this workflow. This will be useful when a workflow specification changes. """ - - # Create a new workflow based on the latest specs. self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id) new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id) new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine) new_bpmn_workflow.data = self.bpmn_workflow.data - - # Reset the current workflow to the beginning - which we will consider to be the first task after the root - # element. This feels a little sketchy, but I think it is safe to assume root will have one child. 
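A hedged sketch of how a caller would exercise the two reset flavors described in the constructor docstring above; the API layer that actually drives this is outside this diff, so the calling pattern is assumed:

# processor = WorkflowProcessor(workflow_model, soft_reset=True)   # latest spec, keep current position
# processor = WorkflowProcessor(workflow_model, hard_reset=True)   # latest spec, start over
# processor.hard_reset()    # rebuilds the workflow, carrying bpmn_workflow.data forward
# processor.save()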
- first_task = self.bpmn_workflow.task_tree.children[0] - first_task.reset_token(reset_data=False) - for task in new_bpmn_workflow.get_tasks(SpiffTask.READY): - task.data = first_task.data - new_bpmn_workflow.do_engine_steps() + try: + new_bpmn_workflow.do_engine_steps() + except WorkflowException as we: + raise ApiError.from_task_spec("hard_reset_engine_steps_error", str(we), we.sender) self.bpmn_workflow = new_bpmn_workflow def get_status(self): diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 310bd7fd..3205e800 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -1,43 +1,53 @@ +import copy +import json import string +import uuid from datetime import datetime import random import jinja2 from SpiffWorkflow import Task as SpiffTask, WorkflowException +from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask +from SpiffWorkflow.bpmn.specs.MultiInstanceTask import MultiInstanceTask from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask +from SpiffWorkflow.bpmn.specs.StartEvent import StartEvent from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask from SpiffWorkflow.specs import CancelTask, StartTask +from SpiffWorkflow.util.deep_merge import DeepMerge from flask import g from jinja2 import Template from crc import db, app from crc.api.common import ApiError -from crc.models.api_models import Task, MultiInstanceType +from crc.models.api_models import Task, MultiInstanceType, NavigationItem, NavigationItemSchema, WorkflowApi from crc.models.file import LookupDataModel -from crc.models.stats import TaskEventModel +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowModel, WorkflowStatus +from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel from crc.services.file_service import FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService -from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine +from crc.services.workflow_processor import WorkflowProcessor class WorkflowService(object): - TASK_ACTION_COMPLETE = "Complete" - TASK_ACTION_TOKEN_RESET = "Backwards Move" - TASK_ACTION_HARD_RESET = "Restart (Hard)" - TASK_ACTION_SOFT_RESET = "Restart (Soft)" + TASK_ACTION_COMPLETE = "COMPLETE" + TASK_ACTION_TOKEN_RESET = "TOKEN_RESET" + TASK_ACTION_HARD_RESET = "HARD_RESET" + TASK_ACTION_SOFT_RESET = "SOFT_RESET" + TASK_ACTION_ASSIGNMENT = "ASSIGNMENT" # Whenever the lane changes between tasks we assign the task to specifc user. + + TASK_STATE_LOCKED = "LOCKED" # When the task belongs to a different user. """Provides tools for processing workflows and tasks. This should at some point, be the only way to work with Workflows, and the workflow Processor should be hidden behind this service. This will help maintain a structure that avoids circular dependencies. But for now, this contains tools for converting spiff-workflow models into our - own API models with additional information and capabilities and + own API models with additional information and capabilities and handles the testing of a workflow specification by completing it with random selections, attempting to mimic a front end as much as possible. 
""" @@ -89,11 +99,16 @@ class WorkflowService(object): processor.bpmn_workflow.do_engine_steps() tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY) for task in tasks: + if task.task_spec.lane is not None and task.task_spec.lane not in task.data: + raise ApiError.from_task("invalid_role", + f"This task is in a lane called '{task.task_spec.lane}', The " + f" current task data must have information mapping this role to " + f" a unique user id.", task) task_api = WorkflowService.spiff_task_to_api_task( task, - add_docs_and_forms=True) # Assure we try to process the documenation, and raise those errors. + add_docs_and_forms=True) # Assure we try to process the documentation, and raise those errors. WorkflowService.populate_form_with_random_data(task, task_api, required_only) - task.complete() + processor.complete_task(task) except WorkflowException as we: WorkflowService.delete_test_data() raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we) @@ -128,20 +143,37 @@ class WorkflowService(object): @staticmethod def get_random_data_for_field(field, task): - if field.type == "enum": + has_ldap_lookup = field.has_property(Task.PROP_LDAP_LOOKUP) + has_file_lookup = field.has_property(Task.PROP_OPTIONS_FILE_NAME) + has_data_lookup = field.has_property(Task.PROP_OPTIONS_DATA_NAME) + has_lookup = has_ldap_lookup or has_file_lookup or has_data_lookup + + if field.type == "enum" and not has_lookup: + # If it's a normal enum field with no lookup, + # return a random option. if len(field.options) > 0: random_choice = random.choice(field.options) if isinstance(random_choice, dict): - return random.choice(field.options)['id'] + choice = random.choice(field.options) + return { + 'value': choice['id'], + 'label': choice['name'] + } else: # fixme: why it is sometimes an EnumFormFieldOption, and other times not? - return random_choice.id ## Assume it is an EnumFormFieldOption + # Assume it is an EnumFormFieldOption + return { + 'value': random_choice.id, + 'label': random_choice.name + } else: raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s)," " with no options" % field.id, task) - elif field.type == "autocomplete": + elif field.type == "autocomplete" or field.type == "enum": + # If it has a lookup, get the lookup model from the spreadsheet or task data, then return a random option + # from the lookup model lookup_model = LookupService.get_lookup_model(task, field) - if field.has_property(Task.PROP_LDAP_LOOKUP): # All ldap records get the same person. + if has_ldap_lookup: # All ldap records get the same person. 
                return {
                    "label": "dhf8r",
                    "value": "Dan Funk",
@@ -157,9 +189,7 @@ class WorkflowService(object):
             elif lookup_model:
                 data = db.session.query(LookupDataModel).filter(
                     LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
-                options = []
-                for d in data:
-                    options.append({"id": d.value, "label": d.label})
+                options = [{"value": d.value, "label": d.label, "data": d.data} for d in data]
                 return random.choice(options)
             else:
                 raise ApiError.from_task("unknown_lookup_option", "The settings for this auto complete field "
@@ -180,31 +210,111 @@ class WorkflowService(object):
     def __get_options(self):
         pass

-
     @staticmethod
     def _random_string(string_length=10):
         """Generate a random string of fixed length """
         letters = string.ascii_lowercase
         return ''.join(random.choice(letters) for i in range(string_length))

+    @staticmethod
+    def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None):
+        """Returns an API model representing the state of the current workflow; if requested
+        and possible, next_task is set to the current task."""
+
+        nav_dict = processor.bpmn_workflow.get_nav_list()
+
+        # Some basic cleanup of the title for the navigation.
+        navigation = []
+        for nav_item in nav_dict:
+            spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id'])
+            if 'description' in nav_item:
+                nav_item['title'] = nav_item.pop('description')
+                # fixme: duplicate code from the workflow_service. Should only do this in one place.
+                if nav_item['title'] is not None and ' ' in nav_item['title']:
+                    nav_item['title'] = nav_item['title'].partition(' ')[2]
+            else:
+                nav_item['title'] = ""
+            if spiff_task:
+                nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False)
+                nav_item['title'] = nav_item['task'].title  # Prefer the task title.
+
+                user_uids = WorkflowService.get_users_assigned_to_task(processor, spiff_task)
+                if 'user' not in g or not g.user or g.user.uid not in user_uids:
+                    nav_item['state'] = WorkflowService.TASK_STATE_LOCKED
+            else:
+                nav_item['task'] = None
+
+            navigation.append(NavigationItem(**nav_item))
+            NavigationItemSchema().dump(nav_item)
+
+        spec = db.session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
+        workflow_api = WorkflowApi(
+            id=processor.get_workflow_id(),
+            status=processor.get_status(),
+            next_task=None,
+            navigation=navigation,
+            workflow_spec_id=processor.workflow_spec_id,
+            spec_version=processor.get_version_string(),
+            is_latest_spec=processor.is_latest_spec,
+            total_tasks=len(navigation),
+            completed_tasks=processor.workflow_model.completed_tasks,
+            last_updated=processor.workflow_model.last_updated,
+            title=spec.display_name
+        )
+        if not next_task:  # The Next Task can be requested to be a certain task, useful for parallel tasks.
+            # This may or may not work; sometimes there is no next task to complete.
+            next_task = processor.next_task()
+        if next_task:
+            previous_form_data = WorkflowService.get_previously_submitted_data(processor.workflow_model.id, next_task)
+            DeepMerge.merge(next_task.data, previous_form_data)
+            workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True)
+            # Update the state of the task to locked if the current user does not own the task.
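To make the locking rule concrete (lane name and uids invented for illustration):

# A next_task in lane 'supervisor', with task data {'supervisor': 'dhf8r'}, is
# actionable only by uid 'dhf8r'; any other authenticated user receives the task
# marked with TASK_STATE_LOCKED. Tasks without a lane fall back to the study
# owner's uid (see get_users_assigned_to_task later in this file).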
+ user_uids = WorkflowService.get_users_assigned_to_task(processor, next_task) + if 'user' not in g or not g.user or g.user.uid not in user_uids: + workflow_api.next_task.state = WorkflowService.TASK_STATE_LOCKED + return workflow_api + + @staticmethod + def get_previously_submitted_data(workflow_id, spiff_task): + """ If the user has completed this task previously, find the form data for the last submission.""" + query = db.session.query(TaskEventModel) \ + .filter_by(workflow_id=workflow_id) \ + .filter_by(task_name=spiff_task.task_spec.name) \ + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) + + if hasattr(spiff_task, 'internal_data') and 'runtimes' in spiff_task.internal_data: + query = query.filter_by(mi_index=spiff_task.internal_data['runtimes']) + + latest_event = query.order_by(TaskEventModel.date.desc()).first() + if latest_event: + if latest_event.form_data is not None: + return latest_event.form_data + else: + missing_form_error = ( + f'We have lost data for workflow {workflow_id}, ' + f'task {spiff_task.task_spec.name}, it is not in the task event model, ' + f'and it should be.' + ) + app.logger.error("missing_form_data", missing_form_error, exc_info=True) + return {} + else: + return {} + + @staticmethod def spiff_task_to_api_task(spiff_task, add_docs_and_forms=False): task_type = spiff_task.task_spec.__class__.__name__ - if isinstance(spiff_task.task_spec, UserTask): - task_type = "UserTask" - elif isinstance(spiff_task.task_spec, ManualTask): - task_type = "ManualTask" - elif isinstance(spiff_task.task_spec, BusinessRuleTask): - task_type = "BusinessRuleTask" - elif isinstance(spiff_task.task_spec, CancelTask): - task_type = "CancelTask" - elif isinstance(spiff_task.task_spec, ScriptTask): - task_type = "ScriptTask" - elif isinstance(spiff_task.task_spec, StartTask): - task_type = "StartTask" - else: - task_type = "NoneTask" + task_types = [UserTask, ManualTask, BusinessRuleTask, CancelTask, ScriptTask, StartTask, EndEvent, StartEvent] + + for t in task_types: + if isinstance(spiff_task.task_spec, t): + task_type = t.__name__ + break + else: + task_type = "NoneTask" info = spiff_task.task_info() if info["is_looping"]: @@ -218,14 +328,20 @@ class WorkflowService(object): props = {} if hasattr(spiff_task.task_spec, 'extensions'): - for id, val in spiff_task.task_spec.extensions.items(): - props[id] = val + for key, val in spiff_task.task_spec.extensions.items(): + props[key] = val + + if hasattr(spiff_task.task_spec, 'lane'): + lane = spiff_task.task_spec.lane + else: + lane = None task = Task(spiff_task.id, spiff_task.task_spec.name, spiff_task.task_spec.description, task_type, spiff_task.get_state_name(), + lane, None, "", {}, @@ -243,8 +359,8 @@ class WorkflowService(object): task.data = spiff_task.data if hasattr(spiff_task.task_spec, "form"): task.form = spiff_task.task_spec.form - for field in task.form.fields: - WorkflowService.process_options(spiff_task, field) + for i, field in enumerate(task.form.fields): + task.form.fields[i] = WorkflowService.process_options(spiff_task, field) task.documentation = WorkflowService._process_documentation(spiff_task) # All ready tasks should have a valid name, and this can be computed for @@ -257,10 +373,12 @@ class WorkflowService(object): # otherwise strip off the first word of the task, as that should be following # a BPMN standard, and should not be included in the display. 
if task.properties and "display_name" in task.properties: - task.title = task.properties['display_name'] + try: + task.title = spiff_task.workflow.script_engine.evaluate_expression(spiff_task, task.properties['display_name']) + except Exception as e: + app.logger.error("Failed to set title on task due to type error." + str(e), exc_info=True) elif task.title and ' ' in task.title: task.title = task.title.partition(' ')[2] - return task @staticmethod @@ -271,7 +389,7 @@ class WorkflowService(object): template = Template(v) props[k] = template.render(**spiff_task.data) except jinja2.exceptions.TemplateError as ue: - app.logger.error("Failed to process task property %s " % str(ue)) + app.logger.error(f'Failed to process task property {str(ue)}', exc_info=True) return props @staticmethod @@ -301,7 +419,8 @@ class WorkflowService(object): except TypeError as te: raise ApiError.from_task(code="template_error", message="Error processing template for task %s: %s" % (spiff_task.task_spec.name, str(te)), task=spiff_task) - # TODO: Catch additional errors and report back. + except Exception as e: + app.logger.error(str(e), exc_info=True) @staticmethod def process_options(spiff_task, field): @@ -309,23 +428,76 @@ class WorkflowService(object): # If this is an auto-complete field, do not populate options, a lookup will happen later. if field.type == Task.FIELD_TYPE_AUTO_COMPLETE: pass - elif field.has_property(Task.PROP_OPTIONS_FILE): + elif field.has_property(Task.PROP_OPTIONS_FILE_NAME): lookup_model = LookupService.get_lookup_model(spiff_task, field) data = db.session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_model).all() if not hasattr(field, 'options'): field.options = [] for d in data: - field.options.append({"id": d.value, "name": d.label}) + field.options.append({"id": d.value, "name": d.label, "data": d.data}) + elif field.has_property(Task.PROP_OPTIONS_DATA_NAME): + field.options = WorkflowService.get_options_from_task_data(spiff_task, field) + return field + + @staticmethod + def get_options_from_task_data(spiff_task, field): + if not (field.has_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN) or + field.has_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN)): + raise ApiError.from_task("invalid_enum", + f"For enumerations based on task data, you must include 3 properties: " + f"{Task.PROP_OPTIONS_DATA_NAME}, {Task.PROP_OPTIONS_DATA_VALUE_COLUMN}, " + f"{Task.PROP_OPTIONS_DATA_LABEL_COLUMN}", task=spiff_task) + prop = field.get_property(Task.PROP_OPTIONS_DATA_NAME) + if prop not in spiff_task.data: + raise ApiError.from_task("invalid_enum", f"For enumerations based on task data, task data must have " + f"a property called {prop}", task=spiff_task) + # Get the enum options from the task data + data_model = spiff_task.data[prop] + value_column = field.get_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN) + label_column = field.get_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN) + items = data_model.items() if isinstance(data_model, dict) else data_model + options = [] + for item in items: + options.append({"id": item[value_column], "name": item[label_column], "data": item}) + return options + + @staticmethod + def update_task_assignments(processor): + """For every upcoming user task, log a task action + that connects the assigned user(s) to that task. All + existing assignment actions for this workflow are removed from the database, + so that only the current valid actions are available. 
update_task_assignments + should be called whenever progress is made on a workflow.""" + db.session.query(TaskEventModel). \ + filter(TaskEventModel.workflow_id == processor.workflow_model.id). \ + filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).delete() + + for task in processor.get_current_user_tasks(): + user_ids = WorkflowService.get_users_assigned_to_task(processor, task) + for user_id in user_ids: + WorkflowService.log_task_action(user_id, processor, task, WorkflowService.TASK_ACTION_ASSIGNMENT) + + @staticmethod + def get_users_assigned_to_task(processor, spiff_task): + if not hasattr(spiff_task.task_spec, 'lane') or spiff_task.task_spec.lane is None: + return [processor.workflow_model.study.user_uid] + # todo: return a list of all users that can edit the study by default + if spiff_task.task_spec.lane not in spiff_task.data: + return [] # No users are assignable to the task at this moment + lane_users = spiff_task.data[spiff_task.task_spec.lane] + if not isinstance(lane_users, list): + lane_users = [lane_users] + return lane_users @staticmethod def log_task_action(user_uid, processor, spiff_task, action): task = WorkflowService.spiff_task_to_api_task(spiff_task) - workflow_model = processor.workflow_model + form_data = WorkflowService.extract_form_data(spiff_task.data, spiff_task) task_event = TaskEventModel( - study_id=workflow_model.study_id, + study_id=processor.workflow_model.study_id, user_uid=user_uid, - workflow_id=workflow_model.id, - workflow_spec_id=workflow_model.workflow_spec_id, + workflow_id=processor.workflow_model.id, + workflow_spec_id=processor.workflow_model.workflow_spec_id, spec_version=processor.get_version_string(), action=action, task_id=task.id, @@ -333,6 +505,8 @@ class WorkflowService(object): task_title=task.title, task_type=str(task.type), task_state=task.state, + task_lane=task.lane, + form_data=form_data, mi_type=task.multi_instance_type.value, # Some tasks have a repeat behavior. mi_count=task.multi_instance_count, # This is the number of times the task could repeat. mi_index=task.multi_instance_index, # And the index of the currently repeating task. @@ -342,3 +516,29 @@ class WorkflowService(object): db.session.add(task_event) db.session.commit() + @staticmethod + def extract_form_data(latest_data, task): + """Removes data from latest_data that would be added by the child task or any of its children.""" + data = {} + + if hasattr(task.task_spec, 'form'): + for field in task.task_spec.form.fields: + if field.has_property(Task.PROP_OPTIONS_READ_ONLY) and \ + field.get_property(Task.PROP_OPTIONS_READ_ONLY).lower().strip() == "true": + continue # Don't add read-only data + elif field.has_property(Task.PROP_OPTIONS_REPEAT): + group = field.get_property(Task.PROP_OPTIONS_REPEAT) + if group in latest_data: + data[group] = latest_data[group] + elif isinstance(task.task_spec, MultiInstanceTask): + group = task.task_spec.elementVar + if group in latest_data: + data[group] = latest_data[group] + else: + if field.id in latest_data: + data[field.id] = latest_data[field.id] + + return data + + + diff --git a/crc/static/bpmn/core_info/core_info.bpmn b/crc/static/bpmn/core_info/core_info.bpmn index 8e790f98..8c69ffb3 100644 --- a/crc/static/bpmn/core_info/core_info.bpmn +++ b/crc/static/bpmn/core_info/core_info.bpmn @@ -1,5 +1,5 @@ - + Flow_1wqp7vf @@ -212,7 +212,7 @@ SequenceFlow_1r3yrhy Flow_09h1imz - StudyInfo details + #! 
StudyInfo details Flow_09h1imz diff --git a/crc/static/bpmn/data_security_plan/HIPAA_Ids.xls b/crc/static/bpmn/data_security_plan/HIPAA_Ids.xls new file mode 100644 index 00000000..2d703832 Binary files /dev/null and b/crc/static/bpmn/data_security_plan/HIPAA_Ids.xls differ diff --git a/crc/static/bpmn/data_security_plan/NEW_DSP_template.docx b/crc/static/bpmn/data_security_plan/NEW_DSP_template.docx index 9c282eaa..f6faeb28 100644 Binary files a/crc/static/bpmn/data_security_plan/NEW_DSP_template.docx and b/crc/static/bpmn/data_security_plan/NEW_DSP_template.docx differ diff --git a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn index fc6704fa..86426d6d 100644 --- a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn +++ b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_100w7co @@ -10,45 +10,27 @@ - + + + + - - - - - - - - - - - - - - - - - - - - - + - - + - + @@ -162,28 +144,10 @@ + + + - - - - - - - - - - - - - - - - - - - - - @@ -202,31 +166,14 @@ - + + + + - - - - - - - - - - - - - - - - - - - - @@ -253,9 +200,8 @@ Answer the questions for each of the Individual Use Devices that you use to collect or store your data onto your individual use device during the course of your research. Do not select these items if they are only to be used to connect elsewhere (to the items you identified in Electronic Medical Record, UVA approved eCRF or clinical trials management system, UVA servers & websites, and Web-based server, cloud server, or any non-centrally managed server): - + - @@ -276,95 +222,75 @@ - + - - + - + - - + - + - - + + + + - - - - - - - - - - - - - - - - - - - - - + - - + - + - + - + - + + + + + + + + + + + + + - - + - + - - + - + - - - - - - - - - - + @@ -372,7 +298,7 @@ SequenceFlow_0nc6lcs SequenceFlow_0gp2pjm - + @@ -389,7 +315,7 @@ Indicate all the possible formats in which you will transmit your data outside o - Flow_0cpwkms + SequenceFlow_0gp2pjm SequenceFlow_0mgwas4 @@ -415,7 +341,6 @@ Indicate all the possible formats in which you will transmit your data outside o - @@ -424,48 +349,27 @@ Indicate all the possible formats in which you will transmit your data outside o - + + + - - - - - - - - - - - - - - - - - - - - - - - @@ -498,7 +402,7 @@ Indicate all the possible formats in which you will transmit your data outside o - + @@ -506,7 +410,7 @@ Indicate all the possible formats in which you will transmit your data outside o - + @@ -518,6 +422,7 @@ Indicate all the possible formats in which you will transmit your data outside o SequenceFlow_0lere0k + Done message SequenceFlow_16kyite @@ -548,7 +453,7 @@ Indicate all the possible formats in which you will transmit your data outside o SequenceFlow_0k2r83n SequenceFlow_0t6xl9i SequenceFlow_16kyite - CompleteTemplate NEW_DSP_template.docx Study_DataSecurityPlan + #! CompleteTemplate NEW_DSP_template.docx Study_DataSecurityPlan ##### Instructions @@ -568,7 +473,10 @@ Process: The Study Team will answer the questions in this section to create the How to The Data Security Plan is auto-generated based on your answers on this Step. You can save your information here and check the outcomes on the Data Security Plan Upload Step at any time. -Submit the step only when you are ready. After you "Submit" the step, the information will not be available for editing. +Submit the step only when you are ready. After you "Submit" the step, the information will not be available for editing. 
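These script-task bodies now carry the #! prefix, and the engine maps the leading word of a predefined script to a module under crc.scripts via a camel-to-snake conversion. A minimal sketch of that conversion follows; the engine's actual helper may differ:

import re

def camel_to_snake(name):
    # e.g. 'CompleteTemplate' -> 'complete_template', i.e. crc.scripts.complete_template
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()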
+ + +# test @@ -623,339 +531,222 @@ Indicate all the possible formats in which you will collect or receive your orig SequenceFlow_0blyor8 SequenceFlow_1oq4w2h - - SequenceFlow_0gp2pjm - Flow_0cpwkms - - - - - > Instructions -o Hippa Instructions -o Hippa Indentifiers -o Vuew Definitions and Instructions -o Paper Documents -o Emailed to UVA Personnel -o EMC (EPIC) -o UVA Approvled eCRF -o UVA Servers -o Web or Cloud Server -o Individual Use Devices -o Device Details -0 Outside of UVA - -o Outside of UVA? -     o Yes  -           o Email Methods -           o Data Management -           o Transmission Method -           o Generate DSP  -    o No -           o Generate DSP - - - - *  Instructions -* Hippa Instructions -* Hippa Indentifiers -o Vuew Definitions and Instructions ->> Paper Documents -> Emailed to UVA Personnel -> EMC (EPIC) -> UVA Approvled eCRF -> UVA Servers -> Web or Cloud Server -o Individual Use Devices -o Device Details -o Outside of UVA - -o Outside of UVA? -     o Yes  -           o Email Methods -           o Data Management -           o Transmission Method -           o Generate DSP  -    o No -           o Generate DSP - - - - * Instructions -* Hippa Instructions -* Hippa Indentifiers -* View Definitions and Instructions - - -* Paper Documents (Parallel creates spaces) -* Emailed to UVA Personnel -* EMC (EPIC) -* UVA Approvled eCRF -* UVA Servers -* Web or Cloud Server -* Individual Use Devices - -o Device Details (MultiInstance Indents, Parallel creates spaces)) - > Desktop - >> Laptop - > Cell Phone - > Other - -o Outside of UVA - -o Outside of UVA? -     o Yes  -           o Email Methods -           o Data Management -           o Transmission Method -           o Generate DSP  -    o No -           o Generate DSP - - - - - - - - - - - - - + + - - - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - + + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - + + - - + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - - - + + + - + - + - + - + - + - + - + + + + + + + - + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn index bf39615b..12e85e34 100644 --- a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn +++ b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn @@ -41,8 +41,7 @@ {%- else -%} | {{doc.display_name}} | Not started | [?](/help/documents/{{doc.code}}) | No file yet | {%- endif %} -{% endif %}{% endfor %} - +{% endif %}{% endfor %} @@ -54,12 +53,12 @@ Flow_0c7ryff Flow_142jtxs - StudyInfo approvals + #! StudyInfo approvals Flow_1k3su2q Flow_0c7ryff - StudyInfo documents + #! StudyInfo documents diff --git a/crc/static/bpmn/ide_supplement/ide_supplement.bpmn b/crc/static/bpmn/ide_supplement/ide_supplement.bpmn index a886b4d4..7a83643b 100644 --- a/crc/static/bpmn/ide_supplement/ide_supplement.bpmn +++ b/crc/static/bpmn/ide_supplement/ide_supplement.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_1dhb8f4 @@ -36,7 +36,7 @@ SequenceFlow_1dhb8f4 SequenceFlow_1uzcl1f - StudyInfo details + #! 
StudyInfo details diff --git a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn index 719b3257..25a9ad6e 100644 --- a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn +++ b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_1dexemq @@ -100,7 +100,7 @@ Protocol Owner: **(need to insert value here)** - + @@ -123,7 +123,7 @@ Protocol Owner: **(need to insert value here)** - + @@ -159,13 +159,13 @@ Protocol Owner: **(need to insert value here)** - + - + @@ -206,7 +206,7 @@ Protocol Owner: **(need to insert value here)** - + SequenceFlow_0lixqzs @@ -217,128 +217,128 @@ Protocol Owner: **(need to insert value here)** SequenceFlow_1dexemq Flow_1x9d2mo - StudyInfo documents + #! StudyInfo documents - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + diff --git a/crc/static/bpmn/ind_supplement/decision_ind_check.dmn b/crc/static/bpmn/ind_supplement/decision_ind_check.dmn deleted file mode 100644 index 9104121b..00000000 --- a/crc/static/bpmn/ind_supplement/decision_ind_check.dmn +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - StudyInfo.details.IS_IND - - - - - StudyInfo.details.IND_1 - - - - - StudyInfo.details.IND_2 - - - - - StudyInfo.details.IND_3 - - - - - - 1 - - - not('') - - - - - - - - - true - - - - - 1 - - - - - - not('') - - - - - - true - - - - - 1 - - - - - - - - - not('') - - - true - - - - - - - - - - - - - - - - - false - - - - - diff --git a/crc/static/bpmn/ind_supplement/ind_supplement.bpmn b/crc/static/bpmn/ind_supplement/ind_supplement.bpmn deleted file mode 100644 index b25e080b..00000000 --- a/crc/static/bpmn/ind_supplement/ind_supplement.bpmn +++ /dev/null @@ -1,127 +0,0 @@ - - - - - SequenceFlow_1dhb8f4 - - - - SequenceFlow_1yhv1qz - SequenceFlow_1enco3g - - - SequenceFlow_1dhb8f4 - SequenceFlow_1uzcl1f - StudyInfo details - - - - SequenceFlow_1lazou8 - SequenceFlow_1yb1vma - SequenceFlow_011l5xt - - - ind_supplement == True - - - ind_supplement == False - - - The use of an Investigational New Drug (IND) was indicated in Protocol Builder, but no IND number was entered. Please enter up to three numbers in the Supplemental section of Protocol Builder so supplemental information can be entered here. 
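The deleted ind_supplement decision table is superseded by ind_update/decision_ind_check.dmn in the hunks that follow; its rules key off how many of the three IND numbers are non-empty. A hedged Python mirror of that decision logic, with semantics as read from this diff:

def ind_numbers_entered(ind_1, ind_2, ind_3):
    # Mirrors the DMN rules: 0 entered -> 'na', 1 -> 'one', any 2 -> 'two', all 3 -> 'three'.
    entered = [n for n in (ind_1, ind_2, ind_3) if n not in (None, '')]
    return {0: 'na', 1: 'one', 2: 'two', 3: 'three'}[len(entered)]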
- SequenceFlow_011l5xt - SequenceFlow_1yhv1qz - - - - SequenceFlow_1uzcl1f - SequenceFlow_1lazou8 - - - - IND No.: {{StudyInfo.details.IND_1}} - - - - - - - - - - - - - - - - SequenceFlow_1yb1vma - SequenceFlow_1enco3g - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/crc/static/bpmn/ind_update/SponsorList.xls b/crc/static/bpmn/ind_update/SponsorList.xls new file mode 100644 index 00000000..df1cf0d7 Binary files /dev/null and b/crc/static/bpmn/ind_update/SponsorList.xls differ diff --git a/crc/static/bpmn/ind_update/decision_ind_check.dmn b/crc/static/bpmn/ind_update/decision_ind_check.dmn new file mode 100644 index 00000000..658f02ab --- /dev/null +++ b/crc/static/bpmn/ind_update/decision_ind_check.dmn @@ -0,0 +1,220 @@ + + + + + + + StudyInfo.details.IS_IND + + + + + StudyInfo.details.IND_1 + + + + + StudyInfo.details.IND_2 + + + + + StudyInfo.details.IND_3 + + + + + + + + 3 IND #s + + 1 + + + not('') + + + not('') + + + not('') + + + true + + + "three" + + + "Three IND #s entered" + + + + + + + 2 IND #s + + 1 + + + not('') + + + not('') + + + "" + + + true + + + "two" + + + "Two IND #s entered" + + + + + + + 3 IND#s, missing #2 + + 1 + + + not('') + + + "" + + + not('') + + + true + + + "two" + + + "Two IND #s entered" + + + + + + + 3 IND#s, missing #1 + + 1 + + + "" + + + not('') + + + not('') + + + true + + + "two" + + + "Two IND #s entered" + + + + + + + 1 IND # + + 1 + + + not('') + + + "" + + + "" + + + true + + + "one" + + + "One IND # entered" + + + StudyInfo.details.IND_1 + + + + No + + 1 + + + + + + + + + + + + true + + + "na" + + + "No IND Numbers Entered in PB" + + + "" + + + + No IND, PB Q#56 answered as No, should not be needed, but here as stopgap in case memu check failed + + 0 + + + + + + + + + + + + false + + + + + + + + + + + + + + diff --git a/crc/static/bpmn/ind_update/ind_update.bpmn b/crc/static/bpmn/ind_update/ind_update.bpmn new file mode 100644 index 00000000..528a87ce --- /dev/null +++ b/crc/static/bpmn/ind_update/ind_update.bpmn @@ -0,0 +1,276 @@ + + + + + SequenceFlow_1dhb8f4 + + + + Flow_0jqdolk + Flow_OneOnly + + + SequenceFlow_1dhb8f4 + SequenceFlow_1uzcl1f + #! StudyInfo details + + + + SequenceFlow_1uzcl1f + SequenceFlow_1cwibmt + + + IND No.: {{ StudyInfo.details.IND_1 }} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_1bn0jp7 + Flow_10rb7gb + + + IND No.: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_TwoOrThree + Flow_1p563xr + + + IND No.: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_1p563xr + Flow_0jqdolk + + + + {{ ind_message }} + + + + + + + + + + + + + + + + + + + SequenceFlow_1cwibmt + Flow_1bn0jp7 + + + + + + + Flow_10rb7gb + Flow_TwoOrThree + Flow_OneOnly + + + IND_CntEntered != "value_one" + + + IND_CntEntered == "value_one" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/crc/static/bpmn/irb_api_details/irb_api_details.bpmn b/crc/static/bpmn/irb_api_details/irb_api_details.bpmn index b5f0da02..b4f540f5 100644 --- a/crc/static/bpmn/irb_api_details/irb_api_details.bpmn +++ b/crc/static/bpmn/irb_api_details/irb_api_details.bpmn @@ -1,5 +1,5 @@ - + @@ -8,7 +8,7 @@ SequenceFlow_1fmyo77 SequenceFlow_18nr0gf - StudyInfo details + #! 
StudyInfo details diff --git a/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn b/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn index ba522b78..a5258cbe 100644 --- a/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn +++ b/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn @@ -1,5 +1,5 @@ - + Flow_0kcrx5l @@ -7,30 +7,41 @@ Flow_0kcrx5l Flow_1dcsioh - StudyInfo investigators + #! StudyInfo investigators + ## The following information was gathered: +{% for type, investigator in StudyInfo.investigators.items() %} +### {{investigator.label}}: {{investigator.display_name}} + * Edit Acess? {{investigator.edit_access}} + * Send Emails? {{investigator.emails}} +{% if investigator.label == "Primary Investigator" %} + * Experience: {{investigator.experience}} +{% endif %} +{% endfor %} Flow_1mplloa - + ### Please provide supplemental information for: -#### Investigator : {{investigator.display_name}} -##### Role: {{investigator.type_full}} +#### {{investigator.display_name}} ##### Title: {{investigator.title}} ##### Department: {{investigator.department}} ##### Affiliation: {{investigator.affiliation}} - - - + + + - + + + + Flow_1dcsioh Flow_1mplloa @@ -43,28 +54,28 @@ - - + + - - + + - + - - - - + - + + + + diff --git a/crc/static/bpmn/research_rampup/research_rampup.bpmn b/crc/static/bpmn/research_rampup/research_rampup.bpmn index 19588731..4a04eb6d 100644 --- a/crc/static/bpmn/research_rampup/research_rampup.bpmn +++ b/crc/static/bpmn/research_rampup/research_rampup.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_05ja25w @@ -598,7 +598,7 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur This step is internal to the system and do not require and user interaction Flow_11uqavk Flow_0aqgwvu - CompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP + #! CompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP @@ -755,7 +755,7 @@ Notify the Area Monitor for This step is internal to the system and do not require and user interaction Flow_0j4rs82 Flow_07ge8uf - RequestApproval ApprvlApprvr1 ApprvlApprvr2 + #!RequestApproval ApprvlApprvr1 ApprvlApprvr2 #### Script Task @@ -764,7 +764,7 @@ This step is internal to the system and do not require and user interaction Flow_16y8glw Flow_0uc4o6c - UpdateStudy title:PIComputingID.label pi:PIComputingID.value + #! UpdateStudy title:PIComputingID.label pi:PIComputingID.value #### Weekly Personnel Schedule(s) diff --git a/crc/static/bpmn/sponsor_funding_source/sponsors.xls b/crc/static/bpmn/sponsor_funding_source/sponsors.xls index 7bb3882c..92c6cf66 100644 Binary files a/crc/static/bpmn/sponsor_funding_source/sponsors.xls and b/crc/static/bpmn/sponsor_funding_source/sponsors.xls differ diff --git a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn index 6806fa5b..23d6ff72 100644 --- a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn +++ b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn @@ -11,7 +11,7 @@ SequenceFlow_1ees8ka SequenceFlow_17ct47v - StudyInfo documents + #! StudyInfo documents Flow_1m8285h @@ -62,7 +62,7 @@ Flow_0pwtiqm Flow_0eq6px2 - StudyInfo details + #! StudyInfo details Flow_14ce1d7 @@ -91,7 +91,7 @@ Flow_1qyrmzn Flow_0vo6ul1 - StudyInfo investigators + #! 
StudyInfo investigators
diff --git a/crc/static/templates/mails/ramp_up_denied.txt b/crc/static/templates/mails/ramp_up_denied.txt
index 5fbaefda..120522b8 100644
--- a/crc/static/templates/mails/ramp_up_denied.txt
+++ b/crc/static/templates/mails/ramp_up_denied.txt
@@ -1 +1 @@
- Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and res-submit for approval.
\ No newline at end of file
+ Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval.
\ No newline at end of file
diff --git a/docker_run.sh b/docker_run.sh
index 6bc3c90b..ec80bb99 100755
--- a/docker_run.sh
+++ b/docker_run.sh
@@ -23,8 +23,10 @@ if [ "$RESET_DB_RRT" = "true" ]; then
     pipenv run flask load-example-rrt-data
 fi

+# THIS MUST BE THE LAST COMMAND!
 if [ "$APPLICATION_ROOT" = "/" ]; then
     pipenv run gunicorn --bind 0.0.0.0:$PORT0 wsgi:app
 else
     pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app
 fi
+
diff --git a/example_data.py b/example_data.py
index 98746c50..efdfe3b3 100644
--- a/example_data.py
+++ b/example_data.py
@@ -93,8 +93,8 @@ class ExampleDataLoader:
                          description="Supplemental information for the IDE number entered in Protocol Builder",
                          category_id=0,
                          display_order=3)
-        self.create_spec(id="ind_supplement",
-                         name="ind_supplement",
+        self.create_spec(id="ind_update",
+                         name="ind_update",
                          display_name="IND Supplement Info",
                          description="Supplement information for the Investigational New Drug(s) specified in Protocol Builder",
                          category_id=0,
diff --git a/migrations/versions/1fdd1bdb600e_.py b/migrations/versions/1fdd1bdb600e_.py
new file mode 100644
index 00000000..dff1fdae
--- /dev/null
+++ b/migrations/versions/1fdd1bdb600e_.py
@@ -0,0 +1,28 @@
+"""empty message
+
+Revision ID: 1fdd1bdb600e
+Revises: 17597692d0b0
+Create Date: 2020-06-17 16:44:16.427988
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '1fdd1bdb600e'
+down_revision = '17597692d0b0'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('task_event', sa.Column('task_data', sa.JSON(), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('task_event', 'task_data')
+    # ### end Alembic commands ###
diff --git a/migrations/versions/5acd138e969c_.py b/migrations/versions/5acd138e969c_.py
new file mode 100644
index 00000000..22b6b79a
--- /dev/null
+++ b/migrations/versions/5acd138e969c_.py
@@ -0,0 +1,38 @@
+"""empty message
+
+Revision ID: 5acd138e969c
+Revises: de30304ff5e6
+Create Date: 2020-06-24 21:36:15.128632
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '5acd138e969c' +down_revision = 'de30304ff5e6' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('email', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('subject', sa.String(), nullable=True), + sa.Column('sender', sa.String(), nullable=True), + sa.Column('recipients', sa.String(), nullable=True), + sa.Column('content', sa.String(), nullable=True), + sa.Column('content_html', sa.String(), nullable=True), + sa.Column('study_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['study_id'], ['study.id'], ), + sa.PrimaryKeyConstraint('id') + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('email') + # ### end Alembic commands ### diff --git a/migrations/versions/de30304ff5e6_.py b/migrations/versions/de30304ff5e6_.py new file mode 100644 index 00000000..46a43f18 --- /dev/null +++ b/migrations/versions/de30304ff5e6_.py @@ -0,0 +1,30 @@ +"""empty message + +Revision ID: de30304ff5e6 +Revises: 1fdd1bdb600e +Create Date: 2020-06-18 16:19:11.133665 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = 'de30304ff5e6' +down_revision = '1fdd1bdb600e' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('form_data', sa.JSON(), nullable=True)) + op.drop_column('task_event', 'task_data') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('task_data', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) + op.drop_column('task_event', 'form_data') + # ### end Alembic commands ### diff --git a/migrations/versions/ffef4661a37d_.py b/migrations/versions/ffef4661a37d_.py new file mode 100644 index 00000000..2a263951 --- /dev/null +++ b/migrations/versions/ffef4661a37d_.py @@ -0,0 +1,38 @@ +"""empty message + +Revision ID: ffef4661a37d +Revises: 5acd138e969c +Create Date: 2020-07-14 19:52:05.270939 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'ffef4661a37d' +down_revision = '5acd138e969c' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('task_lane', sa.String(), nullable=True)) + op.drop_constraint('task_event_user_uid_fkey', 'task_event', type_='foreignkey') + op.execute("update task_event set action = 'COMPLETE' where action='Complete'") + op.execute("update task_event set action = 'TOKEN_RESET' where action='Backwards Move'") + op.execute("update task_event set action = 'HARD_RESET' where action='Restart (Hard)'") + op.execute("update task_event set action = 'SOFT_RESET' where action='Restart (Soft)'") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_foreign_key('task_event_user_uid_fkey', 'task_event', 'user', ['user_uid'], ['uid']) + op.drop_column('task_event', 'task_lane') + op.execute("update task_event set action = 'Complete' where action='COMPLETE'") + op.execute("update task_event set action = 'Backwards Move' where action='TOKEN_RESET'") + op.execute("update task_event set action = 'Restart (Hard)' where action='HARD_RESET'") + op.execute("update task_event set action = 'Restart (Soft)' where action='SOFT_RESET'") + # ### end Alembic commands ### diff --git a/postgres/docker-compose.yml b/postgres/docker-compose.yml index 2ed67073..31116b85 100644 --- a/postgres/docker-compose.yml +++ b/postgres/docker-compose.yml @@ -1,9 +1,8 @@ version: "3.7" services: db: - image: postgres + image: sartography/cr-connect-db volumes: - - ./pg-init-scripts/initdb.sh:/docker-entrypoint-initdb.d/initdb.sh - $HOME/docker/volumes/postgres:/var/lib/postgresql/data ports: - 5432:5432 diff --git a/tests/test_approvals_api.py b/tests/approval/test_approvals_api.py similarity index 92% rename from tests/test_approvals_api.py rename to tests/approval/test_approvals_api.py index ed0f7c5d..03cd8622 100644 --- a/tests/test_approvals_api.py +++ b/tests/approval/test_approvals_api.py @@ -217,27 +217,6 @@ class TestApprovals(BaseTest): total_counts = sum(counts[status] for status in statuses) self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user') - def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses, - workflow_spec_name="random_fact"): - study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id) - workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study) - approvals = [] - - for i in range(len(approver_uids)): - approvals.append(self.create_approval( - study=study, - workflow=workflow, - approver_uid=approver_uids[i], - status=statuses[i], - version=1 - )) - - return { - 'study': study, - 'workflow': workflow, - 'approvals': approvals, - } - def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"): num_studies_before = db.session.query(StudyModel).count() statuses = [name for name, value in ApprovalStatus.__members__.items()] diff --git a/tests/test_approvals_service.py b/tests/approval/test_approvals_service.py similarity index 55% rename from tests/test_approvals_service.py rename to tests/approval/test_approvals_service.py index 26a26ef4..dae15eee 100644 --- a/tests/test_approvals_service.py +++ b/tests/approval/test_approvals_service.py @@ -1,7 +1,7 @@ from tests.base_test import BaseTest from crc import db from crc.models.approval import ApprovalModel -from crc.services.approval_service import ApprovalService +from crc.services.approval_service import ApprovalService, ApprovalStatus from crc.services.file_service import FileService from crc.services.workflow_processor import WorkflowProcessor @@ -57,6 +57,60 @@ class TestApprovalsService(BaseTest): self.assertEqual(1, models[0].version) self.assertEqual(2, models[1].version) + def test_get_health_attesting_records(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, 
approver_uid="dhf8r") + records = ApprovalService.get_health_attesting_records() + + self.assertEqual(len(records), 1) + + def test_get_not_really_csv_content(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + records = ApprovalService.get_not_really_csv_content() + + self.assertEqual(len(records), 2) + + def test_new_approval_cancels_all_previous_approvals(self): + self.create_reference_document() + workflow = self.create_workflow("empty_workflow") + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr" ) + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="lb3dp") + + current_count = ApprovalModel.query.count() + self.assertTrue(current_count, 2) + + FileService.add_workflow_file(workflow_id=workflow.id, + name="borderline.png", content_type="text", + binary_data=b'906090', irb_doc_code="AD_CoCAppr" ) + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + + current_count = ApprovalModel.query.count() + canceled_count = ApprovalModel.query.filter(ApprovalModel.status == ApprovalStatus.CANCELED.value) + self.assertTrue(current_count, 2) + self.assertTrue(current_count, 3) + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="lb3dp") + + current_count = ApprovalModel.query.count() + self.assertTrue(current_count, 4) + def test_new_approval_sends_proper_emails(self): self.assertEqual(1, 1) diff --git a/tests/test_request_approval_script.py b/tests/approval/test_request_approval_script.py similarity index 100% rename from tests/test_request_approval_script.py rename to tests/approval/test_request_approval_script.py diff --git a/tests/base_test.py b/tests/base_test.py index 93294193..6ea1966d 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -16,7 +16,7 @@ from crc.models.api_models import WorkflowApiSchema, MultiInstanceType from crc.models.approval import ApprovalModel, ApprovalStatus from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.stats import TaskEventModel +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel @@ -230,7 +230,7 @@ class BaseTest(unittest.TestCase): db.session.commit() return user - def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"): + def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer", primary_investigator_id="lb3dp"): study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first() if study is None: user = self.create_user(uid=uid) @@ -240,13 +240,36 @@ class BaseTest(unittest.TestCase): db.session.commit() return study - def create_workflow(self, workflow_name, study=None, category_id=None): + 
def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses, + workflow_spec_name="random_fact"): + study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id) + workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study) + approvals = [] + + for i in range(len(approver_uids)): + approvals.append(self.create_approval( + study=study, + workflow=workflow, + approver_uid=approver_uids[i], + status=statuses[i], + version=1 + )) + + full_study = { + 'study': study, + 'workflow': workflow, + 'approvals': approvals, + } + + return full_study + + def create_workflow(self, workflow_name, study=None, category_id=None, as_user="dhf8r"): db.session.flush() spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first() if spec is None: spec = self.load_test_spec(workflow_name, category_id=category_id) if study is None: - study = self.create_study() + study = self.create_study(uid=as_user) workflow_model = StudyService._create_workflow_model(study, spec) return workflow_model @@ -290,7 +313,8 @@ class BaseTest(unittest.TestCase): self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id) return workflow_api - def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"): + + def complete_form(self, workflow_in, task_in, dict_data, error_code=None, terminate_loop=None, user_uid="dhf8r"): prev_completed_task_count = workflow_in.completed_tasks if isinstance(task_in, dict): task_id = task_in["id"] @@ -299,11 +323,16 @@ class BaseTest(unittest.TestCase): user = session.query(UserModel).filter_by(uid=user_uid).first() self.assertIsNotNone(user) - - rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), - headers=self.logged_in_headers(user=user), - content_type="application/json", - data=json.dumps(dict_data)) + if terminate_loop: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data?terminate_loop=true' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + content_type="application/json", + data=json.dumps(dict_data)) + else: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + content_type="application/json", + data=json.dumps(dict_data)) if error_code: self.assert_failure(rv, error_code=error_code) return @@ -311,17 +340,20 @@ class BaseTest(unittest.TestCase): self.assert_success(rv) json_data = json.loads(rv.get_data(as_text=True)) - # Assure stats are updated on the model + # Assure task events are updated on the model workflow = WorkflowApiSchema().load(json_data) # The total number of tasks may change over time, as users move through gateways # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created... self.assertIsNotNone(workflow.total_tasks) - self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) + # presumably, we also need to deal with sequential items here too . . 
+ if not task_in.multi_instance_type == 'looping': + self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) # Assure a record exists in the Task Events task_events = session.query(TaskEventModel) \ .filter_by(workflow_id=workflow.id) \ .filter_by(task_id=task_id) \ + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ .order_by(TaskEventModel.date.desc()).all() self.assertGreater(len(task_events), 0) event = task_events[0] @@ -335,7 +367,8 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.name, event.task_name) self.assertEqual(task_in.title, event.task_title) self.assertEqual(task_in.type, event.task_type) - self.assertEqual("COMPLETED", event.task_state) + if not task_in.multi_instance_type == 'looping': + self.assertEqual("COMPLETED", event.task_state) # Not sure what voodoo is happening inside of marshmallow to get me in this state. if isinstance(task_in.multi_instance_type, MultiInstanceType): @@ -344,7 +377,10 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.multi_instance_type, event.mi_type) self.assertEqual(task_in.multi_instance_count, event.mi_count) - self.assertEqual(task_in.multi_instance_index, event.mi_index) + if task_in.multi_instance_type == 'looping' and not terminate_loop: + self.assertEqual(task_in.multi_instance_index+1, event.mi_index) + else: + self.assertEqual(task_in.multi_instance_index, event.mi_index) self.assertEqual(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) diff --git a/tests/data/decision_table_invalid/bad_dmn.dmn b/tests/data/decision_table_invalid/bad_dmn.dmn new file mode 100644 index 00000000..fc846175 --- /dev/null +++ b/tests/data/decision_table_invalid/bad_dmn.dmn @@ -0,0 +1,50 @@ + + + + + + + + + + 1 + + + + + + + 0 + + + 0 + + + + 'one' can't be evaluated, it must be quoted + + 1 + + + one + + + + + 2 + + + 2 + + + + + > 2 + + + 3 + + + + + diff --git a/tests/data/decision_table_invalid/decision_table_invalid.bpmn b/tests/data/decision_table_invalid/decision_table_invalid.bpmn new file mode 100644 index 00000000..bbf0473a --- /dev/null +++ b/tests/data/decision_table_invalid/decision_table_invalid.bpmn @@ -0,0 +1,56 @@ + + + + + SequenceFlow_1ma1wxb + + + + SequenceFlow_1ma1wxb + SequenceFlow_0grui6f + + + # Great Work! + +Based on the information you provided (Ginger left {{num_presents}}, we recommend the following statement be provided to Ginger: + +## {{message}} + +We hope you both have an excellent day! + SequenceFlow_0grui6f + + + + This DMN isn't provided enough information to execute + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/docx/docx.bpmn b/tests/data/docx/docx.bpmn index a95feb07..8c741114 100644 --- a/tests/data/docx/docx.bpmn +++ b/tests/data/docx/docx.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_0637d8i @@ -27,7 +27,7 @@ SequenceFlow_1i7hk1a SequenceFlow_11c35oq - CompleteTemplate Letter.docx AD_CoCApp + #! CompleteTemplate Letter.docx AD_CoCApp SequenceFlow_11c35oq diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn new file mode 100644 index 00000000..11ecec2e --- /dev/null +++ b/tests/data/email/email.bpmn @@ -0,0 +1,67 @@ + + + + + Flow_1synsig + + + Flow_1xlrgne + + + # Dear Approver +## you have been requested for approval + + +--- +New request submitted by {{ PIComputingID }} + +Email content to be delivered to {{ ApprvlApprvr1 }} + +--- + Flow_08n2npe + Flow_1xlrgne + #! 
Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID + + + + + + + + + + + + Flow_1synsig + Flow_08n2npe + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/enum_options_from_task_data/enum_options_from_task_data.bpmn b/tests/data/enum_options_from_task_data/enum_options_from_task_data.bpmn new file mode 100644 index 00000000..5be4401a --- /dev/null +++ b/tests/data/enum_options_from_task_data/enum_options_from_task_data.bpmn @@ -0,0 +1,100 @@ + + + + + SequenceFlow_0lvudp8 + + + + SequenceFlow_02vev7n + + + + + + + + + + + + + + + Flow_1yet4a9 + SequenceFlow_02vev7n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0lvudp8 + Flow_1yet4a9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/invalid_roles/invalid_roles.bpmn b/tests/data/invalid_roles/invalid_roles.bpmn new file mode 100644 index 00000000..de10f712 --- /dev/null +++ b/tests/data/invalid_roles/invalid_roles.bpmn @@ -0,0 +1,177 @@ + + + + + + + + + StartEvent_1 + Activity_1hljoeq + Event_0lscajc + Activity_19ccxoj + + + Gateway_1fkgc4u + Activity_14eor1x + + + + Flow_0a7090c + + + # Answer me these questions 3, ere the other side you see! + + + + + + + + Flow_0a7090c + Flow_070gq5r + Flow_1hcpt7c + + + Flow_1gp4zfd + Flow_0vnghsi + Flow_1g38q6b + + + # Your responses were approved! + + +Gosh! you must really know a lot about colors and swallows and stuff! +Your supervisor provided the following feedback: + + +{{feedback}} + + +You are all done! WARNING: If you go back and reanswer the questions it will create a new approval request. + + + + + + + Flow_1g38q6b + + + # Your Request was rejected + + +Perhaps you don't know the right answer to one of the questions. +Your Supervisor provided the following feedback: + + +{{feedback}} + + +Please press save to re-try the questions, and submit your responses again. + + + + + + + Flow_0vnghsi + Flow_070gq5r + + + + + + + + + Flow_1hcpt7c + Flow_1gp4zfd + + + + + approval==True + + + approval==True + + + + + Removed a field that would set the supervisor, making this not validate. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/invalid_script/invalid_script.bpmn b/tests/data/invalid_script/invalid_script.bpmn index 80417e90..b85e2bc4 100644 --- a/tests/data/invalid_script/invalid_script.bpmn +++ b/tests/data/invalid_script/invalid_script.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_1pnq3kg @@ -11,7 +11,7 @@ SequenceFlow_1pnq3kg SequenceFlow_12pf6um - NoSuchScript withArg1 + #! 
NoSuchScript withArg1 diff --git a/tests/data/invalid_script2/invalid_script2.bpmn b/tests/data/invalid_script2/invalid_script2.bpmn new file mode 100644 index 00000000..b061e76c --- /dev/null +++ b/tests/data/invalid_script2/invalid_script2.bpmn @@ -0,0 +1,39 @@ + + + + + SequenceFlow_1pnq3kg + + + + SequenceFlow_12pf6um + + + SequenceFlow_1pnq3kg + SequenceFlow_12pf6um + a really bad error that should fail + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/ldap_response.json b/tests/data/ldap_response.json index f42fee94..cab99457 100644 --- a/tests/data/ldap_response.json +++ b/tests/data/ldap_response.json @@ -1,155 +1,124 @@ { - "entries": [ - { - "attributes": { - "cn": [ - "Laura Barnes (lb3dp)" - ], - "displayName": "Laura Barnes", - "givenName": [ - "Laura" - ], - "mail": [ - "lb3dp@virginia.edu" - ], - "objectClass": [ - "top", - "person", - "organizationalPerson", - "inetOrgPerson", - "uvaPerson", - "uidObject" - ], - "telephoneNumber": [ - "+1 (434) 924-1723" - ], - "title": [ - "E0:Associate Professor of Systems and Information Engineering" - ], - "uvaDisplayDepartment": [ - "E0:EN-Eng Sys and Environment" - ], - "uvaPersonIAMAffiliation": [ - "faculty" - ], - "uvaPersonSponsoredType": [ - "Staff" - ] - }, - "dn": "uid=lb3dp,ou=People,o=University of Virginia,c=US", - "raw": { - "cn": [ - "Laura Barnes (lb3dp)" - ], - "displayName": [ - "Laura Barnes" - ], - "givenName": [ - "Laura" - ], - "mail": [ - "lb3dp@virginia.edu" - ], - "objectClass": [ - "top", - "person", - "organizationalPerson", - "inetOrgPerson", - "uvaPerson", - "uidObject" - ], - "telephoneNumber": [ - "+1 (434) 924-1723" - ], - "title": [ - "E0:Associate Professor of Systems and Information Engineering" - ], - "uvaDisplayDepartment": [ - "E0:EN-Eng Sys and Environment" - ], - "uvaPersonIAMAffiliation": [ - "faculty" - ], - "uvaPersonSponsoredType": [ - "Staff" - ] - } - }, - { - "attributes": { - "cn": [ - "Dan Funk (dhf8r)" - ], - "displayName": "Dan Funk", - "givenName": [ - "Dan" - ], - "mail": [ - "dhf8r@virginia.edu" - ], - "objectClass": [ - "top", - "person", - "organizationalPerson", - "inetOrgPerson", - "uvaPerson", - "uidObject" - ], - "telephoneNumber": [ - "+1 (434) 924-1723" - ], - "title": [ - "E42:He's a hoopy frood" - ], - "uvaDisplayDepartment": [ - "E0:EN-Eng Study of Parallel Universes" - ], - "uvaPersonIAMAffiliation": [ - "faculty" - ], - "uvaPersonSponsoredType": [ - "Staff" - ] - }, - "dn": "uid=dhf8r,ou=People,o=University of Virginia,c=US", - "raw": { - "cn": [ - "Dan Funk (dhf84)" - ], - "displayName": [ - "Dan Funk" - ], - "givenName": [ - "Dan" - ], - "mail": [ - "dhf8r@virginia.edu" - ], - "objectClass": [ - "top", - "person", - "organizationalPerson", - "inetOrgPerson", - "uvaPerson", - "uidObject" - ], - "telephoneNumber": [ - "+1 (434) 924-1723" - ], - "title": [ - "E42:He's a hoopy frood" - ], - "uvaDisplayDepartment": [ - "E0:EN-Eng Study of Parallel Universes" - ], - "uvaPersonIAMAffiliation": [ - "faculty" - ], - "uvaPersonSponsoredType": [ - "Staff" - ] - } - } - - ] + "entries": [ + { + "dn": "uid=lb3dp,ou=People,o=University of Virginia,c=US", + "raw": { + "cn": [ + "Laura Barnes (lb3dp)" + ], + "displayName": [ + "Laura Barnes" + ], + "givenName": [ + "Laura" + ], + "mail": [ + "lb3dp@virginia.edu" + ], + "objectClass": [ + "top", + "person", + "organizationalPerson", + "inetOrgPerson", + "uvaPerson", + "uidObject" + ], + "telephoneNumber": [ + "+1 (434) 924-1723" + ], + "title": [ + "E0:Associate Professor of Systems and Information Engineering" + 
], + "uvaDisplayDepartment": [ + "E0:EN-Eng Sys and Environment" + ], + "uvaPersonIAMAffiliation": [ + "faculty" + ], + "uvaPersonSponsoredType": [ + "Staff" + ] + } + }, + { + "dn": "uid=dhf8r,ou=People,o=University of Virginia,c=US", + "raw": { + "cn": [ + "Dan Funk (dhf84)" + ], + "displayName": [ + "Dan Funk" + ], + "givenName": [ + "Dan" + ], + "mail": [ + "dhf8r@virginia.edu" + ], + "objectClass": [ + "top", + "person", + "organizationalPerson", + "inetOrgPerson", + "uvaPerson", + "uidObject" + ], + "telephoneNumber": [ + "+1 (434) 924-1723" + ], + "title": [ + "E42:He's a hoopy frood" + ], + "uvaDisplayDepartment": [ + "E0:EN-Eng Study of Parallel Universes" + ], + "uvaPersonIAMAffiliation": [ + "faculty" + ], + "uvaPersonSponsoredType": [ + "Staff" + ] + } + }, + { + "dn": "uid=lje5u,ou=People,o=University of Virginia,c=US", + "raw": { + "cn": [ + "Elder, Lori J (lje5u)" + ], + "displayName": [ + "Lori Elder" + ], + "givenName": [ + "Lori" + ], + "mail": [ + "lje5u@virginia.edu" + ], + "objectClass": [ + "top", + "person", + "organizationalPerson", + "inetOrgPerson", + "uvaPerson", + "uidObject" + ], + "telephoneNumber": [ + "+1 (434) 924-1723" + ], + "title": [ + "E42:The vision" + ], + "uvaDisplayDepartment": [ + "E0:EN-Phy Anything could go here." + ], + "uvaPersonIAMAffiliation": [ + "faculty" + ], + "uvaPersonSponsoredType": [ + "Staff" + ] + } + } + ] } \ No newline at end of file diff --git a/tests/data/looping_task/looping_task.bpmn b/tests/data/looping_task/looping_task.bpmn new file mode 100644 index 00000000..96b1b32f --- /dev/null +++ b/tests/data/looping_task/looping_task.bpmn @@ -0,0 +1,45 @@ + + + + + Flow_0vlor2k + + + + + + + + + Flow_0vlor2k + Flow_1tvod7v + + + + Flow_1tvod7v + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/multi_instance/multi_instance.bpmn b/tests/data/multi_instance/multi_instance.bpmn index d53f7b17..c1e610a5 100644 --- a/tests/data/multi_instance/multi_instance.bpmn +++ b/tests/data/multi_instance/multi_instance.bpmn @@ -1,5 +1,5 @@ - + Flow_0t6p1sb @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.NETBADGEID}} ## Role: {{investigator.INVESTIGATORTYPEFULL}} @@ -25,11 +25,11 @@ Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp - StudyInfo investigators + #! StudyInfo investigators @@ -58,7 +58,7 @@ - + diff --git a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn index ba1fd76b..d20c8499 100644 --- a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn +++ b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn @@ -1,5 +1,5 @@ - + Flow_0t6p1sb @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.user_id}} ## Role: {{investigator.type_full}} @@ -17,16 +17,19 @@ + + + SequenceFlow_1p568pp Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp - StudyInfo investigators + #! 
StudyInfo investigators @@ -55,7 +58,7 @@ - + diff --git a/tests/data/pb_responses/investigators.json b/tests/data/pb_responses/investigators.json index b0c1c38f..e476c453 100644 --- a/tests/data/pb_responses/investigators.json +++ b/tests/data/pb_responses/investigators.json @@ -13,5 +13,15 @@ "INVESTIGATORTYPE": "PI", "INVESTIGATORTYPEFULL": "Primary Investigator", "NETBADGEID": "dhf8r" + }, + { + "INVESTIGATORTYPE": "SI", + "INVESTIGATORTYPEFULL": "Sub Investigator", + "NETBADGEID": "ajl2j" + }, + { + "INVESTIGATORTYPE": "SI", + "INVESTIGATORTYPEFULL": "Sub Investigator", + "NETBADGEID": "cah3us" } -] \ No newline at end of file +] diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn index 628f1bd4..d5ffcbed 100644 --- a/tests/data/random_fact/random_fact.bpmn +++ b/tests/data/random_fact/random_fact.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_0c7wlth @@ -132,7 +132,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see) SequenceFlow_0641sh6 SequenceFlow_0t29gjo - FactService + #! FactService # Great Job! @@ -175,9 +175,6 @@ Your random fact is: - - - @@ -187,6 +184,9 @@ Your random fact is: + + + diff --git a/tests/data/roles/roles.bpmn b/tests/data/roles/roles.bpmn new file mode 100644 index 00000000..55331173 --- /dev/null +++ b/tests/data/roles/roles.bpmn @@ -0,0 +1,155 @@ + + + + + + + + + StartEvent_1 + Activity_1hljoeq + Event_0lscajc + Activity_19ccxoj + + + Gateway_1fkgc4u + Activity_14eor1x + + + + Flow_0a7090c + + + # Answer me these questions 3, ere the other side you see! + + + + + + + + + Flow_0a7090c + Flow_070gq5r + Flow_1hcpt7c + + + Flow_1gp4zfd + Flow_0vnghsi + Flow_1g38q6b + + + # Your responses were approved! + + +Gosh! you must really know a lot about colors and swallows and stuff! +Your supervisor provided the following feedback: + + +{{feedback}} + + +You are all done! WARNING: If you go back and reanswer the questions it will create a new approval request. + Flow_1g38q6b + + + # Your Request was rejected + + +Perhaps you don't know the right answer to one of the questions. +Your Supervisor provided the following feedback: + + +{{feedback}} + + +Please press save to re-try the questions, and submit your responses again. + Flow_0vnghsi + Flow_070gq5r + + + + + approval==False + + + approval==True + + + + + + + + + + + Flow_1hcpt7c + Flow_1gp4zfd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/data/study_details/study_details.bpmn b/tests/data/study_details/study_details.bpmn index b9aead94..2b46f935 100644 --- a/tests/data/study_details/study_details.bpmn +++ b/tests/data/study_details/study_details.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_1nfe5m9 @@ -8,7 +8,7 @@ SequenceFlow_1nfe5m9 SequenceFlow_1bqiin0 - StudyInfo info + #! StudyInfo info diff --git a/tests/data/top_level_workflow/top_level_workflow.bpmn b/tests/data/top_level_workflow/top_level_workflow.bpmn index cc6e1c57..8b1bb888 100644 --- a/tests/data/top_level_workflow/top_level_workflow.bpmn +++ b/tests/data/top_level_workflow/top_level_workflow.bpmn @@ -1,5 +1,5 @@ - + SequenceFlow_1ees8ka @@ -11,7 +11,7 @@ SequenceFlow_1ees8ka SequenceFlow_17ct47v - StudyInfo documents + #! 
StudyInfo documents Flow_1m8285h diff --git a/tests/emails/test_email_script.py b/tests/emails/test_email_script.py new file mode 100644 index 00000000..12a00fac --- /dev/null +++ b/tests/emails/test_email_script.py @@ -0,0 +1,39 @@ +from tests.base_test import BaseTest + +from crc.models.email import EmailModel +from crc.services.file_service import FileService +from crc.scripts.email import Email +from crc.services.workflow_processor import WorkflowProcessor +from crc.api.common import ApiError + +from crc import db, mail + + +class TestEmailScript(BaseTest): + + def test_do_task(self): + workflow = self.create_workflow('email') + + task_data = { + 'PIComputingID': 'dhf8r', + 'ApprvlApprvr1': 'lb3dp' + } + task = self.get_workflow_api(workflow).next_task + + with mail.record_messages() as outbox: + + self.complete_form(workflow, task, task_data) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Camunda Email Subject') + + # PI is present + self.assertIn(task_data['PIComputingID'], outbox[0].body) + self.assertIn(task_data['PIComputingID'], outbox[0].html) + + # Approver is present + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].body) + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) diff --git a/tests/emails/test_email_service.py b/tests/emails/test_email_service.py new file mode 100644 index 00000000..174dca13 --- /dev/null +++ b/tests/emails/test_email_service.py @@ -0,0 +1,45 @@ +from tests.base_test import BaseTest + +from crc import session +from crc.models.approval import ApprovalModel, ApprovalStatus +from crc.models.email import EmailModel +from crc.services.email_service import EmailService + + +class TestEmailService(BaseTest): + + def test_add_email(self): + self.load_example_data() + study = self.create_study() + workflow = self.create_workflow('random_fact') + + subject = 'Email Subject' + sender = 'sender@sartography.com' + recipients = ['recipient@sartography.com', 'back@sartography.com'] + content = 'Content for this email' + content_html = '
<h1>Hypertext Markup Language content for this email</h1>
' + + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, study_id=study.id) + + email_model = EmailModel.query.first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.study, study) + + subject = 'Email Subject - Empty study' + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html) + + email_model = EmailModel.query.order_by(EmailModel.id.desc()).first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.study, None) diff --git a/tests/emails/test_mails.py b/tests/emails/test_mails.py new file mode 100644 index 00000000..e9320f4d --- /dev/null +++ b/tests/emails/test_mails.py @@ -0,0 +1,113 @@ +from crc import mail +from crc.models.email import EmailModel +from crc.services.mails import ( + send_ramp_up_submission_email, + send_ramp_up_approval_request_email, + send_ramp_up_approval_request_first_review_email, + send_ramp_up_approved_email, + send_ramp_up_denied_email, + send_ramp_up_denied_email_to_approver +) +from tests.base_test import BaseTest + + +class TestMails(BaseTest): + + def setUp(self): + """Initial setup shared by all TestApprovals tests""" + self.load_example_data() + self.study = self.create_study() + self.workflow = self.create_workflow('random_fact') + + self.sender = 'sender@sartography.com' + self.recipients = ['recipient@sartography.com'] + self.primary_investigator = 'Dr. 
Bartlett' + self.approver_1 = 'Max Approver' + self.approver_2 = 'Close Reviewer' + + def test_send_ramp_up_submission_email(self): + with mail.record_messages() as outbox: + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_approval_request_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approval_request_first_review_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_first_review_email( + self.sender, self.recipients, self.primary_investigator + ) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approved_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_denied_email(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_send_ramp_up_denied_email_to_approver(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email_to_approver( + self.sender, self.recipients, self.primary_investigator, self.approver_2 + ) + + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + self.assertIn(self.approver_2, outbox[0].body) + 
self.assertIn(self.approver_2, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) diff --git a/tests/test_file_service.py b/tests/files/test_file_service.py similarity index 98% rename from tests/test_file_service.py rename to tests/files/test_file_service.py index 1dea810c..dd95e458 100644 --- a/tests/test_file_service.py +++ b/tests/files/test_file_service.py @@ -61,14 +61,14 @@ class TestFileService(BaseTest): # Archive the file file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(1, len(file_models)) + self.assertEqual(1, len(file_models)) file_model = file_models[0] file_model.archived = True db.session.add(file_model) # Assure that the file no longer comes back. file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(0, len(file_models)) + self.assertEqual(0, len(file_models)) # Add the file again with different data FileService.add_workflow_file(workflow_id=workflow.id, diff --git a/tests/test_files_api.py b/tests/files/test_files_api.py similarity index 98% rename from tests/test_files_api.py rename to tests/files/test_files_api.py index 2d14a8b5..59e6c1f6 100644 --- a/tests/test_files_api.py +++ b/tests/files/test_files_api.py @@ -91,7 +91,6 @@ class TestFilesApi(BaseTest): content_type='multipart/form-data', headers=self.logged_in_headers()) self.assert_success(rv) - def test_archive_file_no_longer_shows_up(self): self.load_example_data() self.create_reference_document() @@ -109,21 +108,16 @@ class TestFilesApi(BaseTest): self.assert_success(rv) rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(1, len(json.loads(rv.get_data(as_text=True)))) + self.assertEqual(1, len(json.loads(rv.get_data(as_text=True)))) file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all() - self.assertEquals(1, len(file_model)) + self.assertEqual(1, len(file_model)) file_model[0].archived = True db.session.commit() rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(0, len(json.loads(rv.get_data(as_text=True)))) - - - - - + self.assertEqual(0, len(json.loads(rv.get_data(as_text=True)))) def test_set_reference_file(self): file_name = "irb_document_types.xls" @@ -285,8 +279,8 @@ class TestFilesApi(BaseTest): .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\ .filter(ApprovalModel.study_id == workflow.study_id).all() - self.assertEquals(1, len(approvals)) - self.assertEquals(1, len(approvals[0].approval_files)) + self.assertEqual(1, len(approvals)) + self.assertEqual(1, len(approvals[0].approval_files)) def test_change_primary_bpmn(self): diff --git a/tests/test_study_api.py b/tests/study/test_study_api.py similarity index 94% rename from tests/test_study_api.py rename to tests/study/test_study_api.py index cdae21c5..3b781f50 100644 --- a/tests/test_study_api.py +++ b/tests/study/test_study_api.py @@ -7,7 +7,8 @@ from unittest.mock import patch from crc import session, app from crc.models.protocol_builder import ProtocolBuilderStatus, \ ProtocolBuilderStudySchema -from crc.models.stats import TaskEventModel +from crc.models.approval import ApprovalStatus +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel, StudySchema from crc.models.workflow import WorkflowSpecModel, WorkflowModel from crc.services.file_service import FileService @@ -95,8 +96,21 
@@ class TestStudyApi(BaseTest): # TODO: WRITE A TEST FOR STUDY FILES def test_get_study_has_details_about_approvals(self): - # TODO: WRITE A TEST FOR STUDY APPROVALS - pass + self.load_example_data() + full_study = self._create_study_workflow_approvals( + user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp", + approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value] + ) + + api_response = self.app.get('/v1.0/study/%i' % full_study['study'].id, + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response) + study = StudySchema().loads(api_response.get_data(as_text=True)) + + self.assertEqual(len(study.approvals), 2) + + for approval in study.approvals: + self.assertEqual(full_study['study'].title, approval['title']) def test_add_study(self): self.load_example_data() @@ -168,8 +182,6 @@ class TestStudyApi(BaseTest): num_open = 0 for study in json_data: - if study['protocol_builder_status'] == 'INCOMPLETE': # One study in user_studies.json is not q_complete - num_incomplete += 1 if study['protocol_builder_status'] == 'ABANDONED': # One study does not exist in user_studies.json num_abandoned += 1 if study['protocol_builder_status'] == 'ACTIVE': # One study is marked complete without HSR Number @@ -182,8 +194,8 @@ class TestStudyApi(BaseTest): self.assertGreater(num_db_studies_after, num_db_studies_before) self.assertEqual(num_abandoned, 1) self.assertEqual(num_open, 1) - self.assertEqual(num_active, 1) - self.assertEqual(num_incomplete, 1) + self.assertEqual(num_active, 2) + self.assertEqual(num_incomplete, 0) self.assertEqual(len(json_data), num_db_studies_after) self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after) diff --git a/tests/test_study_details_documents.py b/tests/study/test_study_details_documents.py similarity index 100% rename from tests/test_study_details_documents.py rename to tests/study/test_study_details_documents.py diff --git a/tests/test_study_service.py b/tests/study/test_study_service.py similarity index 88% rename from tests/test_study_service.py rename to tests/study/test_study_service.py index 1c482bcb..b436835f 100644 --- a/tests/test_study_service.py +++ b/tests/study/test_study_service.py @@ -183,7 +183,7 @@ class TestStudyService(BaseTest): @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_docs - def test_get_personnel(self, mock_docs): + def test_get_personnel_roles(self, mock_docs): self.load_example_data() # mock out the protocol builder @@ -191,9 +191,9 @@ class TestStudyService(BaseTest): mock_docs.return_value = json.loads(docs_response) workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case. - investigators = StudyService().get_investigators(workflow.study_id) + investigators = StudyService().get_investigators(workflow.study_id, all=True) - self.assertEqual(9, len(investigators)) + self.assertEqual(10, len(investigators)) # dhf8r is in the ldap mock data. 
self.assertEqual("dhf8r", investigators['PI']['user_id']) @@ -207,3 +207,26 @@ class TestStudyService(BaseTest): # No value is provided for Department Chair self.assertIsNone(investigators['DEPT_CH']['user_id']) + + @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_docs + def test_get_study_personnel(self, mock_docs): + self.load_example_data() + + # mock out the protocol builder + docs_response = self.protocol_builder_response('investigators.json') + mock_docs.return_value = json.loads(docs_response) + + workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case. + investigators = StudyService().get_investigators(workflow.study_id, all=False) + + self.assertEqual(5, len(investigators)) + + # dhf8r is in the ldap mock data. + self.assertEqual("dhf8r", investigators['PI']['user_id']) + self.assertEqual("Dan Funk", investigators['PI']['display_name']) # Data from ldap + self.assertEqual("Primary Investigator", investigators['PI']['label']) # Data from xls file. + self.assertEqual("Always", investigators['PI']['display']) # Data from xls file. + + # Both Alex and Aaron are SI, and both should be returned. + self.assertEqual("ajl2j", investigators['SI']['user_id']) + self.assertEqual("cah3us", investigators['SI_2']['user_id']) diff --git a/tests/test_update_study_script.py b/tests/study/test_update_study_script.py similarity index 100% rename from tests/test_update_study_script.py rename to tests/study/test_update_study_script.py diff --git a/tests/test_lookup_service.py b/tests/test_lookup_service.py index b61e20e2..a27427f4 100644 --- a/tests/test_lookup_service.py +++ b/tests/test_lookup_service.py @@ -61,6 +61,15 @@ class TestLookupService(BaseTest): lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all() self.assertEqual(4, len(lookup_data)) + def test_lookup_based_on_id(self): + spec = BaseTest.load_test_spec('enum_options_from_file') + workflow = self.create_workflow('enum_options_from_file') + processor = WorkflowProcessor(workflow) + processor.do_engine_steps() + results = LookupService.lookup(workflow, "AllTheNames", "", value="1000", limit=10) + self.assertEqual(1, len(results), "It is possible to find an item based on the id, rather than as a search") + self.assertIsNotNone(results[0].data) + self.assertIsInstance(results[0].data, dict) def test_some_full_text_queries(self): @@ -114,6 +123,9 @@ class TestLookupService(BaseTest): results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10) self.assertEqual("1 Something", results[0].label, "special characters don't flake out") + results = LookupService.lookup(workflow, "AllTheNames", "1 Something", limit=10) + self.assertEqual("1 Something", results[0].label, "double spaces should not be an issue.") + # 1018 10000 Something Industry diff --git a/tests/test_looping_task.py b/tests/test_looping_task.py new file mode 100644 index 00000000..e56e0877 --- /dev/null +++ b/tests/test_looping_task.py @@ -0,0 +1,54 @@ +from unittest.mock import patch + +from crc import session +from crc.models.api_models import MultiInstanceType +from crc.models.study import StudyModel +from crc.models.workflow import WorkflowStatus +from crc.services.study_service import StudyService +from crc.services.workflow_processor import WorkflowProcessor +from crc.services.workflow_service import WorkflowService +from tests.base_test import BaseTest + + +class TestWorkflowProcessorLoopingTask(BaseTest): + """Tests the 
Workflow Processor as it deals with a Looping task""" + + def _populate_form_with_random_data(self, task): + api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) + WorkflowService.populate_form_with_random_data(task, api_task, required_only=False) + + def get_processor(self, study_model, spec_model): + workflow_model = StudyService._create_workflow_model(study_model, spec_model) + return WorkflowProcessor(workflow_model) + + def test_create_and_complete_workflow(self): + # This depends on getting a list of investigators back from the protocol builder. + + workflow = self.create_workflow('looping_task') + task = self.get_workflow_api(workflow).next_task + + self.assertEqual("GetNames", task.name) + + self.assertEqual(task.multi_instance_type, 'looping') + self.assertEqual(1, task.multi_instance_index) + self.complete_form(workflow,task,{'GetNames_CurrentVar':{'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) + task = self.get_workflow_api(workflow).next_task + + self.assertEqual(task.multi_instance_type,'looping') + self.assertEqual(2, task.multi_instance_index) + self.complete_form(workflow, + task, + {'GetNames_CurrentVar':{'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, + terminate_loop=True) + + task = self.get_workflow_api(workflow).next_task + self.assertEqual(task.name,'Event_End') + self.assertEqual(workflow.completed_tasks,workflow.total_tasks) + self.assertEqual(task.data, {'GetNames_CurrentVar': 2, + 'GetNames': {'1': {'Name': 'Peter Norvig', + 'Nickname': 'Pete'}, + '2': {'Name': 'Stuart Russell', + 'Nickname': 'Stu'}}}) + + + diff --git a/tests/test_mails.py b/tests/test_mails.py deleted file mode 100644 index 15a01583..00000000 --- a/tests/test_mails.py +++ /dev/null @@ -1,55 +0,0 @@ - -from tests.base_test import BaseTest - -from crc.services.mails import ( - send_ramp_up_submission_email, - send_ramp_up_approval_request_email, - send_ramp_up_approval_request_first_review_email, - send_ramp_up_approved_email, - send_ramp_up_denied_email, - send_ramp_up_denied_email_to_approver -) - - -class TestMails(BaseTest): - - def setUp(self): - self.sender = 'sender@sartography.com' - self.recipients = ['recipient@sartography.com'] - self.primary_investigator = 'Dr. 
Bartlett' - self.approver_1 = 'Max Approver' - self.approver_2 = 'Close Reviewer' - - def test_send_ramp_up_submission_email(self): - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_email(self): - send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_first_review_email(self): - send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.primary_investigator - ) - self.assertTrue(True) - - def test_send_ramp_up_approved_email(self): - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_denied_email(self): - send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - def test_send_send_ramp_up_denied_email_to_approver(self): - send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.primary_investigator, self.approver_2 - ) - self.assertTrue(True) diff --git a/tests/test_protocol_builder.py b/tests/test_protocol_builder.py index e5b75632..2a77ec05 100644 --- a/tests/test_protocol_builder.py +++ b/tests/test_protocol_builder.py @@ -24,7 +24,7 @@ class TestProtocolBuilder(BaseTest): mock_get.return_value.text = self.protocol_builder_response('investigators.json') response = ProtocolBuilderService.get_investigators(self.test_study_id) self.assertIsNotNone(response) - self.assertEqual(3, len(response)) + self.assertEqual(5, len(response)) self.assertEqual("DC", response[0]["INVESTIGATORTYPE"]) self.assertEqual("Department Contact", response[0]["INVESTIGATORTYPEFULL"]) self.assertEqual("asd3v", response[0]["NETBADGEID"]) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 654b777e..8284313d 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -4,6 +4,7 @@ import random from unittest.mock import patch from tests.base_test import BaseTest + from crc import session, app from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema from crc.models.file import FileModelSchema @@ -12,6 +13,18 @@ from crc.models.workflow import WorkflowStatus class TestTasksApi(BaseTest): + def assert_options_populated(self, results, lookup_data_keys): + option_keys = ['value', 'label', 'data'] + self.assertIsInstance(results, list) + for result in results: + for option_key in option_keys: + self.assertTrue(option_key in result, 'should have value, label, and data properties populated') + self.assertIsNotNone(result[option_key], '%s should not be None' % option_key) + + self.assertIsInstance(result['data'], dict) + for lookup_data_key in lookup_data_keys: + self.assertTrue(lookup_data_key in result['data'], 'should have all lookup data columns populated') + def test_get_current_user_tasks(self): self.load_example_data() workflow = self.create_workflow('random_fact') @@ -250,7 +263,7 @@ class TestTasksApi(BaseTest): self.assertEqual(4, len(navigation)) # Start task, form_task, multi_task, end task self.assertEqual("UserTask", workflow.next_task.type) self.assertEqual(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type) - self.assertEqual(9, 
workflow.next_task.multi_instance_count) + self.assertEqual(5, workflow.next_task.multi_instance_count) # Assure that the names for each task are properly updated, so they aren't all the same. self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name']) @@ -270,6 +283,80 @@ class TestTasksApi(BaseTest): self.assert_success(rv) results = json.loads(rv.get_data(as_text=True)) self.assertEqual(5, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + + def test_lookup_endpoint_for_task_field_using_lookup_entry_id(self): + self.load_example_data() + workflow = self.create_workflow('enum_options_with_search') + # get the first form of the workflow. + workflow = self.get_workflow_api(workflow) + task = workflow.next_task + field_id = task.form['fields'][0]['id'] + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?query=%s&limit=5' % + (workflow.id, field_id, 'c'), # All records with a word that starts with 'c' + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(5, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' % + (workflow.id, field_id, results[0]['value']), # Look up the single record that matches this value + headers=self.logged_in_headers(), + content_type="application/json") + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(1, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + self.assertNotIn('id', results[0], "Don't include the internal id, that can be very confusing, and should not be used.") + + def test_lookup_endpoint_also_works_for_enum(self): + # Naming here gets a little confusing. Fields can be marked as enum or autocomplete. + # In the event of an auto-complete it's a type-ahead search field, for an enum + # the key/values from the spreadsheet are added directly to the form and it shows up as + # a dropdown. This tests the case of wanting to get additional data when a user selects + # something from a dropdown. + self.load_example_data() + workflow = self.create_workflow('enum_options_from_file') + # get the first form of the workflow. + workflow = self.get_workflow_api(workflow) + task = workflow.next_task + field_id = task.form['fields'][0]['id'] + option_id = task.form['fields'][0]['options'][0]['id'] + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' % + (workflow.id, field_id, option_id), # Look up the extra data for the selected option + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(1, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + self.assertIsInstance(results[0]['data'], dict) + + def test_enum_from_task_data(self): + self.load_example_data() + workflow = self.create_workflow('enum_options_from_task_data') + # get the first form of the workflow.
+ workflow_api = self.get_workflow_api(workflow) + task = workflow_api.next_task + + workflow_api = self.complete_form(workflow, task, {'invitees': [ + {'first_name': 'Alistair', 'last_name': 'Aardvark', 'age': 43, 'likes_pie': True, 'num_lumps': 21, 'secret_id': 'Antimony', 'display_name': 'Professor Alistair A. Aardvark'}, + {'first_name': 'Berthilda', 'last_name': 'Binturong', 'age': 12, 'likes_pie': False, 'num_lumps': 34, 'secret_id': 'Beryllium', 'display_name': 'Dr. Berthilda B. Binturong'}, + {'first_name': 'Chesterfield', 'last_name': 'Capybara', 'age': 32, 'likes_pie': True, 'num_lumps': 1, 'secret_id': 'Cadmium', 'display_name': 'The Honorable C. C. Capybara'}, + ]}) + task = workflow_api.next_task + + field_id = task.form['fields'][0]['id'] + options = task.form['fields'][0]['options'] + self.assertEqual(3, len(options)) + option_id = options[0]['id'] + self.assertEqual('Professor Alistair A. Aardvark', options[0]['name']) + self.assertEqual('Dr. Berthilda B. Binturong', options[1]['name']) + self.assertEqual('The Honorable C. C. Capybara', options[2]['name']) + self.assertEqual('Alistair', options[0]['data']['first_name']) + self.assertEqual('Berthilda', options[1]['data']['first_name']) + self.assertEqual('Chesterfield', options[2]['data']['first_name']) def test_lookup_endpoint_for_task_ldap_field_lookup(self): self.load_example_data() @@ -285,6 +372,9 @@ class TestTasksApi(BaseTest): content_type="application/json") self.assert_success(rv) results = json.loads(rv.get_data(as_text=True)) + self.assert_options_populated(results, ['telephone_number', 'affiliation', 'uid', 'title', + 'given_name', 'department', 'date_cached', 'sponsor_type', + 'display_name', 'email_address']) self.assertEqual(1, len(results)) def test_sub_process(self): @@ -299,13 +389,13 @@ class TestTasksApi(BaseTest): self.assertEqual("UserTask", task.type) self.assertEqual("Activity_A", task.name) self.assertEqual("My Sub Process", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldA": "Dan"}) task = workflow_api.next_task self.assertIsNotNone(task) self.assertEqual("Activity_B", task.name) self.assertEqual("Sub Workflow Example", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldB": "Dan"}) self.assertEqual(WorkflowStatus.complete, workflow_api.status) def test_update_task_resets_token(self): @@ -363,17 +453,25 @@ class TestTasksApi(BaseTest): workflow = self.create_workflow('multi_instance_parallel') workflow_api = self.get_workflow_api(workflow) - self.assertEqual(12, len(workflow_api.navigation)) + self.assertEqual(8, len(workflow_api.navigation)) ready_items = [nav for nav in workflow_api.navigation if nav['state'] == "READY"] - self.assertEqual(9, len(ready_items)) + self.assertEqual(5, len(ready_items)) self.assertEqual("UserTask", workflow_api.next_task.type) - self.assertEqual("MutiInstanceTask",workflow_api.next_task.name) - self.assertEqual("more information", workflow_api.next_task.title) + self.assertEqual("MultiInstanceTask",workflow_api.next_task.name) + self.assertEqual("Primary Investigator", workflow_api.next_task.title) - for i in random.sample(range(9), 9): + for i in random.sample(range(5), 5): task = TaskSchema().load(ready_items[i]['task']) - self.complete_form(workflow, task, {"investigator":{"email": "dhf8r@virginia.edu"}}) + rv = self.app.put('/v1.0/workflow/%i/task/%s/set_token' % 
(workflow.id, task.id), + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + workflow = WorkflowApiSchema().load(json_data) + data = workflow.next_task.data + data['investigator']['email'] = "dhf8r@virginia.edu" + self.complete_form(workflow, task, data) #tasks = self.get_workflow_api(workflow).user_tasks workflow = self.get_workflow_api(workflow) diff --git a/tests/test_tools_api.py b/tests/test_tools_api.py index c6f543c1..3ddf9fea 100644 --- a/tests/test_tools_api.py +++ b/tests/test_tools_api.py @@ -37,3 +37,12 @@ class TestStudyApi(BaseTest): self.assertTrue(len(scripts) > 1) self.assertIsNotNone(scripts[0]['name']) self.assertIsNotNone(scripts[0]['description']) + + def test_eval_hide_expression(self): + """Assures we can use python to process a hide expression fron the front end""" + rv = self.app.put('/v1.0/eval?expression=x.y==2', + data='{"x":{"y":2}}', follow_redirects=True, + content_type='application/json', + headers=self.logged_in_headers()) + self.assert_success(rv) + self.assertEqual("true", rv.get_data(as_text=True).strip()) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py new file mode 100644 index 00000000..6104641c --- /dev/null +++ b/tests/test_user_roles.py @@ -0,0 +1,202 @@ +import json + +from tests.base_test import BaseTest +from crc.models.workflow import WorkflowStatus +from crc import db +from crc.api.common import ApiError +from crc.models.task_event import TaskEventModel, TaskEventSchema +from crc.services.workflow_service import WorkflowService + + +class TestTasksApi(BaseTest): + + def test_raise_error_if_role_does_not_exist_in_data(self): + workflow = self.create_workflow('roles', as_user="lje5u") + workflow_api = self.get_workflow_api(workflow, user_uid="lje5u") + data = workflow_api.next_task.data + # User lje5u can complete the first task + self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u") + + # The next task is a supervisor task, and should raise an error if the role + # information is not in the task data. + workflow_api = self.get_workflow_api(workflow, user_uid="lje5u") + data = workflow_api.next_task.data + data["approved"] = True + result = self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u", + error_code="permission_denied") + + def test_validation_of_workflow_fails_if_workflow_does_not_define_user_for_lane(self): + error = None + try: + workflow = self.create_workflow('invalid_roles', as_user="lje5u") + WorkflowService.test_spec(workflow.workflow_spec_id) + except ApiError as ae: + error = ae + self.assertIsNotNone(error, "An error should be raised.") + self.assertEquals("invalid_role", error.code) + + def test_raise_error_if_user_does_not_have_the_correct_role(self): + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # But she can not complete the supervisor role. 
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + data = workflow_api.next_task.data + data["approval"] = True + result = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid, + error_code="permission_denied") + + # Only her supervisor can do that. + self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + def test_nav_includes_lanes(self): + submitter = self.create_user(uid='lje5u') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals("supervisor", nav[1]['lane']) + + def test_get_outstanding_tasks_awaiting_current_user(self): + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # At this point there should be a task_log with an action of Lane Change on it for + # the supervisor. + task_logs = db.session.query(TaskEventModel). \ + filter(TaskEventModel.user_uid == supervisor.uid). \ + filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).all() + self.assertEquals(1, len(task_logs)) + + # A call to the /task endpoint as the supervisor user should return a list of + # tasks that need their attention. + rv = self.app.get('/v1.0/task_events?action=ASSIGNMENT', + headers=self.logged_in_headers(supervisor), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + tasks = TaskEventSchema(many=True).load(json_data) + self.assertEquals(1, len(tasks)) + self.assertEquals(workflow.id, tasks[0]['workflow']['id']) + self.assertEquals(workflow.study.id, tasks[0]['study']['id']) + + # Assure we can say something sensible like: + # You have a task called "Approval" to be completed in the "Supervisor Approval" workflow + # for the study 'Why dogs are stinky' managed by user "Jane Smith (js42x)", + # please check here to complete the task. + # Display name isn't set in the tests, so just checking name, but the full workflow details are included. + # I didn't delve into the full user details to keep things decoupled from ldap, so you just get the + # uid back, but could query to get the full entry. + self.assertEquals("roles", tasks[0]['workflow']['name']) + self.assertEquals("Beer consumption in the bipedal software engineer", tasks[0]['study']['title']) + self.assertEquals("lje5u", tasks[0]['study']['user_uid']) + + # Completing the next step of the workflow will close the task. + data['approval'] = True + self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + def test_navigation_and_current_task_updates_through_workflow(self): + + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + + # Navigation as Submitter with ready task. 
+ def test_navigation_and_current_task_updates_through_workflow(self): + + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + + # Navigation as Submitter with ready task. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('READY', nav[0]['state']) # First item is ready, no progress yet. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. + self.assertEquals('NOOP', nav[3]['state']) # Approved Path, has no operation + self.assertEquals('NOOP', nav[4]['state']) # Rejected Path, has no operation. + self.assertEquals('READY', workflow_api.next_task.state) + + # Navigation as Submitter after handoff to supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals('COMPLETED', nav[0]['state']) # First item is now completed. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. + self.assertEquals('LOCKED', workflow_api.next_task.state) + # In the event the next task is locked, we should say something sensible here. + # It is possible to look at the role of the task and say: The next task "TASK TITLE" will + # be handled by 'dhf8r', who is fulfilling the role of supervisor. The task data + # is guaranteed to have a supervisor attribute containing that user's uid, which + # could be looked up through an LDAP service. + self.assertEquals('supervisor', workflow_api.next_task.lane) + + + # Navigation as Supervisor + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked. + self.assertEquals('READY', nav[1]['state']) # Second item is ready, it is the review and belongs to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. + self.assertEquals('READY', workflow_api.next_task.state) + + data = workflow_api.next_task.data + data["approval"] = False + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + # Navigation as Supervisor, after completing task. + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked. + self.assertEquals('COMPLETED', nav[1]['state']) # Second item, the review, is now completed. + self.assertEquals('COMPLETED', nav[2]['state']) # third item is a gateway, and is now complete. + self.assertEquals('LOCKED', workflow_api.next_task.state) + + # Navigation as Submitter, coming back into a rejected workflow to view the rejection message. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('COMPLETED', nav[0]['state']) # First item belongs to the submitter, and is completed. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. + self.assertEquals('READY', workflow_api.next_task.state) + + # Navigation as Submitter, re-completing the original request a second time, and sending it for review. + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('COMPLETED', nav[0]['state']) # We still have some issues here, the navigation will be off when looping back. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. + self.assertEquals('READY', workflow_api.next_task.state) + + data["favorite_color"] = "blue" + data["quest"] = "to seek the holy grail" + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals('LOCKED', workflow_api.next_task.state) + + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + self.assertEquals('READY', workflow_api.next_task.state) + + data = workflow_api.next_task.data + data["approval"] = True + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals('LOCKED', workflow_api.next_task.state) + + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + self.assertEquals('COMPLETED', workflow_api.next_task.state) + self.assertEquals('EndEvent', workflow_api.next_task.type) # We are at the end. + self.assertEquals(WorkflowStatus.complete, workflow_api.status) \ No newline at end of file
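Taken together, these tests pin down a simple contract between BPMN lanes and task data: a lane's name is expected to appear as a key in the task data, and its value is the uid of the user allowed to complete tasks in that lane. A minimal sketch of that check, with names of our own choosing (the real logic lives in WorkflowService and is not shown in this diff):

def user_may_complete(task_data, lane, user_uid):
    # Hypothetical permission check mirroring the behavior the tests assert.
    if lane is None:
        return True  # un-laned tasks are open to the user driving the workflow
    # A missing lane key and a non-matching uid both surface to the API
    # client as a "permission_denied" error in the tests above.
    return task_data.get(lane) == user_uid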
diff --git a/tests/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py similarity index 88% rename from tests/test_workflow_processor.py rename to tests/workflow/test_workflow_processor.py index b3f6c374..a51f029d 100644 --- a/tests/test_workflow_processor.py +++ b/tests/workflow/test_workflow_processor.py @@ -187,7 +187,7 @@ class TestWorkflowProcessor(BaseTest): file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn') self.replace_file("two_forms.bpmn", file_path) - # Attemping a soft update on a structural change should raise a sensible error. + # Attempting a soft update on a structural change should raise a sensible error. with self.assertRaises(ApiError) as context: processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True) self.assertEqual("unexpected_workflow_structure", context.exception.code) @@ -270,53 +270,6 @@ class TestWorkflowProcessor(BaseTest): processor = self.get_processor(study, workflow_spec_model) self.assertTrue(processor.get_version_string().startswith('v2.1.1')) - def test_restart_workflow(self): - self.load_example_data() - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"key": "Value"} - processor.complete_task(task) - task_before_restart = processor.next_task() - processor.hard_reset() - task_after_restart = processor.next_task() - - self.assertNotEqual(task.get_name(), task_before_restart.get_name()) - self.assertEqual(task.get_name(), task_after_restart.get_name()) - self.assertEqual(task.data, task_after_restart.data) - - def test_soft_reset(self): - self.load_example_data() - - # Start the two_forms workflow, and enter some data in the first form. - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"color": "blue"} - processor.complete_task(task) - - # Modify the specification, with a minor text change. - file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn') - self.replace_file("two_forms.bpmn", file_path) - - # Setting up another processor should not error out, but doesn't pick up the update. - processor.workflow_model.bpmn_workflow_json = processor.serialize() - processor2 = WorkflowProcessor(processor.workflow_model) - self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description) - self.assertNotEqual("# This is some documentation I wanted to add.", - processor2.bpmn_workflow.last_task.task_spec.documentation) - - # You can do a soft update and get the right response. - processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True) - self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description) - self.assertEqual("# This is some documentation I wanted to add.", - processor3.bpmn_workflow.last_task.task_spec.documentation) - - def test_hard_reset(self): self.load_example_data() @@ -344,8 +297,10 @@ class TestWorkflowProcessor(BaseTest): # Do a hard reset, which should bring us back to the beginning, but retain the data. processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True) self.assertEqual("Step 1", processor3.next_task().task_spec.description) - self.assertEqual({"color": "blue"}, processor3.next_task().data) - processor3.complete_task(processor3.next_task()) + self.assertTrue(processor3.is_latest_spec) # Now at version 2.
+ task = processor3.next_task() + task.data = {"color": "blue"} + processor3.complete_task(task) self.assertEqual("New Step", processor3.next_task().task_spec.description) self.assertEqual("blue", processor3.next_task().data["color"]) @@ -413,4 +368,19 @@ class TestWorkflowProcessor(BaseTest): task.task_spec.form.fields.append(field) with self.assertRaises(ApiError): - self._populate_form_with_random_data(task) \ No newline at end of file + self._populate_form_with_random_data(task) + + + def test_get_role_by_name(self): + self.load_example_data() + workflow_spec_model = self.load_test_spec("roles") + study = session.query(StudyModel).first() + processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() + tasks = processor.next_user_tasks() + task = tasks[0] + self._populate_form_with_random_data(task) + processor.complete_task(task) + supervisor_task = processor.next_user_tasks()[0] + self.assertEquals("supervisor", supervisor_task.task_spec.lane) + diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py similarity index 62% rename from tests/test_workflow_processor_multi_instance.py rename to tests/workflow/test_workflow_processor_multi_instance.py index aefb73f1..a67cae7f 100644 --- a/tests/test_workflow_processor_multi_instance.py +++ b/tests/workflow/test_workflow_processor_multi_instance.py @@ -1,13 +1,13 @@ from unittest.mock import patch +from tests.base_test import BaseTest -from crc import session +from crc import session, db from crc.models.api_models import MultiInstanceType from crc.models.study import StudyModel -from crc.models.workflow import WorkflowStatus +from crc.models.workflow import WorkflowStatus, WorkflowModel from crc.services.study_service import StudyService from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService -from tests.base_test import BaseTest class TestWorkflowProcessorMultiInstance(BaseTest): @@ -32,7 +32,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): 'error': 'Unable to locate a user with id asd3v in LDAP'}} def _populate_form_with_random_data(self, task): - WorkflowProcessor.populate_form_with_random_data(task) + WorkflowService.populate_form_with_random_data(task) def get_processor(self, study_model, spec_model): workflow_model = StudyService._create_workflow_model(study_model, spec_model) @@ -51,51 +51,72 @@ class TestWorkflowProcessorMultiInstance(BaseTest): self.assertIsNotNone(processor) self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) processor.bpmn_workflow.do_engine_steps() - next_user_tasks = processor.next_user_tasks() - self.assertEqual(1, len(next_user_tasks)) - - task = next_user_tasks[0] + workflow_api = WorkflowService.processor_to_workflow_api(processor) + self.assertIsNotNone(workflow_api) + self.assertIsNotNone(workflow_api.next_task) + # 1st investigator + api_task = workflow_api.next_task self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) - self.assertEqual("dhf8r", task.data["investigator"]["user_id"]) - - self.assertEqual("MutiInstanceTask", task.get_name()) - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type) + self.assertEqual("dhf8r", api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(1, 
api_task.multi_instance_index) - task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", api_task.name) - task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) + # 2nd investigator + api_task = workflow_api.next_task + self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual(None, api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(2, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + # 3rd investigator + api_task = workflow_api.next_task + self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual("asd3v", api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(3, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() - task = processor.bpmn_workflow.last_task + workflow_api = WorkflowService.processor_to_workflow_api(processor) + + # Last task + api_task = workflow_api.next_task expected = self.mock_investigator_response expected['PI']['email'] = "asd3v@virginia.edu" expected['SC_I']['email'] = "asdf32@virginia.edu" expected['DC']['email'] = "dhf8r@virginia.edu" - self.assertEqual(expected, - task.data['StudyInfo']['investigators']) + self.assertEqual(expected, api_task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, processor.get_status()) + def refresh_processor(self, processor): + """Saves the processor, and returns a new one read in from the database""" + processor.save() + processor = WorkflowProcessor(processor.workflow_model) + return processor + @patch('crc.services.study_service.StudyService.get_investigators') def test_create_and_complete_workflow_parallel(self, mock_study_service): """Unlike the test above, the parallel task allows us to complete the items in any order.""" @@ -107,11 +128,15 @@ class TestWorkflowProcessorMultiInstance(BaseTest): workflow_spec_model = self.load_test_spec("multi_instance_parallel") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor = self.refresh_processor(processor) processor.bpmn_workflow.do_engine_steps() # In the Parallel instance, there should be three tasks, all of them in the ready state. 
next_user_tasks = processor.next_user_tasks() + self.assertEqual(3, len(next_user_tasks)) + # There should be six tasks in the navigation: start event, the script task, end event, and three tasks + # for the three executions of the multi-instance. + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # We can complete the tasks out of order. task = next_user_tasks[2] @@ -121,23 +146,31 @@ api_task = WorkflowService.spiff_task_to_api_task(task) self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + + # Assure navigation picks up the label of the current element variable. + nav = WorkflowService.processor_to_workflow_api(processor, task).navigation + self.assertEquals("Primary Investigator", nav[2].title) + + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[0] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", api_task.name) + self.assertEqual("MultiInstanceTask", api_task.name) task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[1] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) + self.assertEqual("MultiInstanceTask", task.get_name()) task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # Completing the tasks out of order still provides the correct information.
expected = self.mock_investigator_response @@ -148,3 +181,4 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, processor.get_status()) + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) diff --git a/tests/test_workflow_service.py b/tests/workflow/test_workflow_service.py similarity index 82% rename from tests/test_workflow_service.py rename to tests/workflow/test_workflow_service.py index 9f3ceda1..9ae49b5a 100644 --- a/tests/test_workflow_service.py +++ b/tests/workflow/test_workflow_service.py @@ -1,7 +1,16 @@ +import json +import unittest + from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService +from SpiffWorkflow import Task as SpiffTask, WorkflowException +from example_data import ExampleDataLoader +from crc import db +from crc.models.task_event import TaskEventModel +from crc.models.api_models import Task +from crc.api.common import ApiError class TestWorkflowService(BaseTest): @@ -78,4 +87,9 @@ class TestWorkflowService(BaseTest): task = processor.next_task() task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) WorkflowService.populate_form_with_random_data(task, task_api, required_only=False) - self.assertTrue(isinstance(task.data["sponsor"], dict)) \ No newline at end of file + self.assertTrue(isinstance(task.data["sponsor"], dict)) + + def test_dmn_evaluation_errors_in_oncomplete_raise_api_errors_during_validation(self): + workflow_spec_model = self.load_test_spec("decision_table_invalid") + with self.assertRaises(ApiError): + WorkflowService.test_spec(workflow_spec_model.id) diff --git a/tests/test_workflow_spec_api.py b/tests/workflow/test_workflow_spec_api.py similarity index 100% rename from tests/test_workflow_spec_api.py rename to tests/workflow/test_workflow_spec_api.py diff --git a/tests/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py similarity index 91% rename from tests/test_workflow_spec_validation_api.py rename to tests/workflow/test_workflow_spec_validation_api.py index cb9b6b77..0c17892e 100644 --- a/tests/test_workflow_spec_validation_api.py +++ b/tests/workflow/test_workflow_spec_validation_api.py @@ -1,4 +1,5 @@ import json +import unittest from unittest.mock import patch from tests.base_test import BaseTest @@ -51,9 +52,6 @@ class TestWorkflowSpecValidation(BaseTest): app.config['PB_ENABLED'] = True self.validate_all_loaded_workflows() - def test_successful_validation_of_rrt_workflows(self): - self.load_example_data(use_rrt_data=True) - self.validate_all_loaded_workflows() def validate_all_loaded_workflows(self): workflows = session.query(WorkflowSpecModel).all() @@ -66,7 +64,6 @@ class TestWorkflowSpecValidation(BaseTest): errors.extend(ApiErrorSchema(many=True).load(json_data)) self.assertEqual(0, len(errors), json.dumps(errors)) - def test_invalid_expression(self): self.load_example_data() errors = self.validate_workflow("invalid_expression") @@ -92,12 +89,21 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script") self.assertEqual(2, len(errors)) - self.assertEqual("workflow_validation_exception", errors[0]['code']) + self.assertEqual("error_loading_workflow", errors[0]['code']) self.assertTrue("NoSuchScript" in errors[0]['message']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An 
Invalid Script Reference", errors[0]['task_name']) self.assertEqual("invalid_script.bpmn", errors[0]['file_name']) + def test_invalid_script2(self): + self.load_example_data() + errors = self.validate_workflow("invalid_script2") + self.assertEqual(2, len(errors)) + self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) + self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) + self.assertEqual("invalid_script2.bpmn", errors[0]['file_name']) + def test_repeating_sections_correctly_populated(self): self.load_example_data() spec_model = self.load_test_spec('repeat_form')
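For orientation, validate_workflow in these tests posts a spec to the validation endpoint and loads the response with ApiErrorSchema. A rough sketch of the kind of server-side pass the tests exercise, assuming only what they assert (WorkflowService.test_spec raises an ApiError carrying code, message, task_id, task_name and file_name; the tests expect two entries per failing spec, so the real endpoint presumably runs more than one validation pass, which this single-pass sketch omits):

from crc.api.common import ApiError
from crc.services.workflow_service import WorkflowService

def validate_spec(spec_id):
    # Illustrative single-pass sketch, not the actual endpoint.
    errors = []
    try:
        WorkflowService.test_spec(spec_id)
    except ApiError as ae:
        errors.append({'code': ae.code,
                       'message': ae.message,
                       'task_id': getattr(ae, 'task_id', None),
                       'task_name': getattr(ae, 'task_name', None),
                       'file_name': getattr(ae, 'file_name', None)})
    return errors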