diff --git a/Pipfile b/Pipfile
index 5ecbde1f..6b28197a 100644
--- a/Pipfile
+++ b/Pipfile
@@ -9,38 +9,42 @@ pbr = "*"
coverage = "*"
[packages]
+alembic = "*"
connexion = {extras = ["swagger-ui"],version = "*"}
-swagger-ui-bundle = "*"
+coverage = "*"
+docxtpl = "*"
flask = "*"
+flask-admin = "*"
flask-bcrypt = "*"
flask-cors = "*"
+flask-mail = "*"
flask-marshmallow = "*"
flask-migrate = "*"
flask-restful = "*"
+gunicorn = "*"
httpretty = "*"
+ldap3 = "*"
+lxml = "*"
+markdown = "*"
marshmallow = "*"
marshmallow-enum = "*"
marshmallow-sqlalchemy = "*"
openpyxl = "*"
-pyjwt = "*"
-requests = "*"
-xlsxwriter = "*"
-webtest = "*"
-spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"}
-alembic = "*"
-coverage = "*"
-sphinx = "*"
-recommonmark = "*"
-psycopg2-binary = "*"
-docxtpl = "*"
-python-dateutil = "*"
pandas = "*"
-xlrd = "*"
-ldap3 = "*"
-gunicorn = "*"
-werkzeug = "*"
+psycopg2-binary = "*"
+pyjwt = "*"
+python-dateutil = "*"
+recommonmark = "*"
+requests = "*"
sentry-sdk = {extras = ["flask"],version = "==0.14.4"}
-flask-mail = "*"
+sphinx = "*"
+spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"}
+#spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"}
+swagger-ui-bundle = "*"
+webtest = "*"
+werkzeug = "*"
+xlrd = "*"
+xlsxwriter = "*"
[requires]
python_version = "3.7"
diff --git a/Pipfile.lock b/Pipfile.lock
index 2f99c84f..bd8581a5 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "faaf0e1f31f4bf99df366e52df20bb148a05996a0e6467767660665c514af2d7"
+ "sha256": "97a15c4ade88db2b384d52436633889a4d9b0bdcaeea86b8a679ebda6f73fb59"
},
"pipfile-spec": 6,
"requires": {
@@ -104,17 +104,17 @@
},
"celery": {
"hashes": [
- "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647",
- "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b"
+ "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916",
+ "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da"
],
- "version": "==4.4.5"
+ "version": "==4.4.6"
},
"certifi": {
"hashes": [
- "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1",
- "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"
+ "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3",
+ "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41"
],
- "version": "==2020.4.5.2"
+ "version": "==2020.6.20"
},
"cffi": {
"hashes": [
@@ -197,40 +197,43 @@
},
"coverage": {
"hashes": [
- "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a",
- "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355",
- "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65",
- "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7",
- "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9",
- "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1",
- "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0",
- "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55",
- "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c",
- "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6",
- "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef",
- "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019",
- "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e",
- "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0",
- "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf",
- "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24",
- "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2",
- "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c",
- "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4",
- "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0",
- "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd",
- "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04",
- "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e",
- "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730",
- "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2",
- "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768",
- "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796",
- "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7",
- "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a",
- "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489",
- "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"
+ "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d",
+ "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2",
+ "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703",
+ "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404",
+ "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7",
+ "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405",
+ "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d",
+ "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c",
+ "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6",
+ "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70",
+ "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40",
+ "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4",
+ "sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613",
+ "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10",
+ "sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b",
+ "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0",
+ "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec",
+ "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1",
+ "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d",
+ "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913",
+ "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e",
+ "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62",
+ "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e",
+ "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a",
+ "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d",
+ "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f",
+ "sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e",
+ "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b",
+ "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c",
+ "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032",
+ "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a",
+ "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee",
+ "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c",
+ "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b"
],
"index": "pypi",
- "version": "==5.1"
+ "version": "==5.2"
},
"docutils": {
"hashes": [
@@ -261,6 +264,13 @@
"index": "pypi",
"version": "==1.1.2"
},
+ "flask-admin": {
+ "hashes": [
+ "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8"
+ ],
+ "index": "pypi",
+ "version": "==1.5.6"
+ },
"flask-bcrypt": {
"hashes": [
"sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f"
@@ -309,10 +319,10 @@
},
"flask-sqlalchemy": {
"hashes": [
- "sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5",
- "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e"
+ "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e",
+ "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5"
],
- "version": "==2.4.3"
+ "version": "==2.4.4"
},
"future": {
"hashes": [
@@ -337,10 +347,10 @@
},
"idna": {
"hashes": [
- "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb",
- "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa"
+ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6",
+ "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"
],
- "version": "==2.9"
+ "version": "==2.10"
},
"imagesize": {
"hashes": [
@@ -351,11 +361,11 @@
},
"importlib-metadata": {
"hashes": [
- "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
- "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
+ "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83",
+ "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"
],
"markers": "python_version < '3.8'",
- "version": "==1.6.1"
+ "version": "==1.7.0"
},
"inflection": {
"hashes": [
@@ -394,10 +404,10 @@
},
"kombu": {
"hashes": [
- "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a",
- "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3"
+ "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a",
+ "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74"
],
- "version": "==4.6.10"
+ "version": "==4.6.11"
},
"ldap3": {
"hashes": [
@@ -409,35 +419,40 @@
},
"lxml": {
"hashes": [
- "sha256:06748c7192eab0f48e3d35a7adae609a329c6257495d5e53878003660dc0fec6",
- "sha256:0790ddca3f825dd914978c94c2545dbea5f56f008b050e835403714babe62a5f",
- "sha256:1aa7a6197c1cdd65d974f3e4953764eee3d9c7b67e3966616b41fab7f8f516b7",
- "sha256:22c6d34fdb0e65d5f782a4d1a1edb52e0a8365858dafb1c08cb1d16546cf0786",
- "sha256:2754d4406438c83144f9ffd3628bbe2dcc6d62b20dbc5c1ec4bc4385e5d44b42",
- "sha256:27ee0faf8077c7c1a589573b1450743011117f1aa1a91d5ae776bbc5ca6070f2",
- "sha256:2b02c106709466a93ed424454ce4c970791c486d5fcdf52b0d822a7e29789626",
- "sha256:2d1ddce96cf15f1254a68dba6935e6e0f1fe39247de631c115e84dd404a6f031",
- "sha256:4f282737d187ae723b2633856085c31ae5d4d432968b7f3f478a48a54835f5c4",
- "sha256:51bb4edeb36d24ec97eb3e6a6007be128b720114f9a875d6b370317d62ac80b9",
- "sha256:7eee37c1b9815e6505847aa5e68f192e8a1b730c5c7ead39ff317fde9ce29448",
- "sha256:7fd88cb91a470b383aafad554c3fe1ccf6dfb2456ff0e84b95335d582a799804",
- "sha256:9144ce36ca0824b29ebc2e02ca186e54040ebb224292072250467190fb613b96",
- "sha256:925baf6ff1ef2c45169f548cc85204433e061360bfa7d01e1be7ae38bef73194",
- "sha256:a636346c6c0e1092ffc202d97ec1843a75937d8c98aaf6771348ad6422e44bb0",
- "sha256:a87dbee7ad9dce3aaefada2081843caf08a44a8f52e03e0a4cc5819f8398f2f4",
- "sha256:a9e3b8011388e7e373565daa5e92f6c9cb844790dc18e43073212bb3e76f7007",
- "sha256:afb53edf1046599991fb4a7d03e601ab5f5422a5435c47ee6ba91ec3b61416a6",
- "sha256:b26719890c79a1dae7d53acac5f089d66fd8cc68a81f4e4bd355e45470dc25e1",
- "sha256:b7462cdab6fffcda853338e1741ce99706cdf880d921b5a769202ea7b94e8528",
- "sha256:b77975465234ff49fdad871c08aa747aae06f5e5be62866595057c43f8d2f62c",
- "sha256:c47a8a5d00060122ca5908909478abce7bbf62d812e3fc35c6c802df8fb01fe7",
- "sha256:c79e5debbe092e3c93ca4aee44c9a7631bdd407b2871cb541b979fd350bbbc29",
- "sha256:d8d40e0121ca1606aa9e78c28a3a7d88a05c06b3ca61630242cded87d8ce55fa",
- "sha256:ee2be8b8f72a2772e72ab926a3bccebf47bb727bda41ae070dc91d1fb759b726",
- "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9",
- "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529"
+ "sha256:05a444b207901a68a6526948c7cc8f9fe6d6f24c70781488e32fd74ff5996e3f",
+ "sha256:08fc93257dcfe9542c0a6883a25ba4971d78297f63d7a5a26ffa34861ca78730",
+ "sha256:107781b213cf7201ec3806555657ccda67b1fccc4261fb889ef7fc56976db81f",
+ "sha256:121b665b04083a1e85ff1f5243d4a93aa1aaba281bc12ea334d5a187278ceaf1",
+ "sha256:1fa21263c3aba2b76fd7c45713d4428dbcc7644d73dcf0650e9d344e433741b3",
+ "sha256:2b30aa2bcff8e958cd85d907d5109820b01ac511eae5b460803430a7404e34d7",
+ "sha256:4b4a111bcf4b9c948e020fd207f915c24a6de3f1adc7682a2d92660eb4e84f1a",
+ "sha256:5591c4164755778e29e69b86e425880f852464a21c7bb53c7ea453bbe2633bbe",
+ "sha256:59daa84aef650b11bccd18f99f64bfe44b9f14a08a28259959d33676554065a1",
+ "sha256:5a9c8d11aa2c8f8b6043d845927a51eb9102eb558e3f936df494e96393f5fd3e",
+ "sha256:5dd20538a60c4cc9a077d3b715bb42307239fcd25ef1ca7286775f95e9e9a46d",
+ "sha256:74f48ec98430e06c1fa8949b49ebdd8d27ceb9df8d3d1c92e1fdc2773f003f20",
+ "sha256:786aad2aa20de3dbff21aab86b2fb6a7be68064cbbc0219bde414d3a30aa47ae",
+ "sha256:7ad7906e098ccd30d8f7068030a0b16668ab8aa5cda6fcd5146d8d20cbaa71b5",
+ "sha256:80a38b188d20c0524fe8959c8ce770a8fdf0e617c6912d23fc97c68301bb9aba",
+ "sha256:8f0ec6b9b3832e0bd1d57af41f9238ea7709bbd7271f639024f2fc9d3bb01293",
+ "sha256:92282c83547a9add85ad658143c76a64a8d339028926d7dc1998ca029c88ea6a",
+ "sha256:94150231f1e90c9595ccc80d7d2006c61f90a5995db82bccbca7944fd457f0f6",
+ "sha256:9dc9006dcc47e00a8a6a029eb035c8f696ad38e40a27d073a003d7d1443f5d88",
+ "sha256:a76979f728dd845655026ab991df25d26379a1a8fc1e9e68e25c7eda43004bed",
+ "sha256:aa8eba3db3d8761db161003e2d0586608092e217151d7458206e243be5a43843",
+ "sha256:bea760a63ce9bba566c23f726d72b3c0250e2fa2569909e2d83cda1534c79443",
+ "sha256:c3f511a3c58676147c277eff0224c061dd5a6a8e1373572ac817ac6324f1b1e0",
+ "sha256:c9d317efde4bafbc1561509bfa8a23c5cab66c44d49ab5b63ff690f5159b2304",
+ "sha256:cc411ad324a4486b142c41d9b2b6a722c534096963688d879ea6fa8a35028258",
+ "sha256:cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6",
+ "sha256:cfd7c5dd3c35c19cec59c63df9571c67c6d6e5c92e0fe63517920e97f61106d1",
+ "sha256:e1cacf4796b20865789083252186ce9dc6cc59eca0c2e79cca332bdff24ac481",
+ "sha256:e70d4e467e243455492f5de463b72151cc400710ac03a0678206a5f27e79ddef",
+ "sha256:ecc930ae559ea8a43377e8b60ca6f8d61ac532fc57efb915d899de4a67928efd",
+ "sha256:f161af26f596131b63b236372e4ce40f3167c1b5b5d459b29d2514bd8c9dc9ee"
],
- "version": "==4.5.1"
+ "index": "pypi",
+ "version": "==4.5.2"
},
"mako": {
"hashes": [
@@ -446,6 +461,14 @@
],
"version": "==1.1.3"
},
+ "markdown": {
+ "hashes": [
+ "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17",
+ "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"
+ ],
+ "index": "pypi",
+ "version": "==3.2.2"
+ },
"markupsafe": {
"hashes": [
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
@@ -486,11 +509,11 @@
},
"marshmallow": {
"hashes": [
- "sha256:35ee2fb188f0bd9fc1cf9ac35e45fd394bd1c153cee430745a465ea435514bd5",
- "sha256:9aa20f9b71c992b4782dad07c51d92884fd0f7c5cb9d3c737bea17ec1bad765f"
+ "sha256:67bf4cae9d3275b3fc74bd7ff88a7c98ee8c57c94b251a67b031dc293ecc4b76",
+ "sha256:a2a5eefb4b75a3b43f05be1cca0b6686adf56af7465c3ca629e5ad8d1e1fe13d"
],
"index": "pypi",
- "version": "==3.6.1"
+ "version": "==3.7.1"
},
"marshmallow-enum": {
"hashes": [
@@ -510,29 +533,34 @@
},
"numpy": {
"hashes": [
- "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233",
- "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b",
- "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7",
- "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f",
- "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5",
- "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb",
- "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583",
- "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1",
- "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a",
- "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271",
- "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824",
- "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3",
- "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc",
- "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161",
- "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f",
- "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f",
- "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf",
- "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b",
- "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0",
- "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675",
- "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"
+ "sha256:082f8d4dd69b6b688f64f509b91d482362124986d98dc7dc5f5e9f9b9c3bb983",
+ "sha256:1bc0145999e8cb8aed9d4e65dd8b139adf1919e521177f198529687dbf613065",
+ "sha256:309cbcfaa103fc9a33ec16d2d62569d541b79f828c382556ff072442226d1968",
+ "sha256:3673c8b2b29077f1b7b3a848794f8e11f401ba0b71c49fbd26fb40b71788b132",
+ "sha256:480fdd4dbda4dd6b638d3863da3be82873bba6d32d1fc12ea1b8486ac7b8d129",
+ "sha256:56ef7f56470c24bb67fb43dae442e946a6ce172f97c69f8d067ff8550cf782ff",
+ "sha256:5a936fd51049541d86ccdeef2833cc89a18e4d3808fe58a8abeb802665c5af93",
+ "sha256:5b6885c12784a27e957294b60f97e8b5b4174c7504665333c5e94fbf41ae5d6a",
+ "sha256:667c07063940e934287993366ad5f56766bc009017b4a0fe91dbd07960d0aba7",
+ "sha256:7ed448ff4eaffeb01094959b19cbaf998ecdee9ef9932381420d514e446601cd",
+ "sha256:8343bf67c72e09cfabfab55ad4a43ce3f6bf6e6ced7acf70f45ded9ebb425055",
+ "sha256:92feb989b47f83ebef246adabc7ff3b9a59ac30601c3f6819f8913458610bdcc",
+ "sha256:935c27ae2760c21cd7354402546f6be21d3d0c806fffe967f745d5f2de5005a7",
+ "sha256:aaf42a04b472d12515debc621c31cf16c215e332242e7a9f56403d814c744624",
+ "sha256:b12e639378c741add21fbffd16ba5ad25c0a1a17cf2b6fe4288feeb65144f35b",
+ "sha256:b1cca51512299841bf69add3b75361779962f9cee7d9ee3bb446d5982e925b69",
+ "sha256:b8456987b637232602ceb4d663cb34106f7eb780e247d51a260b84760fd8f491",
+ "sha256:b9792b0ac0130b277536ab8944e7b754c69560dac0415dd4b2dbd16b902c8954",
+ "sha256:c9591886fc9cbe5532d5df85cb8e0cc3b44ba8ce4367bd4cf1b93dc19713da72",
+ "sha256:cf1347450c0b7644ea142712619533553f02ef23f92f781312f6a3553d031fc7",
+ "sha256:de8b4a9b56255797cbddb93281ed92acbc510fb7b15df3f01bd28f46ebc4edae",
+ "sha256:e1b1dc0372f530f26a03578ac75d5e51b3868b9b76cd2facba4c9ee0eb252ab1",
+ "sha256:e45f8e981a0ab47103181773cc0a54e650b2aef8c7b6cd07405d0fa8d869444a",
+ "sha256:e4f6d3c53911a9d103d8ec9518190e52a8b945bab021745af4939cfc7c0d4a9e",
+ "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e",
+ "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc"
],
- "version": "==1.18.5"
+ "version": "==1.19.1"
},
"openapi-spec-validator": {
"hashes": [
@@ -544,10 +572,11 @@
},
"openpyxl": {
"hashes": [
- "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64"
+ "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f",
+ "sha256:d88dd1480668019684c66cfff3e52a5de4ed41e9df5dd52e008cbf27af0dbf87"
],
"index": "pypi",
- "version": "==3.0.3"
+ "version": "==3.0.4"
},
"packaging": {
"hashes": [
@@ -558,25 +587,25 @@
},
"pandas": {
"hashes": [
- "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46",
- "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5",
- "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa",
- "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc",
- "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678",
- "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc",
- "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31",
- "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8",
- "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6",
- "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa",
- "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4",
- "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874",
- "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd",
- "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4",
- "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126",
- "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648"
+ "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096",
+ "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0",
+ "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453",
+ "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc",
+ "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7",
+ "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b",
+ "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8",
+ "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8",
+ "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef",
+ "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d",
+ "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705",
+ "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9",
+ "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91",
+ "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3",
+ "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107",
+ "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e"
],
"index": "pypi",
- "version": "==1.0.4"
+ "version": "==1.0.5"
},
"psycopg2-binary": {
"hashes": [
@@ -656,6 +685,13 @@
],
"version": "==0.16.0"
},
+ "python-box": {
+ "hashes": [
+ "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111",
+ "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f"
+ ],
+ "version": "==5.0.1"
+ },
"python-dateutil": {
"hashes": [
"sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
@@ -678,6 +714,61 @@
],
"version": "==1.0.4"
},
+ "python-levenshtein-wheels": {
+ "hashes": [
+ "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d",
+ "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92",
+ "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc",
+ "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb",
+ "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b",
+ "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc",
+ "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1",
+ "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40",
+ "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31",
+ "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d",
+ "sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10",
+ "sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103",
+ "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4",
+ "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6",
+ "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f",
+ "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0",
+ "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399",
+ "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0",
+ "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f",
+ "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5",
+ "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2",
+ "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c",
+ "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7",
+ "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3",
+ "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55",
+ "sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e",
+ "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362",
+ "sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662",
+ "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8",
+ "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1",
+ "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0",
+ "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884",
+ "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021",
+ "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2",
+ "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa",
+ "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f",
+ "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab",
+ "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263",
+ "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610",
+ "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa",
+ "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d",
+ "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4",
+ "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92",
+ "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a",
+ "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26",
+ "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99",
+ "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e",
+ "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8",
+ "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244",
+ "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c"
+ ],
+ "version": "==0.13.1"
+ },
"pytz": {
"hashes": [
"sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed",
@@ -711,11 +802,11 @@
},
"requests": {
"hashes": [
- "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee",
- "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"
+ "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b",
+ "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898"
],
"index": "pypi",
- "version": "==2.23.0"
+ "version": "==2.24.0"
},
"sentry-sdk": {
"extras": [
@@ -751,11 +842,11 @@
},
"sphinx": {
"hashes": [
- "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258",
- "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5"
+ "sha256:97dbf2e31fc5684bb805104b8ad34434ed70e6c588f6896991b2fdfd2bef8c00",
+ "sha256:b9daeb9b39aa1ffefc2809b43604109825300300b987a24f45976c001ba1a8fd"
],
"index": "pypi",
- "version": "==3.1.1"
+ "version": "==3.1.2"
},
"sphinxcontrib-applehelp": {
"hashes": [
@@ -802,49 +893,48 @@
"spiffworkflow": {
"editable": true,
"git": "https://github.com/sartography/SpiffWorkflow.git",
- "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0"
+ "ref": "74529738b4e16be5aadd846669a201560f81a6d4"
},
"sqlalchemy": {
"hashes": [
- "sha256:128bc917ed20d78143a45024455ff0aed7d3b96772eba13d5dbaf9cc57e5c41b",
- "sha256:156a27548ba4e1fed944ff9fcdc150633e61d350d673ae7baaf6c25c04ac1f71",
- "sha256:27e2efc8f77661c9af2681755974205e7462f1ae126f498f4fe12a8b24761d15",
- "sha256:2a12f8be25b9ea3d1d5b165202181f2b7da4b3395289000284e5bb86154ce87c",
- "sha256:31c043d5211aa0e0773821fcc318eb5cbe2ec916dfbc4c6eea0c5188971988eb",
- "sha256:65eb3b03229f684af0cf0ad3bcc771970c1260a82a791a8d07bffb63d8c95bcc",
- "sha256:6cd157ce74a911325e164441ff2d9b4e244659a25b3146310518d83202f15f7a",
- "sha256:703c002277f0fbc3c04d0ae4989a174753a7554b2963c584ce2ec0cddcf2bc53",
- "sha256:869bbb637de58ab0a912b7f20e9192132f9fbc47fc6b5111cd1e0f6cdf5cf9b0",
- "sha256:8a0e0cd21da047ea10267c37caf12add400a92f0620c8bc09e4a6531a765d6d7",
- "sha256:8d01e949a5d22e5c4800d59b50617c56125fc187fbeb8fa423e99858546de616",
- "sha256:925b4fe5e7c03ed76912b75a9a41dfd682d59c0be43bce88d3b27f7f5ba028fb",
- "sha256:9cb1819008f0225a7c066cac8bb0cf90847b2c4a6eb9ebb7431dbd00c56c06c5",
- "sha256:a87d496884f40c94c85a647c385f4fd5887941d2609f71043e2b73f2436d9c65",
- "sha256:a9030cd30caf848a13a192c5e45367e3c6f363726569a56e75dc1151ee26d859",
- "sha256:a9e75e49a0f1583eee0ce93270232b8e7bb4b1edc89cc70b07600d525aef4f43",
- "sha256:b50f45d0e82b4562f59f0e0ca511f65e412f2a97d790eea5f60e34e5f1aabc9a",
- "sha256:b7878e59ec31f12d54b3797689402ee3b5cfcb5598f2ebf26491732758751908",
- "sha256:ce1ddaadee913543ff0154021d31b134551f63428065168e756d90bdc4c686f5",
- "sha256:ce2646e4c0807f3461be0653502bb48c6e91a5171d6e450367082c79e12868bf",
- "sha256:ce6c3d18b2a8ce364013d47b9cad71db815df31d55918403f8db7d890c9d07ae",
- "sha256:e4e2664232005bd306f878b0f167a31f944a07c4de0152c444f8c61bbe3cfb38",
- "sha256:e8aa395482728de8bdcca9cc0faf3765ab483e81e01923aaa736b42f0294f570",
- "sha256:eb4fcf7105bf071c71068c6eee47499ab8d4b8f5a11fc35147c934f0faa60f23",
- "sha256:ed375a79f06cad285166e5be74745df1ed6845c5624aafadec4b7a29c25866ef",
- "sha256:f35248f7e0d63b234a109dd72fbfb4b5cb6cb6840b221d0df0ecbf54ab087654",
- "sha256:f502ef245c492b391e0e23e94cba030ab91722dcc56963c85bfd7f3441ea2bbe",
- "sha256:fe01bac7226499aedf472c62fa3b85b2c619365f3f14dd222ffe4f3aa91e5f98"
+ "sha256:0942a3a0df3f6131580eddd26d99071b48cfe5aaf3eab2783076fbc5a1c1882e",
+ "sha256:0ec575db1b54909750332c2e335c2bb11257883914a03bc5a3306a4488ecc772",
+ "sha256:109581ccc8915001e8037b73c29590e78ce74be49ca0a3630a23831f9e3ed6c7",
+ "sha256:16593fd748944726540cd20f7e83afec816c2ac96b082e26ae226e8f7e9688cf",
+ "sha256:427273b08efc16a85aa2b39892817e78e3ed074fcb89b2a51c4979bae7e7ba98",
+ "sha256:50c4ee32f0e1581828843267d8de35c3298e86ceecd5e9017dc45788be70a864",
+ "sha256:512a85c3c8c3995cc91af3e90f38f460da5d3cade8dc3a229c8e0879037547c9",
+ "sha256:57aa843b783179ab72e863512e14bdcba186641daf69e4e3a5761d705dcc35b1",
+ "sha256:621f58cd921cd71ba6215c42954ffaa8a918eecd8c535d97befa1a8acad986dd",
+ "sha256:6ac2558631a81b85e7fb7a44e5035347938b0a73f5fdc27a8566777d0792a6a4",
+ "sha256:716754d0b5490bdcf68e1e4925edc02ac07209883314ad01a137642ddb2056f1",
+ "sha256:736d41cfebedecc6f159fc4ac0769dc89528a989471dc1d378ba07d29a60ba1c",
+ "sha256:8619b86cb68b185a778635be5b3e6018623c0761dde4df2f112896424aa27bd8",
+ "sha256:87fad64529cde4f1914a5b9c383628e1a8f9e3930304c09cf22c2ae118a1280e",
+ "sha256:89494df7f93b1836cae210c42864b292f9b31eeabca4810193761990dc689cce",
+ "sha256:8cac7bb373a5f1423e28de3fd5fc8063b9c8ffe8957dc1b1a59cb90453db6da1",
+ "sha256:8fd452dc3d49b3cc54483e033de6c006c304432e6f84b74d7b2c68afa2569ae5",
+ "sha256:adad60eea2c4c2a1875eb6305a0b6e61a83163f8e233586a4d6a55221ef984fe",
+ "sha256:c26f95e7609b821b5f08a72dab929baa0d685406b953efd7c89423a511d5c413",
+ "sha256:cbe1324ef52ff26ccde2cb84b8593c8bf930069dfc06c1e616f1bfd4e47f48a3",
+ "sha256:d05c4adae06bd0c7f696ae3ec8d993ed8ffcc4e11a76b1b35a5af8a099bd2284",
+ "sha256:d98bc827a1293ae767c8f2f18be3bb5151fd37ddcd7da2a5f9581baeeb7a3fa1",
+ "sha256:da2fb75f64792c1fc64c82313a00c728a7c301efe6a60b7a9fe35b16b4368ce7",
+ "sha256:e4624d7edb2576cd72bb83636cd71c8ce544d8e272f308bd80885056972ca299",
+ "sha256:e89e0d9e106f8a9180a4ca92a6adde60c58b1b0299e1b43bd5e0312f535fbf33",
+ "sha256:f11c2437fb5f812d020932119ba02d9e2bc29a6eca01a055233a8b449e3e1e7d",
+ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274",
+ "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd"
],
- "version": "==1.3.17"
+ "version": "==1.3.18"
},
"swagger-ui-bundle": {
"hashes": [
- "sha256:49d2e12d60a6499e9d37ea37953b5d700f4e114edc7520fe918bae5eb693a20e",
- "sha256:c5373b683487b1b914dccd23bcd9a3016afa2c2d1cda10f8713c0a9af0f91dd3",
- "sha256:f776811855092c086dbb08216c8810a84accef8c76c796a135caa13645c5cc68"
+ "sha256:f5255f786cde67a2638111f4a7d04355836743198a83c4ecbe815d9fc384b0c8",
+ "sha256:f5691167f2e9f73ecbe8229a89454ae5ea958f90bb0d4583ed7adaae598c4122"
],
"index": "pypi",
- "version": "==0.0.6"
+ "version": "==0.0.8"
},
"urllib3": {
"hashes": [
@@ -890,6 +980,13 @@
"index": "pypi",
"version": "==1.0.1"
},
+ "wtforms": {
+ "hashes": [
+ "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b",
+ "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972"
+ ],
+ "version": "==2.3.1"
+ },
"xlrd": {
"hashes": [
"sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2",
@@ -924,48 +1021,51 @@
},
"coverage": {
"hashes": [
- "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a",
- "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355",
- "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65",
- "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7",
- "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9",
- "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1",
- "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0",
- "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55",
- "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c",
- "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6",
- "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef",
- "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019",
- "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e",
- "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0",
- "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf",
- "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24",
- "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2",
- "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c",
- "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4",
- "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0",
- "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd",
- "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04",
- "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e",
- "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730",
- "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2",
- "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768",
- "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796",
- "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7",
- "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a",
- "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489",
- "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"
+ "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d",
+ "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2",
+ "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703",
+ "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404",
+ "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7",
+ "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405",
+ "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d",
+ "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c",
+ "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6",
+ "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70",
+ "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40",
+ "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4",
+ "sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613",
+ "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10",
+ "sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b",
+ "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0",
+ "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec",
+ "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1",
+ "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d",
+ "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913",
+ "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e",
+ "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62",
+ "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e",
+ "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a",
+ "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d",
+ "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f",
+ "sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e",
+ "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b",
+ "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c",
+ "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032",
+ "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a",
+ "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee",
+ "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c",
+ "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b"
],
"index": "pypi",
- "version": "==5.1"
+ "version": "==5.2"
},
"importlib-metadata": {
"hashes": [
- "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
- "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
+ "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83",
+ "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"
],
"markers": "python_version < '3.8'",
- "version": "==1.6.1"
+ "version": "==1.7.0"
},
"more-itertools": {
"hashes": [
@@ -998,10 +1098,10 @@
},
"py": {
"hashes": [
- "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44",
- "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b"
+ "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2",
+ "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"
],
- "version": "==1.8.2"
+ "version": "==1.9.0"
},
"pyparsing": {
"hashes": [
@@ -1027,10 +1127,10 @@
},
"wcwidth": {
"hashes": [
- "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f",
- "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"
+ "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
+ "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
],
- "version": "==0.2.4"
+ "version": "==0.2.5"
},
"zipp": {
"hashes": [
diff --git a/README.md b/README.md
index a8191a67..6bd7dd67 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# CrConnectFrontend
+# sartography/cr-connect-workflow
[![Build Status](https://travis-ci.com/sartography/cr-connect-workflow.svg?branch=master)](https://travis-ci.com/sartography/cr-connect-workflow)
@@ -27,7 +27,7 @@ Make sure all of the following are properly installed on your system:
- Select the directory where you cloned this repository and click `Ok`.
- Expand the `Project Interpreter` section.
- Select the `New environment using` radio button and choose `Pipenv` in the dropdown.
- - Under `Base interpreter`, select `Python 3.6`
+ - Under `Base interpreter`, select `Python 3.7`
- In the `Pipenv executable` field, enter `/home/your_username_goes_here/.local/bin/pipenv`
- Click `Create`
![Project Interpreter](readme_images/new_project.png)
@@ -47,22 +47,15 @@ run configuration so it doesn't go away.) :
Just click the "Play" button next to RUN in the top right corner of the screen.
The Swagger based view of the API will be avialable at http://0.0.0.0:5000/v1.0/ui/
-### Testing from the Shell
-This app includes a command line interface that will read in BPMN files and let you
-play with it at the command line. To run it right click on app/command_line/joke.py and
-click run. Type "?" to get a list of commands.
-So far the joke system will work a little, when you file it up try these commands
-in this order:
-```bash
-> engine (this will run all tasks up to first user task and should print a joke)
-> answer clock (this is the correct answer)
-> next (this completes the user task)
-> engine (this runs the rest of the tasks, and should tell you that you got the question right)
+### Running Tests
+We use pytest to execute tests. You can run this from the command line with:
```
+pipenv run coverage run -m pytest
+```
+To run the tests within PyCharm set up a run configuration using pytest (Go to Run, configurations, click the
+plus icon, select Python Tests, and under this select pytest; the defaults should work fine with no
+additional edits required.)
-You can try re-running this and getting the question wrong.
-You might open up the Joke bpmn diagram so you can see what this looks like to
-draw out.
## Documentation
Additional Documentation is available on [ReadTheDocs](https://cr-connect-workflow.readthedocs.io/en/latest/#)
diff --git a/config/default.py b/config/default.py
index bee6f968..b295bf4b 100644
--- a/config/default.py
+++ b/config/default.py
@@ -15,7 +15,8 @@ TEST_UID = environ.get('TEST_UID', default="dhf8r")
ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah3us,cl3wf"))
# Sentry flag
-ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"
+ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true" # To be removed soon
+SENTRY_ENVIRONMENT = environ.get('SENTRY_ENVIRONMENT', None)
# Add trailing slash to base path
APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
@@ -30,7 +31,7 @@ SQLALCHEMY_DATABASE_URI = environ.get(
default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
-TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
+SECRET_KEY = environ.get('SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
@@ -46,6 +47,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No
LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))
# Email configuration
+DEFAULT_SENDER = 'askresearch@virginia.edu'
FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com']
MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
diff --git a/config/testing.py b/config/testing.py
index c7a777ad..5b03cc41 100644
--- a/config/testing.py
+++ b/config/testing.py
@@ -5,7 +5,7 @@ basedir = os.path.abspath(os.path.dirname(__file__))
NAME = "CR Connect Workflow"
TESTING = True
-TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
+SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
# This is here, for when we are running the E2E Tests in the frontend code bases.
# which will set the TESTING envronment to true, causing this to execute, but we need
diff --git a/crc/__init__.py b/crc/__init__.py
index 1ac2678f..9081f739 100644
--- a/crc/__init__.py
+++ b/crc/__init__.py
@@ -4,6 +4,8 @@ import sentry_sdk
import connexion
from jinja2 import Environment, FileSystemLoader
+from flask_admin import Admin
+from flask_admin.contrib.sqla import ModelView
from flask_cors import CORS
from flask_marshmallow import Marshmallow
from flask_mail import Mail
@@ -32,30 +34,31 @@ db = SQLAlchemy(app)
session = db.session
""":type: sqlalchemy.orm.Session"""
+# Mail settings
+mail = Mail(app)
+
migrate = Migrate(app, db)
ma = Marshmallow(app)
from crc import models
from crc import api
+from crc.api import admin
connexion_app.add_api('api.yml', base_path='/v1.0')
+
# Convert list of allowed origins to list of regexes
origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
cors = CORS(connexion_app.app, origins=origins_re)
-if app.config['ENABLE_SENTRY']:
+# Sentry error handling
+if app.config['SENTRY_ENVIRONMENT']:
sentry_sdk.init(
+ environment=app.config['SENTRY_ENVIRONMENT'],
dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915",
integrations=[FlaskIntegration()]
)
-# Jinja environment definition, used to render mail templates
-template_dir = os.getcwd() + '/crc/static/templates/mails'
-env = Environment(loader=FileSystemLoader(template_dir))
-# Mail settings
-mail = Mail(app)
-
print('=== USING THESE CONFIG SETTINGS: ===')
print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
@@ -88,3 +91,4 @@ def clear_db():
"""Load example data into the database."""
from example_data import ExampleDataLoader
ExampleDataLoader.clean_db()
+
diff --git a/crc/api.yml b/crc/api.yml
index 64f6086a..4c6ebd1b 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -502,7 +502,6 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/File"
- # /v1.0/workflow/0
/reference_file:
get:
operationId: crc.api.file.get_reference_files
@@ -565,6 +564,26 @@ paths:
type: string
format: binary
example: '
{}'.format(json_value)) + +class TaskEventView(AdminModelView): + column_filters = ['workflow_id', 'action'] + column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'form_data', 'date'] + column_formatters = { + 'form_data': json_formatter, + } + +admin = Admin(app) + +admin.add_view(StudyView(StudyModel, db.session)) +admin.add_view(ApprovalView(ApprovalModel, db.session)) +admin.add_view(UserView(UserModel, db.session)) +admin.add_view(WorkflowView(WorkflowModel, db.session)) +admin.add_view(FileView(FileModel, db.session)) +admin.add_view(TaskEventView(TaskEventModel, db.session)) diff --git a/crc/api/approval.py b/crc/api/approval.py index b3ee0fed..fd01e221 100644 --- a/crc/api/approval.py +++ b/crc/api/approval.py @@ -1,9 +1,11 @@ +import csv +import io import json import pickle from base64 import b64decode from datetime import datetime -from flask import g +from flask import g, make_response from crc import db, session from crc.api.common import ApiError @@ -88,71 +90,25 @@ def get_approvals_for_study(study_id=None): return results +def get_health_attesting_csv(): + records = ApprovalService.get_health_attesting_records() + si = io.StringIO() + cw = csv.writer(si) + cw.writerows(records) + output = make_response(si.getvalue()) + output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv" + output.headers["Content-type"] = "text/csv" + return output + + # ----- Begin descent into madness ---- # def get_csv(): """A damn lie, it's a json file. 
A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a man to do just about anything""" - approvals = ApprovalService.get_all_approvals(include_cancelled=False) - output = [] - errors = [] - for approval in approvals: - try: - if approval.status != ApprovalStatus.APPROVED.value: - continue - for related_approval in approval.related_approvals: - if related_approval.status != ApprovalStatus.APPROVED.value: - continue - workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() - data = json.loads(workflow.bpmn_workflow_json) - last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) - personnel = extract_value(last_task, 'personnel') - training_val = extract_value(last_task, 'RequiredTraining') - pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] - review_complete = 'AllRequiredTraining' in training_val - pi_uid = workflow.study.primary_investigator_id - pi_details = LdapService.user_info(pi_uid) - details = [] - details.append(pi_details) - for person in personnel: - uid = person['PersonnelComputingID']['value'] - details.append(LdapService.user_info(uid)) + content = ApprovalService.get_not_really_csv_content() - for person in details: - record = { - "study_id": approval.study_id, - "pi_uid": pi_details.uid, - "pi": pi_details.display_name, - "name": person.display_name, - "uid": person.uid, - "email": person.email_address, - "supervisor": "", - "review_complete": review_complete, - } - # We only know the PI's supervisor. 
- if person.uid == pi_details.uid: - record["supervisor"] = pi_supervisor + return content - output.append(record) - - except Exception as e: - errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) - return {"results": output, "errors": errors } - - -def extract_value(task, key): - if key in task['data']: - return pickle.loads(b64decode(task['data'][key]['__bytes__'])) - else: - return "" - - -def find_task(uuid, task): - if task['id']['__uuid__'] == uuid: - return task - for child in task['children']: - task = find_task(uuid, child) - if task: - return task # ----- come back to the world of the living ---- # diff --git a/crc/api/common.py b/crc/api/common.py index f8673a5b..cb527c73 100644 --- a/crc/api/common.py +++ b/crc/api/common.py @@ -25,6 +25,7 @@ class ApiError(Exception): instance.task_name = task.task_spec.description or "" instance.file_name = task.workflow.spec.file or "" instance.task_data = task.data + app.logger.error(message, exc_info=True) return instance @classmethod @@ -35,6 +36,7 @@ class ApiError(Exception): instance.task_name = task_spec.description or "" if task_spec._wf_spec: instance.file_name = task_spec._wf_spec.file + app.logger.error(message, exc_info=True) return instance @classmethod diff --git a/crc/api/tools.py b/crc/api/tools.py index d140e962..de30d10d 100644 --- a/crc/api/tools.py +++ b/crc/api/tools.py @@ -2,6 +2,7 @@ import io import json import connexion +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine from flask import send_file from jinja2 import Template, UndefinedError @@ -10,11 +11,12 @@ from crc.scripts.complete_template import CompleteTemplate from crc.scripts.script import Script import crc.scripts from crc.services.mails import send_test_email +from crc.services.workflow_processor import WorkflowProcessor def render_markdown(data, template): """ - Provides a quick way to very that a Jinja markdown template will work properly on a given json + Provides a quick 
way to very that a Jinja markdown template will work properly on a given json data structure. Useful for folks that are building these markdown templates. """ try: @@ -61,8 +63,22 @@ def list_scripts(): }) return script_meta + def send_email(address): """Just sends a quick test email to assure the system is working.""" if not address: address = "dan@sartography.com" - return send_test_email(address, [address]) \ No newline at end of file + return send_test_email(address, [address]) + + +def evaluate_python_expression(expression, body): + """Evaluate the given python expression, returning it's result. This is useful if the + front end application needs to do real-time processing on task data. If for instance + there is a hide expression that is based on a previous value in the same form.""" + try: + # fixme: The script engine should be pulled from Workflow Processor, + # but the one it returns overwrites the evaluate expression making it uncallable. + script_engine = PythonScriptEngine() + return script_engine.evaluate(expression, **body) + except Exception as e: + raise ApiError("expression_error", str(e)) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 655a85e7..a290d340 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -1,12 +1,13 @@ import uuid +from SpiffWorkflow.util.deep_merge import DeepMerge from flask import g - from crc import session, app from crc.api.common import ApiError, ApiErrorSchema from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema from crc.models.file import FileModel, LookupDataSchema -from crc.models.stats import TaskEventModel +from crc.models.study import StudyModel, WorkflowMetadata +from crc.models.task_event import TaskEventModel, TaskEventModelSchema, TaskEvent, TaskEventSchema from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \ WorkflowSpecCategoryModelSchema from crc.services.file_service 
import FileService @@ -41,7 +42,6 @@ def get_workflow_specification(spec_id): def validate_workflow_specification(spec_id): - errors = [] try: WorkflowService.test_spec(spec_id) @@ -57,7 +57,6 @@ def validate_workflow_specification(spec_id): return ApiErrorSchema(many=True).dump(errors) - def update_workflow_specification(spec_id, body): if spec_id is None: raise ApiError('unknown_spec', 'Please provide a valid Workflow Spec ID.') @@ -89,115 +88,95 @@ def delete_workflow_specification(spec_id): session.query(TaskEventModel).filter(TaskEventModel.workflow_spec_id == spec_id).delete() - # Delete all stats and workflow models related to this specification + # Delete all events and workflow models related to this specification for workflow in session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id): StudyService.delete_workflow(workflow) session.query(WorkflowSpecModel).filter_by(id=spec_id).delete() session.commit() -def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None): - """Returns an API model representing the state of the current workflow, if requested, and - possible, next_task is set to the current_task.""" - - nav_dict = processor.bpmn_workflow.get_nav_list() - navigation = [] - for nav_item in nav_dict: - spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) - if 'description' in nav_item: - nav_item['title'] = nav_item.pop('description') - # fixme: duplicate code from the workflow_service. Should only do this in one place. - if ' ' in nav_item['title']: - nav_item['title'] = nav_item['title'].partition(' ')[2] - else: - nav_item['title'] = "" - if spiff_task: - nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) - nav_item['title'] = nav_item['task'].title # Prefer the task title. 
- else: - nav_item['task'] = None - if not 'is_decision' in nav_item: - nav_item['is_decision'] = False - - navigation.append(NavigationItem(**nav_item)) - NavigationItemSchema().dump(nav_item) - - spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() - workflow_api = WorkflowApi( - id=processor.get_workflow_id(), - status=processor.get_status(), - next_task=None, - navigation=navigation, - workflow_spec_id=processor.workflow_spec_id, - spec_version=processor.get_version_string(), - is_latest_spec=processor.is_latest_spec, - total_tasks=len(navigation), - completed_tasks=processor.workflow_model.completed_tasks, - last_updated=processor.workflow_model.last_updated, - title=spec.display_name - ) - if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. - # This may or may not work, sometimes there is no next task to complete. - next_task = processor.next_task() - if next_task: - workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) - - return workflow_api - - def get_workflow(workflow_id, soft_reset=False, hard_reset=False): workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - workflow_api_model = __get_workflow_api_model(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) + WorkflowService.update_task_assignments(processor) return WorkflowApiSchema().dump(workflow_api_model) +def get_task_events(action): + """Provides a way to see a history of what has happened, or get a list of tasks that need your attention.""" + query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == g.user.uid) + if action: + query = query.filter(TaskEventModel.action == action) + events = query.all() + + # Turn the database records into something a little richer for the UI to use. 
+ task_events = [] + for event in events: + study = session.query(StudyModel).filter(StudyModel.id == event.study_id).first() + workflow = session.query(WorkflowModel).filter(WorkflowModel.id == event.workflow_id).first() + workflow_meta = WorkflowMetadata.from_workflow(workflow) + task_events.append(TaskEvent(event, study, workflow_meta)) + return TaskEventSchema(many=True).dump(task_events) + + def delete_workflow(workflow_id): StudyService.delete_workflow(workflow_id) def set_current_task(workflow_id, task_id): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() - user_uid = __get_user_uid(workflow_model.study.user_uid) processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.COMPLETED and task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + _verify_user_and_role(processor, spiff_task) + user_uid = g.user.uid + if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not move the token to a task who's state is not " "currently set to COMPLETE or READY.") # Only reset the token if the task doesn't already have it. - if task.state == task.COMPLETED: - task.reset_token(reset_data=False) # we could optionally clear the previous data. + if spiff_task.state == spiff_task.COMPLETED: + spiff_task.reset_token(reset_data=True) # Don't try to copy the existing data back into this task. 
+ processor.save() - WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET) - workflow_api_model = __get_workflow_api_model(processor, task) + WorkflowService.log_task_action(user_uid, processor, spiff_task, WorkflowService.TASK_ACTION_TOKEN_RESET) + WorkflowService.update_task_assignments(processor) + + workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task) return WorkflowApiSchema().dump(workflow_api_model) -def update_task(workflow_id, task_id, body): +def update_task(workflow_id, task_id, body, terminate_loop=None): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() - if workflow_model is None: raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404) elif workflow_model.study is None: raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404) - user_uid = __get_user_uid(workflow_model.study.user_uid) processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + _verify_user_and_role(processor, spiff_task) + if not spiff_task: + raise ApiError("empty_task", "Processor failed to obtain task.", status_code=404) + if spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. 
" "Consider calling a token reset to make this task Ready.") - task.update_data(body) - processor.complete_task(task) + + if terminate_loop: + spiff_task.terminate_loop() + spiff_task.update_data(body) + processor.complete_task(spiff_task) processor.do_engine_steps() processor.save() - WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE) - workflow_api_model = __get_workflow_api_model(processor) + # Log the action, and any pending task assignments in the event of lanes in the workflow. + WorkflowService.log_task_action(g.user.uid, processor, spiff_task, WorkflowService.TASK_ACTION_COMPLETE) + WorkflowService.update_task_assignments(processor) + + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) @@ -240,7 +219,7 @@ def delete_workflow_spec_category(cat_id): session.commit() -def lookup(workflow_id, field_id, query, limit): +def lookup(workflow_id, field_id, query=None, value=None, limit=10): """ given a field in a task, attempts to find the lookup table or function associated with that field and runs a full-text query against it to locate the values and @@ -248,16 +227,25 @@ def lookup(workflow_id, field_id, query, limit): Tries to be fast, but first runs will be very slow. 
""" workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first() - lookup_data = LookupService.lookup(workflow, field_id, query, limit) + lookup_data = LookupService.lookup(workflow, field_id, query, value, limit) return LookupDataSchema(many=True).dump(lookup_data) -def __get_user_uid(user_uid): - if 'user' in g: - if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid: - raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403) - else: - return g.user.uid +def _verify_user_and_role(processor, spiff_task): + """Assures the currently logged in user can access the given workflow and task, or + raises an error. + Allow administrators to modify tasks, otherwise assure that the current user + is allowed to edit or update the task. Will raise the appropriate error if user + is not authorized. """ - else: + if 'user' not in g: raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + + if g.user.uid in app.config['ADMIN_UIDS']: + return g.user.uid + + allowed_users = WorkflowService.get_users_assigned_to_task(processor, spiff_task) + if g.user.uid not in allowed_users: + raise ApiError.from_task("permission_denied", + f"This task must be completed by '{allowed_users}', " + f"but you are {g.user.uid}", spiff_task) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 53706a75..843609e0 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -29,20 +29,44 @@ class NavigationItem(object): self.state = state self.is_decision = is_decision self.task = task + self.lane = lane class Task(object): + ########################################################################## + # Custom properties and validations defined in Camunda form fields # + ########################################################################## + + # Repeating form section PROP_OPTIONS_REPEAT = "repeat" - PROP_OPTIONS_FILE = "spreadsheet.name" - 
PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column" - PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column" + + # Read-only field + PROP_OPTIONS_READ_ONLY = "read_only" + + # LDAP lookup PROP_LDAP_LOOKUP = "ldap.lookup" - VALIDATION_REQUIRED = "required" + + # Autocomplete field FIELD_TYPE_AUTO_COMPLETE = "autocomplete" + # Required field + VALIDATION_REQUIRED = "required" - def __init__(self, id, name, title, type, state, form, documentation, data, - multi_instance_type, multi_instance_count, multi_instance_index, process_name, properties): + # Enum field options values pulled from a spreadsheet + PROP_OPTIONS_FILE_NAME = "spreadsheet.name" + PROP_OPTIONS_FILE_VALUE_COLUMN = "spreadsheet.value.column" + PROP_OPTIONS_FILE_LABEL_COLUMN = "spreadsheet.label.column" + + # Enum field options values pulled from task data + PROP_OPTIONS_DATA_NAME = "data.name" + PROP_OPTIONS_DATA_VALUE_COLUMN = "data.value.column" + PROP_OPTIONS_DATA_LABEL_COLUMN = "data.label.column" + + ########################################################################## + + def __init__(self, id, name, title, type, state, lane, form, documentation, data, + multi_instance_type, multi_instance_count, multi_instance_index, + process_name, properties): self.id = id self.name = name self.title = title @@ -51,6 +75,7 @@ class Task(object): self.form = form self.documentation = documentation self.data = data + self.lane = lane self.multi_instance_type = multi_instance_type # Some tasks have a repeat behavior. self.multi_instance_count = multi_instance_count # This is the number of times the task could repeat. self.multi_instance_index = multi_instance_index # And the index of the currently repeating task. 
@@ -60,7 +85,7 @@ class Task(object): class OptionSchema(ma.Schema): class Meta: - fields = ["id", "name"] + fields = ["id", "name", "data"] class ValidationSchema(ma.Schema): @@ -70,15 +95,11 @@ class ValidationSchema(ma.Schema): class FormFieldPropertySchema(ma.Schema): class Meta: - fields = [ - "id", "value" - ] + fields = ["id", "value"] class FormFieldSchema(ma.Schema): class Meta: - fields = [ - "id", "type", "label", "default_value", "options", "validation", "properties", "value" - ] + fields = ["id", "type", "label", "default_value", "options", "validation", "properties", "value"] default_value = marshmallow.fields.String(required=False, allow_none=True) options = marshmallow.fields.List(marshmallow.fields.Nested(OptionSchema)) @@ -93,7 +114,7 @@ class FormSchema(ma.Schema): class TaskSchema(ma.Schema): class Meta: - fields = ["id", "name", "title", "type", "state", "form", "documentation", "data", "multi_instance_type", + fields = ["id", "name", "title", "type", "state", "lane", "form", "documentation", "data", "multi_instance_type", "multi_instance_count", "multi_instance_index", "process_name", "properties"] multi_instance_type = EnumField(MultiInstanceType) @@ -101,6 +122,7 @@ class TaskSchema(ma.Schema): form = marshmallow.fields.Nested(FormSchema, required=False, allow_none=True) title = marshmallow.fields.String(required=False, allow_none=True) process_name = marshmallow.fields.String(required=False, allow_none=True) + lane = marshmallow.fields.String(required=False, allow_none=True) @marshmallow.post_load def make_task(self, data, **kwargs): @@ -110,10 +132,11 @@ class TaskSchema(ma.Schema): class NavigationItemSchema(ma.Schema): class Meta: fields = ["id", "task_id", "name", "title", "backtracks", "level", "indent", "child_count", "state", - "is_decision", "task"] + "is_decision", "task", "lane"] unknown = INCLUDE task = marshmallow.fields.Nested(TaskSchema, dump_only=True, required=False, allow_none=True) backtracks = 
marshmallow.fields.String(required=False, allow_none=True) + lane = marshmallow.fields.String(required=False, allow_none=True) title = marshmallow.fields.String(required=False, allow_none=True) task_id = marshmallow.fields.String(required=False, allow_none=True) diff --git a/crc/models/approval.py b/crc/models/approval.py index 0592fbd1..df433fac 100644 --- a/crc/models/approval.py +++ b/crc/models/approval.py @@ -57,28 +57,16 @@ class Approval(object): @classmethod def from_model(cls, model: ApprovalModel): - # TODO: Reduce the code by iterating over model's dict keys - instance = cls() - instance.id = model.id - instance.study_id = model.study_id - instance.workflow_id = model.workflow_id - instance.version = model.version - instance.approver_uid = model.approver_uid - instance.status = model.status - instance.message = model.message - instance.date_created = model.date_created - instance.date_approved = model.date_approved - instance.version = model.version - instance.title = '' + args = dict((k, v) for k, v in model.__dict__.items() if not k.startswith('_')) + instance = cls(**args) instance.related_approvals = [] + instance.title = model.study.title if model.study else '' - if model.study: - instance.title = model.study.title try: instance.approver = LdapService.user_info(model.approver_uid) instance.primary_investigator = LdapService.user_info(model.study.primary_investigator_id) except ApiError as ae: - app.logger.error("Ldap lookup failed for approval record %i" % model.id) + app.logger.error(f'Ldap lookup failed for approval record {model.id}', exc_info=True) doc_dictionary = FileService.get_doc_dictionary() instance.associated_files = [] diff --git a/crc/models/email.py b/crc/models/email.py new file mode 100644 index 00000000..dc8c6834 --- /dev/null +++ b/crc/models/email.py @@ -0,0 +1,18 @@ +from flask_marshmallow.sqla import SQLAlchemyAutoSchema +from marshmallow import EXCLUDE +from sqlalchemy import func + +from crc import db +from crc.models.study 
import StudyModel + + +class EmailModel(db.Model): + __tablename__ = 'email' + id = db.Column(db.Integer, primary_key=True) + subject = db.Column(db.String) + sender = db.Column(db.String) + recipients = db.Column(db.String) + content = db.Column(db.String) + content_html = db.Column(db.String) + study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=True) + study = db.relationship(StudyModel) diff --git a/crc/models/file.py b/crc/models/file.py index 15a48709..8afed6cd 100644 --- a/crc/models/file.py +++ b/crc/models/file.py @@ -144,7 +144,6 @@ class LookupFileModel(db.Model): """Gives us a quick way to tell what kind of lookup is set on a form field. Connected to the file data model, so that if a new version of the same file is created, we can update the listing.""" - #fixme: What happens if they change the file associated with a lookup field? __tablename__ = 'lookup_file' id = db.Column(db.Integer, primary_key=True) workflow_spec_id = db.Column(db.String) @@ -153,6 +152,7 @@ class LookupFileModel(db.Model): file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id')) dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan") + class LookupDataModel(db.Model): __tablename__ = 'lookup_data' id = db.Column(db.Integer, primary_key=True) @@ -181,6 +181,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema): load_instance = True include_relationships = False include_fk = False # Includes foreign keys + exclude = ['id'] # Do not include the id field, it should never be used via the API. 
class SimpleFileSchema(ma.Schema): diff --git a/crc/models/ldap.py b/crc/models/ldap.py index 7e05eccd..802e0d36 100644 --- a/crc/models/ldap.py +++ b/crc/models/ldap.py @@ -29,6 +29,9 @@ class LdapModel(db.Model): affiliation=", ".join(entry.uvaPersonIAMAffiliation), sponsor_type=", ".join(entry.uvaPersonSponsoredType)) + def proper_name(self): + return f'{self.display_name} - ({self.uid})' + class LdapSchema(SQLAlchemyAutoSchema): class Meta: diff --git a/crc/models/stats.py b/crc/models/stats.py deleted file mode 100644 index c72df7d4..00000000 --- a/crc/models/stats.py +++ /dev/null @@ -1,32 +0,0 @@ -from marshmallow_sqlalchemy import SQLAlchemyAutoSchema - -from crc import db - - -class TaskEventModel(db.Model): - __tablename__ = 'task_event' - id = db.Column(db.Integer, primary_key=True) - study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False) - user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False) - workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False) - workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id')) - spec_version = db.Column(db.String) - action = db.Column(db.String) - task_id = db.Column(db.String) - task_name = db.Column(db.String) - task_title = db.Column(db.String) - task_type = db.Column(db.String) - task_state = db.Column(db.String) - mi_type = db.Column(db.String) - mi_count = db.Column(db.Integer) - mi_index = db.Column(db.Integer) - process_name = db.Column(db.String) - date = db.Column(db.DateTime) - - -class TaskEventModelSchema(SQLAlchemyAutoSchema): - class Meta: - model = TaskEventModel - load_instance = True - include_relationships = True - include_fk = True # Includes foreign keys diff --git a/crc/models/study.py b/crc/models/study.py index 540ee018..47d4eb8f 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -31,10 +31,8 @@ class StudyModel(db.Model): self.title = pbs.TITLE self.user_uid = pbs.NETBADGEID self.last_updated = 
pbs.DATE_MODIFIED - self.protocol_builder_status = ProtocolBuilderStatus.INCOMPLETE - if pbs.Q_COMPLETE: - self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE + self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE if pbs.HSRNUMBER: self.protocol_builder_status = ProtocolBuilderStatus.OPEN if self.on_hold: diff --git a/crc/models/task_event.py b/crc/models/task_event.py new file mode 100644 index 00000000..a6cb1a2d --- /dev/null +++ b/crc/models/task_event.py @@ -0,0 +1,64 @@ +from marshmallow import INCLUDE, fields +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema + +from crc import db, ma +from crc.models.study import StudyModel, StudySchema, WorkflowMetadataSchema, WorkflowMetadata +from crc.models.workflow import WorkflowModel + + +class TaskEventModel(db.Model): + __tablename__ = 'task_event' + id = db.Column(db.Integer, primary_key=True) + study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False) + user_uid = db.Column(db.String, nullable=False) # In some cases the unique user id may not exist in the db yet. + workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False) + workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id')) + spec_version = db.Column(db.String) + action = db.Column(db.String) + task_id = db.Column(db.String) + task_name = db.Column(db.String) + task_title = db.Column(db.String) + task_type = db.Column(db.String) + task_state = db.Column(db.String) + task_lane = db.Column(db.String) + form_data = db.Column(db.JSON) # And form data submitted when the task was completed. 
+ mi_type = db.Column(db.String) + mi_count = db.Column(db.Integer) + mi_index = db.Column(db.Integer) + process_name = db.Column(db.String) + date = db.Column(db.DateTime) + + +class TaskEventModelSchema(SQLAlchemyAutoSchema): + class Meta: + model = TaskEventModel + load_instance = True + include_relationships = True + include_fk = True # Includes foreign keys + + +class TaskEvent(object): + def __init__(self, model: TaskEventModel, study: StudyModel, workflow: WorkflowMetadata): + self.id = model.id + self.study = study + self.workflow = workflow + self.user_uid = model.user_uid + self.action = model.action + self.task_id = model.task_id + self.task_title = model.task_title + self.task_name = model.task_name + self.task_type = model.task_type + self.task_state = model.task_state + self.task_lane = model.task_lane + + +class TaskEventSchema(ma.Schema): + + study = fields.Nested(StudySchema, dump_only=True) + workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True) + + class Meta: + model = TaskEvent + additional = ["id", "user_uid", "action", "task_id", "task_title", + "task_name", "task_type", "task_state", "task_lane"] + unknown = INCLUDE diff --git a/crc/models/user.py b/crc/models/user.py index 55bba35f..221176bc 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -35,7 +35,7 @@ class UserModel(db.Model): } return jwt.encode( payload, - app.config.get('TOKEN_AUTH_SECRET_KEY'), + app.config.get('SECRET_KEY'), algorithm='HS256', ) @@ -47,7 +47,7 @@ class UserModel(db.Model): :return: integer|string """ try: - payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256') + payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'), algorithms='HS256') return payload except jwt.ExpiredSignatureError: raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.') diff --git a/crc/scripts/email.py b/crc/scripts/email.py new file mode 100644 index 00000000..855ec8a4 --- 
/dev/null +++ b/crc/scripts/email.py @@ -0,0 +1,90 @@ +import markdown +from jinja2 import Template + +from crc import app +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService +from crc.services.mails import send_mail + + +class Email(Script): + """This Script allows to be introduced as part of a workflow and called from there, specifying + recipients and content """ + + def get_description(self): + return """ +Creates an email, using the provided arguments (a list of UIDs)" +Each argument will be used to look up personal information needed for +the email creation. + +Example: +Email Subject ApprvlApprvr1 PIComputingID +""" + + def do_task_validate_only(self, task, *args, **kwargs): + self.get_subject(task, args) + self.get_users_info(task, args) + self.get_content(task) + + def do_task(self, task, *args, **kwargs): + args = [arg for arg in args if type(arg) == str] + subject = self.get_subject(task, args) + recipients = self.get_users_info(task, args) + content, content_html = self.get_content(task) + if recipients: + send_mail( + subject=subject, + sender=app.config['DEFAULT_SENDER'], + recipients=recipients, + content=content, + content_html=content_html + ) + + def get_users_info(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one argument. The " + "name of the variable in the task data that contains user" + "id to process. 
Multiple arguments are accepted.") + emails = [] + for arg in args: + try: + uid = task.workflow.script_engine.evaluate_expression(task, arg) + except Exception as e: + app.logger.error(f'Workflow engines could not parse {arg}', exc_info=True) + continue + user_info = LdapService.user_info(uid) + email = user_info.email_address + emails.append(user_info.email_address) + if not isinstance(email, str): + raise ApiError(code="invalid_argument", + message="The Email script requires at least 1 UID argument. The " + "name of the variable in the task data that contains subject and" + " user ids to process. This must point to an array or a string, but " + "it currently points to a %s " % emails.__class__.__name__) + + return emails + + def get_subject(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one subject argument. The " + "name of the variable in the task data that contains subject" + " to process. Multiple arguments are accepted.") + subject = args[0] + if not isinstance(subject, str): + raise ApiError(code="invalid_argument", + message="The Email script requires 1 argument. The " + "the name of the variable in the task data that contains user" + "ids to process. This must point to an array or a string, but " + "it currently points to a %s " % subject.__class__.__name__) + + return subject + + def get_content(self, task): + content = task.task_spec.documentation + template = Template(content) + rendered = template.render(task.data) + rendered_markdown = markdown.markdown(rendered).replace('\n', '
Hypertext Markup Language content for this email
' + + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, study_id=study.id) + + email_model = EmailModel.query.first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.study, study) + + subject = 'Email Subject - Empty study' + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html) + + email_model = EmailModel.query.order_by(EmailModel.id.desc()).first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.study, None) diff --git a/tests/emails/test_mails.py b/tests/emails/test_mails.py new file mode 100644 index 00000000..e9320f4d --- /dev/null +++ b/tests/emails/test_mails.py @@ -0,0 +1,113 @@ +from crc import mail +from crc.models.email import EmailModel +from crc.services.mails import ( + send_ramp_up_submission_email, + send_ramp_up_approval_request_email, + send_ramp_up_approval_request_first_review_email, + send_ramp_up_approved_email, + send_ramp_up_denied_email, + send_ramp_up_denied_email_to_approver +) +from tests.base_test import BaseTest + + +class TestMails(BaseTest): + + def setUp(self): + """Initial setup shared by all TestApprovals tests""" + self.load_example_data() + self.study = self.create_study() + self.workflow = self.create_workflow('random_fact') + + self.sender = 'sender@sartography.com' + self.recipients = ['recipient@sartography.com'] + self.primary_investigator = 'Dr. 
Bartlett' + self.approver_1 = 'Max Approver' + self.approver_2 = 'Close Reviewer' + + def test_send_ramp_up_submission_email(self): + with mail.record_messages() as outbox: + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_approval_request_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approval_request_first_review_email(self): + with mail.record_messages() as outbox: + send_ramp_up_approval_request_first_review_email( + self.sender, self.recipients, self.primary_investigator + ) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_ramp_up_approved_email(self): + with mail.record_messages() as outbox: + 
send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + + def test_send_ramp_up_denied_email(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + + def test_send_send_ramp_up_denied_email_to_approver(self): + with mail.record_messages() as outbox: + send_ramp_up_denied_email_to_approver( + self.sender, self.recipients, self.primary_investigator, self.approver_2 + ) + + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + self.assertIn(self.approver_2, outbox[0].body) + self.assertIn(self.approver_2, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) diff --git a/tests/test_file_service.py b/tests/files/test_file_service.py similarity index 98% rename from tests/test_file_service.py rename to tests/files/test_file_service.py index 1dea810c..dd95e458 100644 --- a/tests/test_file_service.py +++ b/tests/files/test_file_service.py @@ -61,14 +61,14 @@ class 
TestFileService(BaseTest): # Archive the file file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(1, len(file_models)) + self.assertEqual(1, len(file_models)) file_model = file_models[0] file_model.archived = True db.session.add(file_model) # Assure that the file no longer comes back. file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(0, len(file_models)) + self.assertEqual(0, len(file_models)) # Add the file again with different data FileService.add_workflow_file(workflow_id=workflow.id, diff --git a/tests/test_files_api.py b/tests/files/test_files_api.py similarity index 98% rename from tests/test_files_api.py rename to tests/files/test_files_api.py index 2d14a8b5..59e6c1f6 100644 --- a/tests/test_files_api.py +++ b/tests/files/test_files_api.py @@ -91,7 +91,6 @@ class TestFilesApi(BaseTest): content_type='multipart/form-data', headers=self.logged_in_headers()) self.assert_success(rv) - def test_archive_file_no_longer_shows_up(self): self.load_example_data() self.create_reference_document() @@ -109,21 +108,16 @@ class TestFilesApi(BaseTest): self.assert_success(rv) rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(1, len(json.loads(rv.get_data(as_text=True)))) + self.assertEqual(1, len(json.loads(rv.get_data(as_text=True)))) file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all() - self.assertEquals(1, len(file_model)) + self.assertEqual(1, len(file_model)) file_model[0].archived = True db.session.commit() rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(0, len(json.loads(rv.get_data(as_text=True)))) - - - - - + self.assertEqual(0, len(json.loads(rv.get_data(as_text=True)))) def test_set_reference_file(self): file_name = "irb_document_types.xls" @@ -285,8 +279,8 @@ class 
TestFilesApi(BaseTest): .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\ .filter(ApprovalModel.study_id == workflow.study_id).all() - self.assertEquals(1, len(approvals)) - self.assertEquals(1, len(approvals[0].approval_files)) + self.assertEqual(1, len(approvals)) + self.assertEqual(1, len(approvals[0].approval_files)) def test_change_primary_bpmn(self): diff --git a/tests/test_study_api.py b/tests/study/test_study_api.py similarity index 94% rename from tests/test_study_api.py rename to tests/study/test_study_api.py index cdae21c5..3b781f50 100644 --- a/tests/test_study_api.py +++ b/tests/study/test_study_api.py @@ -7,7 +7,8 @@ from unittest.mock import patch from crc import session, app from crc.models.protocol_builder import ProtocolBuilderStatus, \ ProtocolBuilderStudySchema -from crc.models.stats import TaskEventModel +from crc.models.approval import ApprovalStatus +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel, StudySchema from crc.models.workflow import WorkflowSpecModel, WorkflowModel from crc.services.file_service import FileService @@ -95,8 +96,21 @@ class TestStudyApi(BaseTest): # TODO: WRITE A TEST FOR STUDY FILES def test_get_study_has_details_about_approvals(self): - # TODO: WRITE A TEST FOR STUDY APPROVALS - pass + self.load_example_data() + full_study = self._create_study_workflow_approvals( + user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp", + approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value] + ) + + api_response = self.app.get('/v1.0/study/%i' % full_study['study'].id, + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response) + study = StudySchema().loads(api_response.get_data(as_text=True)) + + self.assertEqual(len(study.approvals), 2) + + for approval in study.approvals: + self.assertEqual(full_study['study'].title, approval['title']) def test_add_study(self): 
self.load_example_data() @@ -168,8 +182,6 @@ class TestStudyApi(BaseTest): num_open = 0 for study in json_data: - if study['protocol_builder_status'] == 'INCOMPLETE': # One study in user_studies.json is not q_complete - num_incomplete += 1 if study['protocol_builder_status'] == 'ABANDONED': # One study does not exist in user_studies.json num_abandoned += 1 if study['protocol_builder_status'] == 'ACTIVE': # One study is marked complete without HSR Number @@ -182,8 +194,8 @@ class TestStudyApi(BaseTest): self.assertGreater(num_db_studies_after, num_db_studies_before) self.assertEqual(num_abandoned, 1) self.assertEqual(num_open, 1) - self.assertEqual(num_active, 1) - self.assertEqual(num_incomplete, 1) + self.assertEqual(num_active, 2) + self.assertEqual(num_incomplete, 0) self.assertEqual(len(json_data), num_db_studies_after) self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after) diff --git a/tests/test_study_details_documents.py b/tests/study/test_study_details_documents.py similarity index 100% rename from tests/test_study_details_documents.py rename to tests/study/test_study_details_documents.py diff --git a/tests/test_study_service.py b/tests/study/test_study_service.py similarity index 88% rename from tests/test_study_service.py rename to tests/study/test_study_service.py index 1c482bcb..b436835f 100644 --- a/tests/test_study_service.py +++ b/tests/study/test_study_service.py @@ -183,7 +183,7 @@ class TestStudyService(BaseTest): @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_docs - def test_get_personnel(self, mock_docs): + def test_get_personnel_roles(self, mock_docs): self.load_example_data() # mock out the protocol builder @@ -191,9 +191,9 @@ class TestStudyService(BaseTest): mock_docs.return_value = json.loads(docs_response) workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case. 
- investigators = StudyService().get_investigators(workflow.study_id) + investigators = StudyService().get_investigators(workflow.study_id, all=True) - self.assertEqual(9, len(investigators)) + self.assertEqual(10, len(investigators)) # dhf8r is in the ldap mock data. self.assertEqual("dhf8r", investigators['PI']['user_id']) @@ -207,3 +207,26 @@ class TestStudyService(BaseTest): # No value is provided for Department Chair self.assertIsNone(investigators['DEPT_CH']['user_id']) + + @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_docs + def test_get_study_personnel(self, mock_docs): + self.load_example_data() + + # mock out the protocol builder + docs_response = self.protocol_builder_response('investigators.json') + mock_docs.return_value = json.loads(docs_response) + + workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case. + investigators = StudyService().get_investigators(workflow.study_id, all=False) + + self.assertEqual(5, len(investigators)) + + # dhf8r is in the ldap mock data. + self.assertEqual("dhf8r", investigators['PI']['user_id']) + self.assertEqual("Dan Funk", investigators['PI']['display_name']) # Data from ldap + self.assertEqual("Primary Investigator", investigators['PI']['label']) # Data from xls file. + self.assertEqual("Always", investigators['PI']['display']) # Data from xls file. + + # Both Alex and Aaron are SI, and both should be returned. 
+ self.assertEqual("ajl2j", investigators['SI']['user_id']) + self.assertEqual("cah3us", investigators['SI_2']['user_id']) diff --git a/tests/test_update_study_script.py b/tests/study/test_update_study_script.py similarity index 100% rename from tests/test_update_study_script.py rename to tests/study/test_update_study_script.py diff --git a/tests/test_lookup_service.py b/tests/test_lookup_service.py index b61e20e2..a27427f4 100644 --- a/tests/test_lookup_service.py +++ b/tests/test_lookup_service.py @@ -61,6 +61,15 @@ class TestLookupService(BaseTest): lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all() self.assertEqual(4, len(lookup_data)) + def test_lookup_based_on_id(self): + spec = BaseTest.load_test_spec('enum_options_from_file') + workflow = self.create_workflow('enum_options_from_file') + processor = WorkflowProcessor(workflow) + processor.do_engine_steps() + results = LookupService.lookup(workflow, "AllTheNames", "", value="1000", limit=10) + self.assertEqual(1, len(results), "It is possible to find an item based on the id, rather than as a search") + self.assertIsNotNone(results[0].data) + self.assertIsInstance(results[0].data, dict) def test_some_full_text_queries(self): @@ -114,6 +123,9 @@ class TestLookupService(BaseTest): results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10) self.assertEqual("1 Something", results[0].label, "special characters don't flake out") + results = LookupService.lookup(workflow, "AllTheNames", "1 Something", limit=10) + self.assertEqual("1 Something", results[0].label, "double spaces should not be an issue.") + # 1018 10000 Something Industry diff --git a/tests/test_looping_task.py b/tests/test_looping_task.py new file mode 100644 index 00000000..e56e0877 --- /dev/null +++ b/tests/test_looping_task.py @@ -0,0 +1,54 @@ +from unittest.mock import patch + +from crc import session +from crc.models.api_models import MultiInstanceType +from 
crc.models.study import StudyModel +from crc.models.workflow import WorkflowStatus +from crc.services.study_service import StudyService +from crc.services.workflow_processor import WorkflowProcessor +from crc.services.workflow_service import WorkflowService +from tests.base_test import BaseTest + + +class TestWorkflowProcessorLoopingTask(BaseTest): + """Tests the Workflow Processor as it deals with a Looping task""" + + def _populate_form_with_random_data(self, task): + api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) + WorkflowService.populate_form_with_random_data(task, api_task, required_only=False) + + def get_processor(self, study_model, spec_model): + workflow_model = StudyService._create_workflow_model(study_model, spec_model) + return WorkflowProcessor(workflow_model) + + def test_create_and_complete_workflow(self): + # This depends on getting a list of investigators back from the protocol builder. + + workflow = self.create_workflow('looping_task') + task = self.get_workflow_api(workflow).next_task + + self.assertEqual("GetNames", task.name) + + self.assertEqual(task.multi_instance_type, 'looping') + self.assertEqual(1, task.multi_instance_index) + self.complete_form(workflow,task,{'GetNames_CurrentVar':{'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) + task = self.get_workflow_api(workflow).next_task + + self.assertEqual(task.multi_instance_type,'looping') + self.assertEqual(2, task.multi_instance_index) + self.complete_form(workflow, + task, + {'GetNames_CurrentVar':{'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, + terminate_loop=True) + + task = self.get_workflow_api(workflow).next_task + self.assertEqual(task.name,'Event_End') + self.assertEqual(workflow.completed_tasks,workflow.total_tasks) + self.assertEqual(task.data, {'GetNames_CurrentVar': 2, + 'GetNames': {'1': {'Name': 'Peter Norvig', + 'Nickname': 'Pete'}, + '2': {'Name': 'Stuart Russell', + 'Nickname': 'Stu'}}}) + + + diff --git a/tests/test_mails.py 
b/tests/test_mails.py deleted file mode 100644 index 15a01583..00000000 --- a/tests/test_mails.py +++ /dev/null @@ -1,55 +0,0 @@ - -from tests.base_test import BaseTest - -from crc.services.mails import ( - send_ramp_up_submission_email, - send_ramp_up_approval_request_email, - send_ramp_up_approval_request_first_review_email, - send_ramp_up_approved_email, - send_ramp_up_denied_email, - send_ramp_up_denied_email_to_approver -) - - -class TestMails(BaseTest): - - def setUp(self): - self.sender = 'sender@sartography.com' - self.recipients = ['recipient@sartography.com'] - self.primary_investigator = 'Dr. Bartlett' - self.approver_1 = 'Max Approver' - self.approver_2 = 'Close Reviewer' - - def test_send_ramp_up_submission_email(self): - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_email(self): - send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) - self.assertTrue(True) - - def test_send_ramp_up_approval_request_first_review_email(self): - send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.primary_investigator - ) - self.assertTrue(True) - - def test_send_ramp_up_approved_email(self): - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) - - def test_send_ramp_up_denied_email(self): - send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) - - def test_send_send_ramp_up_denied_email_to_approver(self): - send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.primary_investigator, self.approver_2 - ) - self.assertTrue(True) diff --git 
a/tests/test_protocol_builder.py b/tests/test_protocol_builder.py index e5b75632..2a77ec05 100644 --- a/tests/test_protocol_builder.py +++ b/tests/test_protocol_builder.py @@ -24,7 +24,7 @@ class TestProtocolBuilder(BaseTest): mock_get.return_value.text = self.protocol_builder_response('investigators.json') response = ProtocolBuilderService.get_investigators(self.test_study_id) self.assertIsNotNone(response) - self.assertEqual(3, len(response)) + self.assertEqual(5, len(response)) self.assertEqual("DC", response[0]["INVESTIGATORTYPE"]) self.assertEqual("Department Contact", response[0]["INVESTIGATORTYPEFULL"]) self.assertEqual("asd3v", response[0]["NETBADGEID"]) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 654b777e..8284313d 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -4,6 +4,7 @@ import random from unittest.mock import patch from tests.base_test import BaseTest + from crc import session, app from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema from crc.models.file import FileModelSchema @@ -12,6 +13,18 @@ from crc.models.workflow import WorkflowStatus class TestTasksApi(BaseTest): + def assert_options_populated(self, results, lookup_data_keys): + option_keys = ['value', 'label', 'data'] + self.assertIsInstance(results, list) + for result in results: + for option_key in option_keys: + self.assertTrue(option_key in result, 'should have value, label, and data properties populated') + self.assertIsNotNone(result[option_key], '%s should not be None' % option_key) + + self.assertIsInstance(result['data'], dict) + for lookup_data_key in lookup_data_keys: + self.assertTrue(lookup_data_key in result['data'], 'should have all lookup data columns populated') + def test_get_current_user_tasks(self): self.load_example_data() workflow = self.create_workflow('random_fact') @@ -250,7 +263,7 @@ class TestTasksApi(BaseTest): self.assertEqual(4, len(navigation)) # Start task, form_task, multi_task, end 
task self.assertEqual("UserTask", workflow.next_task.type) self.assertEqual(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type) - self.assertEqual(9, workflow.next_task.multi_instance_count) + self.assertEqual(5, workflow.next_task.multi_instance_count) # Assure that the names for each task are properly updated, so they aren't all the same. self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name']) @@ -270,6 +283,80 @@ class TestTasksApi(BaseTest): self.assert_success(rv) results = json.loads(rv.get_data(as_text=True)) self.assertEqual(5, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + + def test_lookup_endpoint_for_task_field_using_lookup_entry_id(self): + self.load_example_data() + workflow = self.create_workflow('enum_options_with_search') + # get the first form in the two form workflow. + workflow = self.get_workflow_api(workflow) + task = workflow.next_task + field_id = task.form['fields'][0]['id'] + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?query=%s&limit=5' % + (workflow.id, field_id, 'c'), # All records with a word that starts with 'c' + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(5, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' % + (workflow.id, field_id, results[0]['value']), # All records with a word that starts with 'c' + headers=self.logged_in_headers(), + content_type="application/json") + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(1, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + self.assertNotIn('id', results[0], "Don't include the internal id, that can be very confusing, and 
should not be used.") + + def test_lookup_endpoint_also_works_for_enum(self): + # Naming here get's a little confusing. fields can be marked as enum or autocomplete. + # In the event of an auto-complete it's a type-ahead search field, for an enum the + # the key/values from the spreadsheet are added directly to the form and it shows up as + # a dropdown. This tests the case of wanting to get additional data when a user selects + # something from a dropdown. + self.load_example_data() + workflow = self.create_workflow('enum_options_from_file') + # get the first form in the two form workflow. + workflow = self.get_workflow_api(workflow) + task = workflow.next_task + field_id = task.form['fields'][0]['id'] + option_id = task.form['fields'][0]['options'][0]['id'] + rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' % + (workflow.id, field_id, option_id), # All records with a word that starts with 'c' + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + results = json.loads(rv.get_data(as_text=True)) + self.assertEqual(1, len(results)) + self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) + self.assertIsInstance(results[0]['data'], dict) + + def test_enum_from_task_data(self): + self.load_example_data() + workflow = self.create_workflow('enum_options_from_task_data') + # get the first form in the two form workflow. + workflow_api = self.get_workflow_api(workflow) + task = workflow_api.next_task + + workflow_api = self.complete_form(workflow, task, {'invitees': [ + {'first_name': 'Alistair', 'last_name': 'Aardvark', 'age': 43, 'likes_pie': True, 'num_lumps': 21, 'secret_id': 'Antimony', 'display_name': 'Professor Alistair A. Aardvark'}, + {'first_name': 'Berthilda', 'last_name': 'Binturong', 'age': 12, 'likes_pie': False, 'num_lumps': 34, 'secret_id': 'Beryllium', 'display_name': 'Dr. Berthilda B. 
Binturong'}, + {'first_name': 'Chesterfield', 'last_name': 'Capybara', 'age': 32, 'likes_pie': True, 'num_lumps': 1, 'secret_id': 'Cadmium', 'display_name': 'The Honorable C. C. Capybara'}, + ]}) + task = workflow_api.next_task + + field_id = task.form['fields'][0]['id'] + options = task.form['fields'][0]['options'] + self.assertEqual(3, len(options)) + option_id = options[0]['id'] + self.assertEqual('Professor Alistair A. Aardvark', options[0]['name']) + self.assertEqual('Dr. Berthilda B. Binturong', options[1]['name']) + self.assertEqual('The Honorable C. C. Capybara', options[2]['name']) + self.assertEqual('Alistair', options[0]['data']['first_name']) + self.assertEqual('Berthilda', options[1]['data']['first_name']) + self.assertEqual('Chesterfield', options[2]['data']['first_name']) def test_lookup_endpoint_for_task_ldap_field_lookup(self): self.load_example_data() @@ -285,6 +372,9 @@ class TestTasksApi(BaseTest): content_type="application/json") self.assert_success(rv) results = json.loads(rv.get_data(as_text=True)) + self.assert_options_populated(results, ['telephone_number', 'affiliation', 'uid', 'title', + 'given_name', 'department', 'date_cached', 'sponsor_type', + 'display_name', 'email_address']) self.assertEqual(1, len(results)) def test_sub_process(self): @@ -299,13 +389,13 @@ class TestTasksApi(BaseTest): self.assertEqual("UserTask", task.type) self.assertEqual("Activity_A", task.name) self.assertEqual("My Sub Process", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldA": "Dan"}) task = workflow_api.next_task self.assertIsNotNone(task) self.assertEqual("Activity_B", task.name) self.assertEqual("Sub Workflow Example", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldB": "Dan"}) self.assertEqual(WorkflowStatus.complete, workflow_api.status) def 
test_update_task_resets_token(self): @@ -363,17 +453,25 @@ class TestTasksApi(BaseTest): workflow = self.create_workflow('multi_instance_parallel') workflow_api = self.get_workflow_api(workflow) - self.assertEqual(12, len(workflow_api.navigation)) + self.assertEqual(8, len(workflow_api.navigation)) ready_items = [nav for nav in workflow_api.navigation if nav['state'] == "READY"] - self.assertEqual(9, len(ready_items)) + self.assertEqual(5, len(ready_items)) self.assertEqual("UserTask", workflow_api.next_task.type) - self.assertEqual("MutiInstanceTask",workflow_api.next_task.name) - self.assertEqual("more information", workflow_api.next_task.title) + self.assertEqual("MultiInstanceTask",workflow_api.next_task.name) + self.assertEqual("Primary Investigator", workflow_api.next_task.title) - for i in random.sample(range(9), 9): + for i in random.sample(range(5), 5): task = TaskSchema().load(ready_items[i]['task']) - self.complete_form(workflow, task, {"investigator":{"email": "dhf8r@virginia.edu"}}) + rv = self.app.put('/v1.0/workflow/%i/task/%s/set_token' % (workflow.id, task.id), + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + workflow = WorkflowApiSchema().load(json_data) + data = workflow.next_task.data + data['investigator']['email'] = "dhf8r@virginia.edu" + self.complete_form(workflow, task, data) #tasks = self.get_workflow_api(workflow).user_tasks workflow = self.get_workflow_api(workflow) diff --git a/tests/test_tools_api.py b/tests/test_tools_api.py index c6f543c1..3ddf9fea 100644 --- a/tests/test_tools_api.py +++ b/tests/test_tools_api.py @@ -37,3 +37,12 @@ class TestStudyApi(BaseTest): self.assertTrue(len(scripts) > 1) self.assertIsNotNone(scripts[0]['name']) self.assertIsNotNone(scripts[0]['description']) + + def test_eval_hide_expression(self): + """Assures we can use python to process a hide expression fron the front end""" + rv = 
self.app.put('/v1.0/eval?expression=x.y==2', + data='{"x":{"y":2}}', follow_redirects=True, + content_type='application/json', + headers=self.logged_in_headers()) + self.assert_success(rv) + self.assertEqual("true", rv.get_data(as_text=True).strip()) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py new file mode 100644 index 00000000..6104641c --- /dev/null +++ b/tests/test_user_roles.py @@ -0,0 +1,202 @@ +import json + +from tests.base_test import BaseTest +from crc.models.workflow import WorkflowStatus +from crc import db +from crc.api.common import ApiError +from crc.models.task_event import TaskEventModel, TaskEventSchema +from crc.services.workflow_service import WorkflowService + + +class TestTasksApi(BaseTest): + + def test_raise_error_if_role_does_not_exist_in_data(self): + workflow = self.create_workflow('roles', as_user="lje5u") + workflow_api = self.get_workflow_api(workflow, user_uid="lje5u") + data = workflow_api.next_task.data + # User lje5u can complete the first task + self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u") + + # The next task is a supervisor task, and should raise an error if the role + # information is not in the task data. 
+ workflow_api = self.get_workflow_api(workflow, user_uid="lje5u") + data = workflow_api.next_task.data + data["approved"] = True + result = self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u", + error_code="permission_denied") + + def test_validation_of_workflow_fails_if_workflow_does_not_define_user_for_lane(self): + error = None + try: + workflow = self.create_workflow('invalid_roles', as_user="lje5u") + WorkflowService.test_spec(workflow.workflow_spec_id) + except ApiError as ae: + error = ae + self.assertIsNotNone(error, "An error should be raised.") + self.assertEquals("invalid_role", error.code) + + def test_raise_error_if_user_does_not_have_the_correct_role(self): + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # But she can not complete the supervisor role. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + data = workflow_api.next_task.data + data["approval"] = True + result = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid, + error_code="permission_denied") + + # Only her supervisor can do that. 
+ self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + def test_nav_includes_lanes(self): + submitter = self.create_user(uid='lje5u') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals("supervisor", nav[1]['lane']) + + def test_get_outstanding_tasks_awaiting_current_user(self): + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # At this point there should be a task_log with an action of Lane Change on it for + # the supervisor. + task_logs = db.session.query(TaskEventModel). \ + filter(TaskEventModel.user_uid == supervisor.uid). \ + filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).all() + self.assertEquals(1, len(task_logs)) + + # A call to the /task endpoint as the supervisor user should return a list of + # tasks that need their attention. 
+ rv = self.app.get('/v1.0/task_events?action=ASSIGNMENT', + headers=self.logged_in_headers(supervisor), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + tasks = TaskEventSchema(many=True).load(json_data) + self.assertEquals(1, len(tasks)) + self.assertEquals(workflow.id, tasks[0]['workflow']['id']) + self.assertEquals(workflow.study.id, tasks[0]['study']['id']) + + # Assure we can say something sensible like: + # You have a task called "Approval" to be completed in the "Supervisor Approval" workflow + # for the study 'Why dogs are stinky' managed by user "Jane Smith (js42x)", + # please check here to complete the task. + # Display name isn't set in the tests, so just checking name, but the full workflow details are included. + # I didn't delve into the full user details to keep things decoupled from ldap, so you just get the + # uid back, but could query to get the full entry. + self.assertEquals("roles", tasks[0]['workflow']['name']) + self.assertEquals("Beer consumption in the bipedal software engineer", tasks[0]['study']['title']) + self.assertEquals("lje5u", tasks[0]['study']['user_uid']) + + # Completing the next step of the workflow will close the task. + data['approval'] = True + self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + def test_navigation_and_current_task_updates_through_workflow(self): + + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', as_user=submitter.uid) + + # Navigation as Submitter with ready task. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('READY', nav[0]['state']) # First item is ready, no progress yet. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. 
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. + self.assertEquals('NOOP', nav[3]['state']) # Approved Path, has no operation + self.assertEquals('NOOP', nav[4]['state']) # Rejected Path, has no operation. + self.assertEquals('READY', workflow_api.next_task.state) + + # Navigation as Submitter after handoff to supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals('COMPLETED', nav[0]['state']) # First item is ready, no progress yet. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. + self.assertEquals('LOCKED', workflow_api.next_task.state) + # In the event the next task is locked, we should say something sensible here. + # It is possible to look at the role of the task, and say The next task "TASK TITLE" will + # be handled by 'dhf8r', who is full-filling the role of supervisor. the Task Data + # is guaranteed to have a supervisor attribute in it that will contain the users uid, which + # could be looked up through an ldap service. + self.assertEquals('supervisor', workflow_api.next_task.lane) + + + # Navigation as Supervisor + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked. + self.assertEquals('READY', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked. 
+ self.assertEquals('READY', workflow_api.next_task.state) + + data = workflow_api.next_task.data + data["approval"] = False + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + + # Navigation as Supervisor, after completing task. + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked. + self.assertEquals('COMPLETED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('COMPLETED', nav[2]['state']) # third item is a gateway, and is now complete. + self.assertEquals('LOCKED', workflow_api.next_task.state) + + # Navigation as Submitter, coming back in to a rejected workflow to view the rejection message. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('COMPLETED', nav[0]['state']) # First item belongs to the submitter, and is locked. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. + self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. + self.assertEquals('READY', workflow_api.next_task.state) + + # Navigation as Submitter, re-completing the original request a second time, and sending it for review. + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + nav = workflow_api.navigation + self.assertEquals(5, len(nav)) + self.assertEquals('COMPLETED', nav[0]['state']) # We still have some issues here, the navigation will be off when looping back. + self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. 
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. + self.assertEquals('READY', workflow_api.next_task.state) + + data["favorite_color"] = "blue" + data["quest"] = "to seek the holy grail" + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals('LOCKED', workflow_api.next_task.state) + + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + self.assertEquals('READY', workflow_api.next_task.state) + + data = workflow_api.next_task.data + data["approval"] = True + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals('LOCKED', workflow_api.next_task.state) + + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + self.assertEquals('COMPLETED', workflow_api.next_task.state) + self.assertEquals('EndEvent', workflow_api.next_task.type) # Are are at the end. + self.assertEquals(WorkflowStatus.complete, workflow_api.status) \ No newline at end of file diff --git a/tests/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py similarity index 88% rename from tests/test_workflow_processor.py rename to tests/workflow/test_workflow_processor.py index b3f6c374..a51f029d 100644 --- a/tests/test_workflow_processor.py +++ b/tests/workflow/test_workflow_processor.py @@ -187,7 +187,7 @@ class TestWorkflowProcessor(BaseTest): file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn') self.replace_file("two_forms.bpmn", file_path) - # Attemping a soft update on a structural change should raise a sensible error. + # Attempting a soft update on a structural change should raise a sensible error. 
with self.assertRaises(ApiError) as context: processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True) self.assertEqual("unexpected_workflow_structure", context.exception.code) @@ -270,53 +270,6 @@ class TestWorkflowProcessor(BaseTest): processor = self.get_processor(study, workflow_spec_model) self.assertTrue(processor.get_version_string().startswith('v2.1.1')) - def test_restart_workflow(self): - self.load_example_data() - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"key": "Value"} - processor.complete_task(task) - task_before_restart = processor.next_task() - processor.hard_reset() - task_after_restart = processor.next_task() - - self.assertNotEqual(task.get_name(), task_before_restart.get_name()) - self.assertEqual(task.get_name(), task_after_restart.get_name()) - self.assertEqual(task.data, task_after_restart.data) - - def test_soft_reset(self): - self.load_example_data() - - # Start the two_forms workflow, and enter some data in the first form. - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"color": "blue"} - processor.complete_task(task) - - # Modify the specification, with a minor text change. - file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn') - self.replace_file("two_forms.bpmn", file_path) - - # Setting up another processor should not error out, but doesn't pick up the update. 
- processor.workflow_model.bpmn_workflow_json = processor.serialize() - processor2 = WorkflowProcessor(processor.workflow_model) - self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description) - self.assertNotEqual("# This is some documentation I wanted to add.", - processor2.bpmn_workflow.last_task.task_spec.documentation) - - # You can do a soft update and get the right response. - processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True) - self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description) - self.assertEqual("# This is some documentation I wanted to add.", - processor3.bpmn_workflow.last_task.task_spec.documentation) - - def test_hard_reset(self): self.load_example_data() @@ -344,8 +297,10 @@ class TestWorkflowProcessor(BaseTest): # Do a hard reset, which should bring us back to the beginning, but retain the data. processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True) self.assertEqual("Step 1", processor3.next_task().task_spec.description) - self.assertEqual({"color": "blue"}, processor3.next_task().data) - processor3.complete_task(processor3.next_task()) + self.assertTrue(processor3.is_latest_spec) # Now at version 2. 
+ task = processor3.next_task() + task.data = {"color": "blue"} + processor3.complete_task(task) self.assertEqual("New Step", processor3.next_task().task_spec.description) self.assertEqual("blue", processor3.next_task().data["color"]) @@ -413,4 +368,19 @@ class TestWorkflowProcessor(BaseTest): task.task_spec.form.fields.append(field) with self.assertRaises(ApiError): - self._populate_form_with_random_data(task) \ No newline at end of file + self._populate_form_with_random_data(task) + + + def test_get_role_by_name(self): + self.load_example_data() + workflow_spec_model = self.load_test_spec("roles") + study = session.query(StudyModel).first() + processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() + tasks = processor.next_user_tasks() + task = tasks[0] + self._populate_form_with_random_data(task) + processor.complete_task(task) + supervisor_task = processor.next_user_tasks()[0] + self.assertEquals("supervisor", supervisor_task.task_spec.lane) + diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py similarity index 62% rename from tests/test_workflow_processor_multi_instance.py rename to tests/workflow/test_workflow_processor_multi_instance.py index aefb73f1..a67cae7f 100644 --- a/tests/test_workflow_processor_multi_instance.py +++ b/tests/workflow/test_workflow_processor_multi_instance.py @@ -1,13 +1,13 @@ from unittest.mock import patch +from tests.base_test import BaseTest -from crc import session +from crc import session, db from crc.models.api_models import MultiInstanceType from crc.models.study import StudyModel -from crc.models.workflow import WorkflowStatus +from crc.models.workflow import WorkflowStatus, WorkflowModel from crc.services.study_service import StudyService from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService -from tests.base_test import BaseTest class 
TestWorkflowProcessorMultiInstance(BaseTest): @@ -32,7 +32,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): 'error': 'Unable to locate a user with id asd3v in LDAP'}} def _populate_form_with_random_data(self, task): - WorkflowProcessor.populate_form_with_random_data(task) + WorkflowService.populate_form_with_random_data(task) def get_processor(self, study_model, spec_model): workflow_model = StudyService._create_workflow_model(study_model, spec_model) @@ -51,51 +51,72 @@ class TestWorkflowProcessorMultiInstance(BaseTest): self.assertIsNotNone(processor) self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) processor.bpmn_workflow.do_engine_steps() - next_user_tasks = processor.next_user_tasks() - self.assertEqual(1, len(next_user_tasks)) - - task = next_user_tasks[0] + workflow_api = WorkflowService.processor_to_workflow_api(processor) + self.assertIsNotNone(workflow_api) + self.assertIsNotNone(workflow_api.next_task) + # 1st investigator + api_task = workflow_api.next_task self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) - self.assertEqual("dhf8r", task.data["investigator"]["user_id"]) - - self.assertEqual("MutiInstanceTask", task.get_name()) - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type) + self.assertEqual("dhf8r", api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(1, api_task.multi_instance_index) - task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = 
WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", api_task.name) - task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) + # 2nd investigator + api_task = workflow_api.next_task + self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual(None, api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(2, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + # 3rd investigator + api_task = workflow_api.next_task + self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual("asd3v", api_task.data["investigator"]["user_id"]) + self.assertTrue(api_task.name.startswith("MultiInstanceTask")) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(3, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() - task = processor.bpmn_workflow.last_task + workflow_api = WorkflowService.processor_to_workflow_api(processor) + + # Last task + api_task = workflow_api.next_task expected = self.mock_investigator_response expected['PI']['email'] = "asd3v@virginia.edu" expected['SC_I']['email'] = "asdf32@virginia.edu" expected['DC']['email'] = "dhf8r@virginia.edu" - 
self.assertEqual(expected, - task.data['StudyInfo']['investigators']) + self.assertEqual(expected, api_task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, processor.get_status()) + def refresh_processor(self, processor): + """Saves the processor, and returns a new one read in from the database""" + processor.save() + processor = WorkflowProcessor(processor.workflow_model) + return processor + @patch('crc.services.study_service.StudyService.get_investigators') def test_create_and_complete_workflow_parallel(self, mock_study_service): """Unlike the test above, the parallel task allows us to complete the items in any order.""" @@ -107,11 +128,15 @@ class TestWorkflowProcessorMultiInstance(BaseTest): workflow_spec_model = self.load_test_spec("multi_instance_parallel") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor = self.refresh_processor(processor) processor.bpmn_workflow.do_engine_steps() # In the Parallel instance, there should be three tasks, all of them in the ready state. next_user_tasks = processor.next_user_tasks() self.assertEqual(3, len(next_user_tasks)) + # There should be six tasks in the navigation: start event, the script task, end event, and three tasks + # for the three executions of hte multi-instance. + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # We can complete the tasks out of order. task = next_user_tasks[2] @@ -121,23 +146,31 @@ class TestWorkflowProcessorMultiInstance(BaseTest): api_task = WorkflowService.spiff_task_to_api_task(task) self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + + # Assure navigation picks up the label of the current element variable. 
+ nav = WorkflowService.processor_to_workflow_api(processor, task).navigation + self.assertEquals("Primary Investigator", nav[2].title) + + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[0] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", api_task.name) + self.assertEqual("MultiInstanceTask", api_task.name) task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[1] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) + self.assertEqual("MultiInstanceTask", task.get_name()) task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # Completing the tasks out of order, still provides the correct information. 
expected = self.mock_investigator_response @@ -148,3 +181,4 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, processor.get_status()) + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) diff --git a/tests/test_workflow_service.py b/tests/workflow/test_workflow_service.py similarity index 82% rename from tests/test_workflow_service.py rename to tests/workflow/test_workflow_service.py index 9f3ceda1..9ae49b5a 100644 --- a/tests/test_workflow_service.py +++ b/tests/workflow/test_workflow_service.py @@ -1,7 +1,16 @@ +import json +import unittest + from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService +from SpiffWorkflow import Task as SpiffTask, WorkflowException +from example_data import ExampleDataLoader +from crc import db +from crc.models.task_event import TaskEventModel +from crc.models.api_models import Task +from crc.api.common import ApiError class TestWorkflowService(BaseTest): @@ -78,4 +87,9 @@ class TestWorkflowService(BaseTest): task = processor.next_task() task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) WorkflowService.populate_form_with_random_data(task, task_api, required_only=False) - self.assertTrue(isinstance(task.data["sponsor"], dict)) \ No newline at end of file + self.assertTrue(isinstance(task.data["sponsor"], dict)) + + def test_dmn_evaluation_errors_in_oncomplete_raise_api_errors_during_validation(self): + workflow_spec_model = self.load_test_spec("decision_table_invalid") + with self.assertRaises(ApiError): + WorkflowService.test_spec(workflow_spec_model.id) diff --git a/tests/test_workflow_spec_api.py b/tests/workflow/test_workflow_spec_api.py similarity index 100% rename from tests/test_workflow_spec_api.py rename to tests/workflow/test_workflow_spec_api.py diff --git 
a/tests/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py similarity index 91% rename from tests/test_workflow_spec_validation_api.py rename to tests/workflow/test_workflow_spec_validation_api.py index cb9b6b77..0c17892e 100644 --- a/tests/test_workflow_spec_validation_api.py +++ b/tests/workflow/test_workflow_spec_validation_api.py @@ -1,4 +1,5 @@ import json +import unittest from unittest.mock import patch from tests.base_test import BaseTest @@ -51,9 +52,6 @@ class TestWorkflowSpecValidation(BaseTest): app.config['PB_ENABLED'] = True self.validate_all_loaded_workflows() - def test_successful_validation_of_rrt_workflows(self): - self.load_example_data(use_rrt_data=True) - self.validate_all_loaded_workflows() def validate_all_loaded_workflows(self): workflows = session.query(WorkflowSpecModel).all() @@ -66,7 +64,6 @@ class TestWorkflowSpecValidation(BaseTest): errors.extend(ApiErrorSchema(many=True).load(json_data)) self.assertEqual(0, len(errors), json.dumps(errors)) - def test_invalid_expression(self): self.load_example_data() errors = self.validate_workflow("invalid_expression") @@ -92,12 +89,21 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script") self.assertEqual(2, len(errors)) - self.assertEqual("workflow_validation_exception", errors[0]['code']) + self.assertEqual("error_loading_workflow", errors[0]['code']) self.assertTrue("NoSuchScript" in errors[0]['message']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) self.assertEqual("invalid_script.bpmn", errors[0]['file_name']) + def test_invalid_script2(self): + self.load_example_data() + errors = self.validate_workflow("invalid_script2") + self.assertEqual(2, len(errors)) + self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) + 
self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) + self.assertEqual("invalid_script2.bpmn", errors[0]['file_name']) + def test_repeating_sections_correctly_populated(self): self.load_example_data() spec_model = self.load_test_spec('repeat_form')