Merge branch 'dev' into add-by-user-61

# Conflicts:
#	tests/files/test_files_api.py
mike cullerton 2021-07-07 10:46:50 -04:00
commit b0cf74fa3d
31 changed files with 410 additions and 508 deletions


@@ -46,7 +46,6 @@ werkzeug = "*"
xlrd = "*"
xlsxwriter = "*"
pygithub = "*"
python-levenshtein = "*"
apscheduler = "*"
[requires]

Pipfile.lock generated

@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "2d74273fabb4ccda79f76e59ed2595d68a72eaa4a56bd4e04d0e7fbd9489039e"
"sha256": "ad259e41c4e42c8818992a6e5ce7436d35755a02e7f12688bed01e0250a3d668"
},
"pipfile-spec": 6,
"requires": {
@@ -31,14 +31,6 @@
"index": "pypi",
"version": "==1.6.5"
},
"amqp": {
"hashes": [
"sha256:03e16e94f2b34c31f8bf1206d8ddd3ccaa4c315f7f6a1879b7b1210d229568c2",
"sha256:493a2ac6788ce270a2f6a765b017299f60c1998f5a8617908ee9be082f7300fb"
],
"markers": "python_version >= '3.6'",
"version": "==5.0.6"
},
"aniso8601": {
"hashes": [
"sha256:1d2b7ef82963909e93c4f24ce48d4de9e66009a21bf1c1e1c85bdd0812fe412f",
@@ -59,7 +51,6 @@
"sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
"sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==21.2.0"
},
"babel": {
@@ -67,7 +58,6 @@
"sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9",
"sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.9.1"
},
"bcrypt": {
@@ -80,7 +70,6 @@
"sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1",
"sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"
],
"markers": "python_version >= '3.6'",
"version": "==3.2.0"
},
"beautifulsoup4": {
@@ -91,27 +80,12 @@
],
"version": "==4.9.3"
},
"billiard": {
"hashes": [
"sha256:299de5a8da28a783d51b197d496bef4f1595dd023a93a4f59dde1886ae905547",
"sha256:87103ea78fa6ab4d5c751c4909bcff74617d985de7fa8b672cf8618afd5a875b"
],
"version": "==3.6.4.0"
},
"blinker": {
"hashes": [
"sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
],
"version": "==1.4"
},
"celery": {
"hashes": [
"sha256:8d9a3de9162965e97f8e8cc584c67aad83b3f7a267584fa47701ed11c3e0d4b0",
"sha256:9dab2170b4038f7bf10ef2861dbf486ddf1d20592290a1040f7b7a1259705d42"
],
"markers": "python_version >= '3.6'",
"version": "==5.1.2"
},
"certifi": {
"hashes": [
"sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee",
@@ -178,36 +152,14 @@
"sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa",
"sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==4.0.0"
},
"click": {
"hashes": [
"sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a",
"sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"
"sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a",
"sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==7.1.2"
},
"click-didyoumean": {
"hashes": [
"sha256:112229485c9704ff51362fe34b2d4f0b12fc71cc20f6d2b3afabed4b8bfa6aeb"
],
"version": "==0.0.3"
},
"click-plugins": {
"hashes": [
"sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b",
"sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"
],
"version": "==1.1.1"
},
"click-repl": {
"hashes": [
"sha256:94b3fbbc9406a236f176e0506524b2937e4b23b6f4c0c0b2a0a83f8a64e9194b",
"sha256:cd12f68d745bf6151210790540b4cb064c7b13e571bc64b6957d98d120dacfd8"
],
"version": "==0.2.0"
"version": "==8.0.1"
},
"clickclick": {
"hashes": [
@@ -223,14 +175,6 @@
],
"version": "==0.9.1"
},
"configparser": {
"hashes": [
"sha256:85d5de102cfe6d14a5172676f09d19c465ce63d6019cf0a4ef13385fc535e828",
"sha256:af59f2cdd7efbdd5d111c1976ecd0b82db9066653362f0962d7bf1d3ab89a1fa"
],
"markers": "python_version >= '3.6'",
"version": "==5.0.2"
},
"connexion": {
"extras": [
"swagger-ui"
@@ -305,7 +249,6 @@
"sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771",
"sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.2.12"
},
"docutils": {
@@ -313,7 +256,6 @@
"sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125",
"sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.17.1"
},
"docxtpl": {
@@ -329,7 +271,6 @@
"sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c",
"sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"
],
"markers": "python_version >= '3.6'",
"version": "==1.1.0"
},
"flask": {
@@ -398,16 +339,8 @@
"sha256:2bda44b43e7cacb15d4e05ff3cc1f8bc97936cc464623424102bfc2c35e95912",
"sha256:f12c3d4cc5cc7fdcc148b9527ea05671718c3ea45d50c7e732cceb33f574b390"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.5.1"
},
"future": {
"hashes": [
"sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.18.2"
},
"greenlet": {
"hashes": [
"sha256:03f28a5ea20201e70ab70518d151116ce939b412961c33827519ce620957d44c",
@@ -483,7 +416,6 @@
"sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6",
"sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.10"
},
"imagesize": {
@@ -491,7 +423,6 @@
"sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1",
"sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.2.0"
},
"inflection": {
@@ -499,7 +430,6 @@
"sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417",
"sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"
],
"markers": "python_version >= '3.5'",
"version": "==0.5.1"
},
"isodate": {
@@ -514,7 +444,6 @@
"sha256:5174094b9637652bdb841a3029700391451bd092ba3db90600dea710ba28e97c",
"sha256:9e724d68fc22902a1435351f84c3fb8623f303fffcc566a4cb952df8c572cff0"
],
"markers": "python_version >= '3.6'",
"version": "==2.0.1"
},
"jinja2": {
@@ -522,7 +451,6 @@
"sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4",
"sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4"
],
"markers": "python_version >= '3.6'",
"version": "==3.0.1"
},
"jsonschema": {
@@ -532,20 +460,9 @@
],
"version": "==3.2.0"
},
"kombu": {
"hashes": [
"sha256:01481d99f4606f6939cdc9b637264ed353ee9e3e4f62cfb582324142c41a572d",
"sha256:e2dedd8a86c9077c350555153825a31e456a0dc20c15d5751f00137ec9c75f0a"
],
"markers": "python_version >= '3.6'",
"version": "==5.1.0"
},
"ldap3": {
"hashes": [
"sha256:18c3ee656a6775b9b0d60f7c6c5b094d878d1d90fc03d56731039f0a4b546a91",
"sha256:4139c91f0eef9782df7b77c8cbc6243086affcb6a8a249b768a9658438e5da59",
"sha256:8c949edbad2be8a03e719ba48bd6779f327ec156929562814b3e84ab56889c8c",
"sha256:afc6fc0d01f02af82cd7bfabd3bbfd5dc96a6ae91e97db0a2dab8a0f1b436056",
"sha256:c1df41d89459be6f304e0ceec4b00fdea533dbbcd83c802b1272dcdb94620b57"
],
"index": "pypi",
@@ -608,7 +525,6 @@
"sha256:17831f0b7087c313c0ffae2bcbbd3c1d5ba9eeac9c38f2eb7b50e8c99fe9d5ab",
"sha256:aea166356da44b9b830c8023cd9b557fa856bd8b4035d6de771ca027dfc5cc6e"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.1.4"
},
"markdown": {
@@ -656,16 +572,15 @@
"sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51",
"sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"
],
"markers": "python_version >= '3.6'",
"version": "==2.0.1"
},
"marshmallow": {
"hashes": [
"sha256:8050475b70470cc58f4441ee92375db611792ba39ca1ad41d39cad193ea9e040",
"sha256:b45cde981d1835145257b4a3c5cb7b80786dcf5f50dd2990749a50c16cb48e01"
"sha256:77368dfedad93c3a041cbbdbce0b33fac1d8608c9e2e2288408a43ce3493d2ff",
"sha256:d4090ca9a36cd129126ad8b10c3982c47d4644a6e3ccb20534b512badce95f35"
],
"index": "pypi",
"version": "==3.12.1"
"version": "==3.12.2"
},
"marshmallow-enum": {
"hashes": [
@@ -722,7 +637,6 @@
"sha256:a4b2712020284cee880b4c55faa513fbc2f8f07f365deda6098f8ab943c9f0df",
"sha256:b65d6c2242620bfe76d4c749b61cd9657e4528895a8f4fb6f916085b508ebd24"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.1.5"
},
"openapi-spec-validator": {
@@ -731,7 +645,6 @@
"sha256:3d70e6592754799f7e77a45b98c6a91706bdd309a425169d17d8e92173e198a2",
"sha256:ba28b06e63274f2bc6de995a07fb572c657e534425b5baf68d9f7911efe6929f"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.3.1"
},
"openpyxl": {
@@ -744,35 +657,35 @@
},
"packaging": {
"hashes": [
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
"sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
"sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7",
"sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.9"
"version": "==21.0"
},
"pandas": {
"hashes": [
"sha256:0c34b89215f984a9e4956446e0a29330d720085efa08ea72022387ee37d8b373",
"sha256:0dbd125b0e44e5068163cbc9080a00db1756a5e36309329ae14fd259747f2300",
"sha256:1102d719038e134e648e7920672188a00375f3908f0383fd3b202fbb9d2c3a95",
"sha256:14abb8ea73fce8aebbb1fb44bec809163f1c55241bcc1db91c2c780e97265033",
"sha256:25fc8ef6c6beb51c9224284a1ad89dfb591832f23ceff78845f182de35c52356",
"sha256:38e7486410de23069392bdf1dc7297ae75d2d67531750753f3149c871cd1c6e3",
"sha256:4bfbf62b00460f78a8bc4407112965c5ab44324f34551e8e1f4cac271a07706c",
"sha256:78de96c1174bcfdbe8dece9c38c2d7994e407fd8bb62146bb46c61294bcc06ef",
"sha256:7b09293c7119ab22ab3f7f086f813ac2acbfa3bcaaaeb650f4cddfb5b9fa9be4",
"sha256:821d92466fcd2826656374a9b6fe4f2ec2ba5e370cce71d5a990577929d948df",
"sha256:9244fb0904512b074d8c6362fb13aac1da6c4db94372760ddb2565c620240264",
"sha256:94ca6ea3f46f44a979a38a4d5a70a88cee734f7248d7aeeed202e6b3ba485af1",
"sha256:a67227e17236442c6bc31c02cb713b5277b26eee204eac14b5aecba52492e3a3",
"sha256:c862cd72353921c102166784fc4db749f1c3b691dd017fc36d9df2c67a9afe4e",
"sha256:d9e6edddeac9a8e473391d2d2067bb3c9dc7ad79fd137af26a39ee425c2b4c78",
"sha256:e36515163829e0e95a6af10820f178dd8768102482c01872bff8ae592e508e58",
"sha256:f20e4b8a7909f5a0c0a9e745091e3ea18b45af9f73496a4d498688badbdac7ea",
"sha256:fc9215dd1dd836ff26b896654e66b2dfcf4bbb18aa4c1089a79bab527b665a90"
"sha256:08eeff3da6a188e24db7f292b39a8ca9e073bf841fbbeadb946b3ad5c19d843e",
"sha256:1ff13eed501e07e7fb26a4ea18a846b6e5d7de549b497025601fd9ccb7c1d123",
"sha256:522bfea92f3ef6207cadc7428bda1e7605dae0383b8065030e7b5d0266717b48",
"sha256:7897326cae660eee69d501cbfa950281a193fcf407393965e1bc07448e1cc35a",
"sha256:798675317d0e4863a92a9a6bc5bd2490b5f6fef8c17b95f29e2e33f28bef9eca",
"sha256:7d3cd2c99faa94d717ca00ea489264a291ad7209453dffbf059bfb7971fd3a61",
"sha256:823737830364d0e2af8c3912a28ba971296181a07950873492ed94e12d28c405",
"sha256:872aa91e0f9ca913046ab639d4181a899f5e592030d954d28c2529b88756a736",
"sha256:88864c1e28353b958b1f30e4193818519624ad9a1776921622a6a2a016d5d807",
"sha256:92835113a67cbd34747c198d41f09f4b63f6fe11ca5643baebc7ab1e30e89e95",
"sha256:98efc2d4983d5bb47662fe2d97b2c81b91566cb08b266490918b9c7d74a5ef64",
"sha256:b10d7910ae9d7920a5ff7816d794d99acbc361f7b16a0f017d4fa83ced8cb55e",
"sha256:c554e6c9cf2d5ea1aba5979cc837b3649539ced0e18ece186f055450c86622e2",
"sha256:c746876cdd8380be0c3e70966d4566855901ac9aaa5e4b9ccaa5ca5311457d11",
"sha256:c81b8d91e9ae861eb4406b4e0f8d4dabbc105b9c479b3d1e921fba1d35b5b62a",
"sha256:e6b75091fa54a53db3927b4d1bc997c23c5ba6f87acdfe1ee5a92c38c6b2ed6a",
"sha256:ed4fc66f23fe17c93a5d439230ca2d6b5f8eac7154198d327dbe8a16d98f3f10",
"sha256:f058c786e7b0a9e7fa5e0b9f4422e0ccdd3bf3aa3053c18d77ed2a459bd9a45a",
"sha256:fe7a549d10ca534797095586883a5c17d140d606747591258869c56e14d1b457"
],
"index": "pypi",
"version": "==1.2.5"
"version": "==1.3.0"
},
"psycopg2-binary": {
"hashes": [
@@ -811,19 +724,8 @@
},
"pyasn1": {
"hashes": [
"sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359",
"sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576",
"sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf",
"sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7",
"sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d",
"sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00",
"sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8",
"sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86",
"sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12",
"sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776",
"sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba",
"sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2",
"sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"
"sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"
],
"version": "==0.4.8"
},
@@ -832,7 +734,6 @@
"sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
"sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.20"
},
"pygithub": {
@@ -848,7 +749,6 @@
"sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f",
"sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"
],
"markers": "python_version >= '3.5'",
"version": "==2.9.0"
},
"pyjwt": {
@@ -880,7 +780,6 @@
"sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff",
"sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.4.0"
},
"pyparsing": {
@@ -888,7 +787,6 @@
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
},
"pyrsistent": {
@@ -935,19 +833,10 @@
"hashes": [
"sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d",
"sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b",
"sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8",
"sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77",
"sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522"
"sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8"
],
"version": "==1.0.4"
},
"python-levenshtein": {
"hashes": [
"sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6"
],
"index": "pypi",
"version": "==0.12.2"
},
"pytz": {
"hashes": [
"sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da",
@@ -987,7 +876,6 @@
"sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6",
"sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==5.4.1"
},
"recommonmark": {
@@ -1022,7 +910,6 @@
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.16.0"
},
"snowballstemmer": {
@@ -1037,23 +924,22 @@
"sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc",
"sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b"
],
"markers": "python_version >= '3'",
"markers": "python_version >= '3.0'",
"version": "==2.2.1"
},
"sphinx": {
"hashes": [
"sha256:b5c2ae4120bf00c799ba9b3699bc895816d272d120080fbc967292f29b52b48c",
"sha256:d1cb10bee9c4231f1700ec2e24a91be3f3a3aba066ea4ca9f3bbe47e59d5a1d4"
"sha256:5747f3c855028076fcff1e4df5e75e07c836f0ac11f7df886747231092cfe4ad",
"sha256:dff357e6a208eb7edb2002714733ac21a9fe597e73609ff417ab8cf0c6b4fbb8"
],
"index": "pypi",
"version": "==4.0.2"
"version": "==4.0.3"
},
"sphinxcontrib-applehelp": {
"hashes": [
"sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a",
"sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.2"
},
"sphinxcontrib-devhelp": {
@@ -1061,7 +947,6 @@
"sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e",
"sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.2"
},
"sphinxcontrib-htmlhelp": {
@@ -1069,7 +954,6 @@
"sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07",
"sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"
],
"markers": "python_version >= '3.6'",
"version": "==2.0.0"
},
"sphinxcontrib-jsmath": {
@@ -1077,7 +961,6 @@
"sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178",
"sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.1"
},
"sphinxcontrib-qthelp": {
@@ -1085,7 +968,6 @@
"sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72",
"sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.3"
},
"sphinxcontrib-serializinghtml": {
@@ -1093,12 +975,11 @@
"sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd",
"sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"
],
"markers": "python_version >= '3.5'",
"version": "==1.1.5"
},
"spiffworkflow": {
"git": "https://github.com/sartography/SpiffWorkflow.git",
"ref": "109c237423e4e2645b4605b1166075546f22d272"
"ref": "66555b92ef1d8d9ce117b6f2ccf6aa248df9835f"
},
"sqlalchemy": {
"hashes": [
@@ -1162,22 +1043,13 @@
"sha256:29af5a53e9fb4e158f525367678b50053808ca6c21ba585754c77d790008c746",
"sha256:69e1f242c7f80273490d3403c3976f3ac3b26e289856936d1f620ed48f321897"
],
"markers": "python_version >= '3.6'",
"version": "==2.0.0"
},
"wcwidth": {
"hashes": [
"sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
"sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
],
"version": "==0.2.5"
},
"webob": {
"hashes": [
"sha256:73aae30359291c14fa3b956f8b5ca31960e420c28c1bec002547fb04928cf89b",
"sha256:b64ef5141be559cfade448f044fa45c2260351edcb6a8ef6b7e00c7dcef0c323"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.8.7"
},
"webtest": {
@@ -1219,11 +1091,11 @@
},
"xlsxwriter": {
"hashes": [
"sha256:1a7fac99687020e76aa7dd0d7de4b9b576547ed748e5cd91a99d52a6df54ca16",
"sha256:641db6e7b4f4982fd407a3f372f45b878766098250d26963e95e50121168cbe2"
"sha256:15b65f02f7ecdcfb1f22794b1fcfed8e9a49e8b7414646f90347be5cbf464234",
"sha256:791567acccc485ba76e0b84bccced2651981171de5b47d541520416f2f9f93e3"
],
"index": "pypi",
"version": "==1.4.3"
"version": "==1.4.4"
}
},
"develop": {
@@ -1232,7 +1104,6 @@
"sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
"sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==21.2.0"
},
"coverage": {
@@ -1302,11 +1173,10 @@
},
"packaging": {
"hashes": [
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
"sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
"sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7",
"sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.9"
"version": "==21.0"
},
"pbr": {
"hashes": [
@@ -1321,7 +1191,6 @@
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.13.1"
},
"py": {
@@ -1329,7 +1198,6 @@
"sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.10.0"
},
"pyparsing": {
@ -1337,7 +1205,6 @@
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
},
"pytest": {
@@ -1353,7 +1220,6 @@
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
"sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.10.2"
}
}


@@ -82,7 +82,7 @@ paths:
schema :
type : integer
get:
operationId: crc.api.file.get_document_directory
operationId: crc.api.document.get_document_directory
summary: Returns a directory of all files for study in a nested structure
tags:
- Document Categories
@@ -510,7 +510,7 @@ paths:
description: The unique id of an existing workflow specification to validate.
schema:
type: string
- name: validate_study_id
- name: study_id
in: query
required: false
description: Optional id of study to test under different scenarios


@@ -10,7 +10,9 @@ import sentry_sdk
class ApiError(Exception):
def __init__(self, code, message, status_code=400,
file_name="", task_id="", task_name="", tag="", task_data = {}):
file_name="", task_id="", task_name="", tag="", task_data=None, error_type="", line_number=0, offset=0):
if task_data is None:
task_data = {}
self.status_code = status_code
self.code = code # a short consistent string describing the error.
self.message = message # A detailed message that provides more information.
@@ -18,8 +20,11 @@ class ApiError(Exception):
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
self.task_data = task_data or "" # OPTIONAL: A snapshot of data connected to the task when error ocurred.
if hasattr(g,'user'):
self.task_data = task_data or "" # OPTIONAL: A snapshot of data connected to the task when error occurred.
self.line_number = line_number
self.offset = offset
self.error_type = error_type
if hasattr(g, 'user'):
user = g.user.uid
else:
user = 'Unknown'
@@ -29,12 +34,16 @@
Exception.__init__(self, self.message)
@classmethod
def from_task(cls, code, message, task, status_code=400):
def from_task(cls, code, message, task, status_code=400, line_number=0, offset=0, error_type="", error_line=""):
"""Constructs an API Error with details pulled from the current task."""
instance = cls(code, message, status_code=status_code)
instance.task_id = task.task_spec.name or ""
instance.task_name = task.task_spec.description or ""
instance.file_name = task.workflow.spec.file or ""
instance.line_number = line_number
instance.offset = offset
instance.error_type = error_type
instance.error_line = error_line
# Fixme: spiffworkflow is doing something weird where task ends up referenced in the data in some cases.
if "task" in task.data:
@@ -61,7 +70,11 @@
so consolidating the code, and doing the best things
we can with the data we have."""
if isinstance(exp, WorkflowTaskExecException):
return ApiError.from_task(code, message, exp.task)
return ApiError.from_task(code, message, exp.task, line_number=exp.line_number,
offset=exp.offset,
error_type=exp.exception.__class__.__name__,
error_line=exp.error_line)
else:
return ApiError.from_task_spec(code, message, exp.sender)
@@ -69,7 +82,7 @@
class ApiErrorSchema(ma.Schema):
class Meta:
fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
"task_data", "task_user", "hint")
"task_data", "task_user", "hint", "line_number", "offset", "error_type", "error_line")
@app.errorhandler(ApiError)
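
A minimal sketch, not from this commit, of how the new fields surface. It assumes a Flask app/request context, since ApiError consults flask.g for the current user; the values below are illustrative, and in practice from_workflow_exception fills them from a WorkflowTaskExecException raised by a failing script task.

from crc.api.common import ApiError, ApiErrorSchema

# Illustrative values; real errors get these from the workflow engine.
err = ApiError('syntax_error', 'Something is wrong with your python script',
               error_type='SyntaxError', line_number=3, offset=12)
print(ApiErrorSchema().dump(err))  # now includes line_number, offset and error_type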

crc/api/document.py Normal file

@@ -0,0 +1,18 @@
from crc.models.api_models import DocumentDirectorySchema
from crc.models.file import File
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
def get_document_directory(study_id, workflow_id=None):
"""
return a nested list of files arranged according to the category hierarchy
defined in the doc dictionary
"""
file_models = FileService.get_files_for_study(study_id=study_id)
doc_dict = DocumentService.get_dictionary()
files = (File.from_models(model, FileService.get_file_data(model.id), doc_dict) for model in file_models)
directory = DocumentService.get_directory(doc_dict, files, workflow_id)
return DocumentDirectorySchema(many=True).dump(directory)


@@ -7,71 +7,15 @@ from flask import send_file
from crc import session
from crc.api.common import ApiError
from crc.api.user import verify_token
from crc.models.api_models import DocumentDirectory, DocumentDirectorySchema
from crc.models.file import FileSchema, FileModel, File, FileModelSchema, FileDataModel, FileType
from crc.models.workflow import WorkflowSpecModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
def ensure_exists(output, categories, expanded):
"""
This is a recursive function, it expects a list of
levels with a file object at the end (kinda like duck,duck,duck,goose)
for each level, it makes sure that level is already in the structure and if it is not
it will add it
function terminates upon getting an entry that is a file object ( or really anything but string)
"""
current_item = categories[0]
found = False
if isinstance(current_item, str):
for item in output:
if item.level == current_item:
found = True
item.filecount = item.filecount + 1
item.expanded = expanded | item.expanded
ensure_exists(item.children, categories[1:], expanded)
if not found:
new_level = DocumentDirectory(level=current_item)
new_level.filecount = 1
new_level.expanded = expanded
output.append(new_level)
ensure_exists(new_level.children, categories[1:], expanded)
else:
new_level = DocumentDirectory(file=current_item)
new_level.expanded = expanded
output.append(new_level)
def get_document_directory(study_id, workflow_id=None):
"""
return a nested list of files arranged according to the category hirearchy
defined in the doc dictionary
"""
output = []
doc_dict = FileService.get_doc_dictionary()
file_models = FileService.get_files_for_study(study_id=study_id)
files = (to_file_api(model) for model in file_models)
for file in files:
if file.irb_doc_code in doc_dict:
doc_code = doc_dict[file.irb_doc_code]
else:
doc_code = {'category1': "Unknown", 'category2': '', 'category3': ''}
if workflow_id:
expand = file.workflow_id == int(workflow_id)
else:
expand = False
print(expand)
categories = [x for x in [doc_code['category1'],doc_code['category2'],doc_code['category3'],file] if x != '']
ensure_exists(output, categories, expanded=expand)
return DocumentDirectorySchema(many=True).dump(output)
def to_file_api(file_model):
"""Converts a FileModel object to something we can return via the api"""
return File.from_models(file_model, FileService.get_file_data(file_model.id),
FileService.get_doc_dictionary())
DocumentService.get_dictionary())
def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None,study_id=None):


@@ -46,22 +46,15 @@ def get_workflow_specification(spec_id):
return WorkflowSpecModelSchema().dump(spec)
def validate_workflow_specification(spec_id, validate_study_id=None, test_until=None):
errors = {}
def validate_workflow_specification(spec_id, study_id=None, test_until=None):
try:
WorkflowService.test_spec(spec_id, validate_study_id, test_until)
WorkflowService.test_spec(spec_id, study_id, test_until)
WorkflowService.test_spec(spec_id, study_id, test_until, required_only=True)
except ApiError as ae:
ae.message = "When populating all fields ... \n" + ae.message
errors['all'] = ae
try:
# Run the validation twice, the second time, just populate the required fields.
WorkflowService.test_spec(spec_id, validate_study_id, test_until, required_only=True)
except ApiError as ae:
ae.message = "When populating only required fields ... \n" + ae.message
errors['required'] = ae
interpreted_errors = ValidationErrorService.interpret_validation_errors(errors)
return ApiErrorSchema(many=True).dump(interpreted_errors)
error = ae
error = ValidationErrorService.interpret_validation_error(error)
return ApiErrorSchema(many=True).dump([error])
return []
def update_workflow_specification(spec_id, body):
if spec_id is None:
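
Both validation passes (all fields, then required-only) now run inside a single try block, and the endpoint returns at most one interpreted error instead of a dictionary of two. A rough sketch of the new contract, assuming a configured app, a seeded database, and a hypothetical spec id:

from crc.api.workflow import validate_workflow_specification

errors = validate_workflow_specification('my_spec_id')  # hypothetical spec id
if errors:  # a single-element list of serialized ApiErrors, possibly carrying a hint
    print(errors[0]['message'], errors[0].get('hint'))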


@@ -1,15 +1,14 @@
import enum
from typing import cast
from marshmallow import INCLUDE, EXCLUDE, fields, Schema
from marshmallow import INCLUDE, EXCLUDE, Schema
from marshmallow_enum import EnumField
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func, Index
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import deferred, relationship
from crc.models.data_store import DataStoreModel # this is needed by the relationship
from crc import db, ma
from crc.models.data_store import DataStoreModel
class FileType(enum.Enum):
@@ -43,7 +42,7 @@ CONTENT_TYPES = {
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"gif": "image/gif",
"jpg": "image/jpeg",
"md" : "text/plain",
"md": "text/plain",
"pdf": "application/pdf",
"png": "image/png",
"ppt": "application/vnd.ms-powerpoint",
@@ -72,7 +71,6 @@ class FileDataModel(db.Model):
user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=True)
class FileModel(db.Model):
__tablename__ = 'file'
id = db.Column(db.Integer, primary_key=True)
@@ -80,18 +78,19 @@
type = db.Column(db.Enum(FileType))
is_status = db.Column(db.Boolean)
content_type = db.Column(db.String)
is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
primary = db.Column(db.Boolean, nullable=False, default=False) # Is this the primary BPMN in a workflow?
primary_process_id = db.Column(db.String, nullable=True) # An id in the xml of BPMN documents, critical for primary BPMN.
is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
primary = db.Column(db.Boolean, nullable=False, default=False) # Is this the primary BPMN in a workflow?
primary_process_id = db.Column(db.String, nullable=True) # An id in the xml of BPMN documents, for primary BPMN.
workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
# A request was made to delete the file, but we can't because there are
# active approvals or running workflows that depend on it. So we archive
# it instead, hide it in the interface.
is_review = db.Column(db.Boolean, default=False, nullable=True)
archived = db.Column(db.Boolean, default=False, nullable=False)
data_stores = relationship("DataStoreModel", cascade="all,delete", backref="file")
data_stores = relationship(DataStoreModel, cascade="all,delete", backref="file")
class File(object):
@classmethod
@@ -108,7 +107,7 @@ class File(object):
instance.workflow_id = model.workflow_id
instance.irb_doc_code = model.irb_doc_code
instance.type = model.type
if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
instance.document = doc_dictionary[model.irb_doc_code]
else:
instance.document = {}
@@ -149,7 +148,6 @@ class FileSchema(Schema):
type = EnumField(FileType)
class LookupFileModel(db.Model):
"""Gives us a quick way to tell what kind of lookup is set on a form field.
Connected to the file data model, so that if a new version of the same file is
@@ -161,7 +159,8 @@
field_id = db.Column(db.String)
is_ldap = db.Column(db.Boolean) # Allows us to run an ldap query instead of a db lookup.
file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id'))
dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan")
dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model",
cascade="all, delete, delete-orphan")
class LookupDataModel(db.Model):
@@ -171,7 +170,7 @@ class LookupDataModel(db.Model):
value = db.Column(db.String)
label = db.Column(db.String)
# In the future, we might allow adding an additional "search" column if we want to search things not in label.
data = db.Column(db.JSON) # all data for the row is stored in a json structure here, but not searched presently.
data = db.Column(db.JSON) # all data for the row is stored in a json structure here, but not searched presently.
# Assure there is a searchable index on the label column, so we can get fast results back.
# query with:
@@ -194,7 +193,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema):
load_instance = True
include_relationships = False
include_fk = False # Includes foreign keys
exclude = ['id'] # Do not include the id field, it should never be used via the API.
exclude = ['id'] # Do not include the id field, it should never be used via the API.
class SimpleFileSchema(ma.Schema):


@@ -2,6 +2,7 @@ from crc import session
from crc.api.common import ApiError
from crc.models.file import FileModel
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
@@ -9,7 +10,7 @@ class DeleteFile(Script):
@staticmethod
def process_document_deletion(doc_code, workflow_id, task):
if FileService.is_allowed_document(doc_code):
if DocumentService.is_allowed_document(doc_code):
result = session.query(FileModel).filter(
FileModel.workflow_id == workflow_id, FileModel.irb_doc_code == doc_code).all()
if isinstance(result, list) and len(result) > 0 and isinstance(result[0], FileModel):


@@ -3,6 +3,7 @@ from flask import g
from crc.api.common import ApiError
from crc.services.data_store_service import DataStoreBase
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
@@ -17,17 +18,22 @@ class FileDataSet(Script, DataStoreBase):
del(kwargs['file_id'])
return True
def validate_kw_args(self,**kwargs):
if kwargs.get('key',None) is None:
def validate_kw_args(self, **kwargs):
if kwargs.get('key', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'key'")
message=f"The 'file_data_get' script requires a keyword argument of 'key'")
if kwargs.get('file_id', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'file_id'")
if kwargs.get('value', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'value'")
if kwargs.get('file_id',None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'file_id'")
if kwargs.get('value',None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'value'")
if kwargs['key'] == 'irb_code' and not DocumentService.is_allowed_document(kwargs.get('value')):
raise ApiError("invalid_form_field_key",
"When setting an irb_code, the form field id must match a known document in the "
"irb_docunents.xslx reference file. This code is not found in that file '%s'" %
kwargs.get('value'))
return True
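
A hedged sketch of the new guard. The first call exercises only the presence checks; the second reaches DocumentService.get_dictionary, so it assumes a seeded database, and the document code is deliberately bogus:

from crc.scripts.file_data_set import FileDataSet

fds = FileDataSet()
fds.validate_kw_args(key='my_key', file_id=42, value='anything')  # passes: key, file_id and value are all present
fds.validate_kw_args(key='irb_code', file_id=42, value='Not_A_Doc_Code')  # raises ApiError('invalid_form_field_key', ...)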


@@ -10,6 +10,7 @@ from crc.models.protocol_builder import ProtocolBuilderInvestigatorType
from crc.models.study import StudyModel, StudySchema
from crc.api import workflow as workflow_api
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
@@ -168,8 +169,8 @@
"""For validation only, pretend no results come back from pb"""
self.check_args(args, 2)
# Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.)
FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST)
FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
# we call the real do_task so we can
# seed workflow validations with settings from studies in PB Mock
# in order to test multiple paths thru the workflow


@@ -0,0 +1,98 @@
from crc.api.common import ApiError
from crc.models.api_models import DocumentDirectory
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
class DocumentService(object):
"""The document service provides details about the types of documents that can be uploaded to a workflow.
This metadata about different document types is managed in an Excel spreadsheet, which can be uploaded at any
time to change which documents are accepted, and it allows us to categorize these documents. At a minimum,
the spreadsheet should contain the columns 'code', 'category1', 'category2', 'category3', 'description' and 'id',
'code' is required for all rows in the table; the other fields are optional. """
DOCUMENT_LIST = "irb_documents.xlsx"
@staticmethod
def is_allowed_document(code):
doc_dict = DocumentService.get_dictionary()
return code in doc_dict
@staticmethod
def verify_doc_dictionary(dd):
"""
We are currently getting structured information from an XLS file, if someone accidentally
changes a header we will have problems later, so we will verify we have the headers we need
here
"""
required_fields = ['category1', 'category2', 'category3', 'description']
# we only need to check the first item, as all of the keys should be the same
key = list(dd.keys())[0]
for field in required_fields:
if field not in dd[key].keys():
raise ApiError(code="Invalid document list %s" % DocumentService.DOCUMENT_LIST,
message='Please check the headers in %s' % DocumentService.DOCUMENT_LIST)
@staticmethod
def get_dictionary():
"""Returns a dictionary of document details keyed on the doc_code."""
file_data = FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'description')
doc_dict = {}
for lookup_data in lookup_model.dependencies:
doc_dict[lookup_data.value] = lookup_data.data
return doc_dict
@staticmethod
def get_directory(doc_dict, files, workflow_id):
"""Returns a list of directories, hierarchically nested by category, with files at the deepest level.
Empty directories are not included."""
directory = []
if files:
for file in files:
if file.irb_doc_code in doc_dict:
doc_code = doc_dict[file.irb_doc_code]
else:
doc_code = {'category1': "Unknown", 'category2': None, 'category3': None}
if workflow_id:
expand = file.workflow_id == int(workflow_id)
else:
expand = False
print(expand)
categories = [x for x in [doc_code['category1'], doc_code['category2'], doc_code['category3'], file] if x]
DocumentService.ensure_exists(directory, categories, expanded=expand)
return directory
@staticmethod
def ensure_exists(output, categories, expanded):
"""
This is a recursive function, it expects a list of
levels with a file object at the end (kinda like duck,duck,duck,goose)
for each level, it makes sure that level is already in the structure and if it is not
it will add it
function terminates upon getting an entry that is a file object ( or really anything but string)
"""
current_item = categories[0]
found = False
if isinstance(current_item, str):
for item in output:
if item.level == current_item:
found = True
item.filecount = item.filecount + 1
item.expanded = expanded | item.expanded
DocumentService.ensure_exists(item.children, categories[1:], expanded)
if not found:
new_level = DocumentDirectory(level=current_item)
new_level.filecount = 1
new_level.expanded = expanded
output.append(new_level)
DocumentService.ensure_exists(new_level.children, categories[1:], expanded)
else:
print("Found it")
else:
new_level = DocumentDirectory(file=current_item)
new_level.expanded = expanded
output.append(new_level)
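
A small illustration of the recursion, assuming the crc package imports cleanly (importing it configures the Flask app). The category names are invented, and a SimpleNamespace stands in for the File object, since ensure_exists only checks whether each entry is a string:

from types import SimpleNamespace
from crc.services.document_service import DocumentService

fake_file = SimpleNamespace(name='approval.pdf')  # stand-in for a File model
directory = []
DocumentService.ensure_exists(directory, ['Category A', 'Category B', fake_file], expanded=False)
# directory now holds DocumentDirectory('Category A') -> children: [DocumentDirectory('Category B')] -> children: [the file]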


@@ -1,6 +1,5 @@
import re
generic_message = """Workflow validation failed. For more information about the error, see below."""
# known_errors is a dictionary of errors from validation that we want to give users a hint for solving their problem.
# The key is the known error, or part of the known error. It is a string.
@@ -14,7 +13,7 @@ generic_message = """Workflow validation failed. For more information about the
# I know this explanation is confusing. If you have ideas for clarification, pull request welcome.
known_errors = {'Error is Non-default exclusive outgoing sequence flow without condition':
known_errors = {'Non-default exclusive outgoing sequence flow without condition':
{'hint': 'Add a Condition Type to your gateway path.'},
'Could not set task title on task .*':
@@ -29,37 +28,16 @@ class ValidationErrorService(object):
Validation is run twice,
once where we try to fill in all form fields
and a second time where we only fill in the required fields.
We get a list that contains possible errors from the validation."""
@staticmethod
def interpret_validation_errors(errors):
if len(errors) == 0:
return ()
interpreted_errors = []
for error_type in ['all', 'required']:
if error_type in errors:
hint = generic_message
for known_key in known_errors:
regex = re.compile(known_key)
result = regex.search(errors[error_type].message)
if result is not None:
if 'hint' in known_errors[known_key]:
if 'groups' in known_errors[known_key]:
caught = {}
for group in known_errors[known_key]['groups']:
group_id = known_errors[known_key]['groups'][group]
group_value = result.groups()[group_id]
caught[group] = group_value
hint = known_errors[known_key]['hint'].format(**caught)
else:
hint = known_errors[known_key]['hint']
errors[error_type].hint = hint
interpreted_errors.append(errors[error_type])
return interpreted_errors
def interpret_validation_error(error):
if error is None:
return
for known_key in known_errors:
regex = re.compile(known_key)
result = regex.search(error.message)
if result is not None:
if 'hint' in known_errors[known_key]:
error.hint = known_errors[known_key]['hint']
return error
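
A sketch of the simplified matcher, assuming the crc package imports cleanly. A SimpleNamespace stands in for an ApiError, because only the .message attribute is consulted and .hint is set on a match:

from types import SimpleNamespace
from crc.services.error_service import ValidationErrorService

error = SimpleNamespace(message='Non-default exclusive outgoing sequence flow without condition')
error = ValidationErrorService.interpret_validation_error(error)
print(error.hint)  # 'Add a Condition Type to your gateway path.'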


@@ -10,8 +10,6 @@ from lxml import etree
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from lxml.etree import XMLSyntaxError
from pandas import ExcelFile
from pandas._libs.missing import NA
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
@@ -39,34 +37,6 @@ def camel_to_snake(camel):
class FileService(object):
"""Provides consistent management and rules for storing, retrieving and processing files."""
DOCUMENT_LIST = "irb_documents.xlsx"
INVESTIGATOR_LIST = "investigators.xlsx"
__doc_dictionary = None
@staticmethod
def verify_doc_dictionary(dd):
"""
We are currently getting structured information from an XLS file, if someone accidentally
changes a header we will have problems later, so we will verify we have the headers we need
here
"""
required_fields = ['category1','category2','category3','description']
# we only need to check the first item, as all of the keys should be the same
key = list(dd.keys())[0]
for field in required_fields:
if field not in dd[key].keys():
raise ApiError(code="Invalid document list %s"%FileService.DOCUMENT_LIST,
message='Please check the headers in %s'%FileService.DOCUMENT_LIST)
@staticmethod
def get_doc_dictionary():
if not FileService.__doc_dictionary:
FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
FileService.verify_doc_dictionary(FileService.__doc_dictionary)
return FileService.__doc_dictionary
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
@@ -89,10 +59,7 @@
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def is_allowed_document(code):
doc_dict = FileService.get_doc_dictionary()
return code in doc_dict
@staticmethod
@cache
@@ -105,12 +72,6 @@
def update_irb_code(file_id, irb_doc_code):
"""Create a new file and associate it with the workflow
Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document."""
if not FileService.is_allowed_document(irb_doc_code):
raise ApiError("invalid_form_field_key",
"When uploading files, the form field id must match a known document in the "
"irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code)
""" """
file_model = session.query(FileModel)\
.filter(FileModel.id == file_id).first()
if file_model is None:
@@ -138,28 +99,6 @@
)
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def get_reference_data(reference_file_name, index_column, int_columns=[]):
""" Opens a reference file (assumes that it is xls file) and returns the data as a
dictionary, each row keyed on the given index_column name. If there are columns
that should be represented as integers, pass these as an array of int_columns, lest
you get '1.0' rather than '1'
fixme: This is stupid stupid slow. Place it in the database and just check if it is up to date."""
data_model = FileService.get_reference_file_data(reference_file_name)
xls = ExcelFile(data_model.data, engine='openpyxl')
df = xls.parse(xls.sheet_names[0])
df = df.convert_dtypes()
df = pd.DataFrame(df).dropna(how='all') # Drop null rows
df = pd.DataFrame(df).replace({NA: None}) # replace NA with None.
for c in int_columns:
df[c] = df[c].fillna(0)
df = df.astype({c: 'Int64'})
df = df.fillna('')
df = df.applymap(str)
df = df.set_index(index_column)
return json.loads(df.to_json(orient='index'))
@staticmethod
def get_workflow_files(workflow_id):
"""Returns all the file models associated with a running workflow."""


@@ -12,7 +12,7 @@ from sqlalchemy.sql.functions import GenericFunction
from crc import db
from crc.api.common import ApiError
from crc.models.api_models import Task
from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
from crc.models.file import FileModel, FileDataModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowModel, WorkflowSpecDependencyFile
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
@@ -25,11 +25,14 @@ class TSRank(GenericFunction):
class LookupService(object):
"""Provides tools for doing lookups for auto-complete fields.
This can currently take two forms:
"""Provides tools for doing lookups for auto-complete fields, and rapid access to any
uploaded spreadsheets.
This can currently take three forms:
1) Lookup from spreadsheet data associated with a workflow specification.
in which case we store the spreadsheet data in a lookup table with full
text indexing enabled, and run searches against that table.
2) Lookup from spreadsheet data associated with a specific file. This allows us
to get a lookup model for a specific file object, such as a reference file.
2) Lookup from LDAP records. In which case we call out to an external service
to pull back detailed records and return them.
@@ -44,6 +47,14 @@
workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
return LookupService.__get_lookup_model(workflow, spiff_task.task_spec.name, field.id)
@staticmethod
def get_lookup_model_for_file_data(file_data: FileDataModel, value_column, label_column):
lookup_model = db.session.query(LookupFileModel).filter(LookupFileModel.file_data_model_id == file_data.id).first()
if not lookup_model:
logging.warning("!!!! Making a very expensive call to update the lookup model.")
lookup_model = LookupService.build_lookup_table(file_data, value_column, label_column)
return lookup_model
@staticmethod
def __get_lookup_model(workflow, task_spec_id, field_id):
lookup_model = db.session.query(LookupFileModel) \
@@ -139,7 +150,8 @@ class LookupService(object):
return lookup_model
@staticmethod
def build_lookup_table(data_model: FileDataModel, value_column, label_column, workflow_spec_id, task_spec_id, field_id):
def build_lookup_table(data_model: FileDataModel, value_column, label_column,
workflow_spec_id=None, task_spec_id=None, field_id=None):
""" In some cases the lookup table can be very large. This method will add all values to the database
in a way that can be searched and returned via an api call - rather than sending the full set of
options along with the form. It will only open the file and process the options if something has
@@ -147,8 +159,9 @@
xls = ExcelFile(data_model.data, engine='openpyxl')
df = xls.parse(xls.sheet_names[0]) # Currently we only look at the fist sheet.
df = df.convert_dtypes()
df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Drop unnamed columns.
df = pd.DataFrame(df).dropna(how='all') # Drop null rows
df = pd.DataFrame(df).replace({NA: None})
df = pd.DataFrame(df).replace({NA: ''})
if value_column not in df:
raise ApiError("invalid_enum",
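
This new entry point is what DocumentService.get_dictionary (earlier in this commit) builds on. A sketch of the cached path, assuming a seeded database; if no lookup table exists for the file data yet, the call logs the warning and builds one, and later calls reuse it:

from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService

file_data = FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'description')
doc_dict = {row.value: row.data for row in lookup_model.dependencies}  # what get_dictionary returns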


@@ -22,13 +22,16 @@ from crc.models.study import StudyModel, Study, StudyStatus, Category, WorkflowM
from crc.models.task_event import TaskEventModel, TaskEvent
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus, WorkflowSpecDependencyFile
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.lookup_service import LookupService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
class StudyService(object):
"""Provides common tools for working with a Study"""
INVESTIGATOR_LIST = "investigators.xlsx" # A reference document containing details about what investigators to show, and when.
@staticmethod
def get_studies_for_user(user):
@@ -77,7 +80,7 @@ class StudyService(object):
workflow_metas = StudyService._get_workflow_metas(study_id)
files = FileService.get_files_for_study(study.id)
files = (File.from_models(model, FileService.get_file_data(model.id),
FileService.get_doc_dictionary()) for model in files)
DocumentService.get_dictionary()) for model in files)
study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
# master spec and runs it. Don't execute this for Abandoned studies, as
@@ -265,14 +268,14 @@
# Loop through all known document types, get the counts for those files,
# and use pb_docs to mark those as required.
doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
doc_dictionary = DocumentService.get_dictionary()
documents = {}
for code, doc in doc_dictionary.items():
if ProtocolBuilderService.is_enabled():
doc['required'] = False
if ProtocolBuilderService.is_enabled() and doc['id']:
pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
doc['required'] = False
if pb_data:
doc['required'] = True
@@ -282,7 +285,7 @@
# Make a display name out of categories
name_list = []
for cat_key in ['category1', 'category2', 'category3']:
if doc[cat_key] not in ['', 'NULL']:
if doc[cat_key] not in ['', 'NULL', None]:
name_list.append(doc[cat_key])
doc['display_name'] = ' / '.join(name_list)
@@ -319,12 +322,22 @@
documents[code] = doc
return Box(documents)
@staticmethod
def get_investigator_dictionary():
"""Returns a dictionary of document details keyed on the doc_code."""
file_data = FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'label')
doc_dict = {}
for lookup_data in lookup_model.dependencies:
doc_dict[lookup_data.value] = lookup_data.data
return doc_dict
@staticmethod
def get_investigators(study_id, all=False):
"""Convert array of investigators from protocol builder into a dictionary keyed on the type. """
# Loop through all known investigator types as set in the reference file
inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
inv_dictionary = StudyService.get_investigator_dictionary()
# Get PB required docs
pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id)


@@ -53,21 +53,15 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
try:
if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
augmentMethods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
augment_methods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
else:
augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
super().execute(task, script, data, externalMethods=augmentMethods)
except SyntaxError as e:
raise ApiError('syntax_error',
f'Something is wrong with your python script '
f'please correct the following:'
f' {script}, {e.msg}')
except NameError as e:
raise ApiError('name_error',
f'something you are referencing does not exist:'
f' {script}, {e}')
augment_methods = Script.generate_augmented_list(task, study_id, workflow_id)
super().execute(task, script, data, external_methods=augment_methods)
except WorkflowException as e:
raise e
except Exception as e:
raise WorkflowTaskExecException(task, f' {script}, {e}', e)
def evaluate_expression(self, task, expression):
"""
@@ -86,7 +80,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
else:
augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
exp, valid = self.validateExpression(expression)
return self._eval(exp, externalMethods=augmentMethods, **task.data)
return self._eval(exp, external_methods=augmentMethods, **task.data)
except Exception as e:
raise WorkflowTaskExecException(task,
@@ -331,8 +325,8 @@ class WorkflowProcessor(object):
spec = parser.get_spec(process_id)
except ValidationException as ve:
raise ApiError(code="workflow_validation_error",
message="Failed to parse Workflow Specification '%s'. \n" % workflow_spec_id +
"Error is %s. \n" % str(ve),
message="Failed to parse the Workflow Specification. " +
"Error is '%s.'" % str(ve),
file_name=ve.filename,
task_id=ve.id,
tag=ve.tag)


@@ -30,6 +30,7 @@ from crc.models.study import StudyModel
from crc.models.task_event import TaskEventModel
from crc.models.user import UserModel, UserModelSchema
from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.study_service import StudyService
@@ -97,12 +98,15 @@ class WorkflowService(object):
def do_waiting():
records = db.session.query(WorkflowModel).filter(WorkflowModel.status==WorkflowStatus.waiting).all()
for workflow_model in records:
print('processing workflow %d'%workflow_model.id)
processor = WorkflowProcessor(workflow_model)
processor.bpmn_workflow.refresh_waiting_tasks()
processor.bpmn_workflow.do_engine_steps()
processor.save()
# fixme: Try catch with a very explicit error about the study, workflow and task that failed.
try:
app.logger.info('Processing workflow %s' % workflow_model.id)
processor = WorkflowProcessor(workflow_model)
processor.bpmn_workflow.refresh_waiting_tasks()
processor.bpmn_workflow.do_engine_steps()
processor.save()
except:
app.logger.error('Failed to process workflow')
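
The fixme above is only half-addressed: the bare except hides which workflow failed and also catches things it shouldn't (KeyboardInterrupt, SystemExit). A sketch of the more explicit handler the comment asks for — workflow_model.study_id is an assumption about the model:

for workflow_model in records:
    try:
        app.logger.info('Processing workflow %s' % workflow_model.id)
        processor = WorkflowProcessor(workflow_model)
        processor.bpmn_workflow.refresh_waiting_tasks()
        processor.bpmn_workflow.do_engine_steps()
        processor.save()
    except Exception as e:
        # name the workflow (and, assumed, its study) so the failure is traceable
        app.logger.error('Failed to process waiting workflow %s of study %s: %s' %
                         (workflow_model.id, workflow_model.study_id, e), exc_info=True)
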
@staticmethod
@timeit
@ -434,7 +438,7 @@ class WorkflowService(object):
doc_code = WorkflowService.evaluate_property('doc_code', field, task)
file_model = FileModel(name="test.png",
irb_doc_code = field.id)
doc_dict = FileService.get_doc_dictionary()
doc_dict = DocumentService.get_dictionary()
file = File.from_models(file_model, None, doc_dict)
return FileSchema().dump(file)
elif field.type == 'files':

View File

@ -61,7 +61,6 @@ python-box==5.2.0
python-dateutil==2.8.1
python-docx==0.8.10
python-editor==1.0.4
python-levenshtein==0.12.0
pytz==2020.4
pyyaml==5.4
recommonmark==0.6.0

View File

@ -7,7 +7,9 @@ from crc.models.file import CONTENT_TYPES
from crc.models.ldap import LdapModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
class ExampleDataLoader:
@ -315,14 +317,14 @@ class ExampleDataLoader:
def load_reference_documents(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.DOCUMENT_LIST,
FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.INVESTIGATOR_LIST,
FileService.add_reference_file(StudyService.INVESTIGATOR_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
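
Both uploads here follow an open/read/close pattern; a with-block variant cannot leak the handle if registration raises. A sketch of the loader step factored that way (the helper name and callable parameter are mine):

import os

def load_reference_spreadsheet(root_path, filename, doc_code, content_type, add_reference_file):
    """Read a bundled spreadsheet and register it as a reference file."""
    file_path = os.path.join(root_path, 'static', 'reference', filename)
    with open(file_path, 'rb') as f:  # closed even if registration raises
        add_reference_file(doc_code, binary_data=f.read(), content_type=content_type)
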

View File

@ -2,6 +2,7 @@
# IMPORTANT - Environment must be loaded before app, models, etc....
import os
os.environ["TESTING"] = "true"
import json
@ -23,6 +24,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.user_service import UserService
from crc.services.workflow_service import WorkflowService
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@ -138,8 +140,7 @@ class BaseTest(unittest.TestCase):
delete everything that matters in the local database; this is used to
test a ground-zero copy of the workflow specs.
"""
session.execute("delete from workflow; delete from file_data; delete from file; delete from workflow_spec;")
session.commit()
ExampleDataLoader.clean_db()
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
"""use_crc_data will cause this to load the mammoth collection of documents
@ -282,28 +283,6 @@ class BaseTest(unittest.TestCase):
session.commit()
return study
def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
workflow_spec_name="random_fact"):
study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
approvals = []
for i in range(len(approver_uids)):
approvals.append(self.create_approval(
study=study,
workflow=workflow,
approver_uid=approver_uids[i],
status=statuses[i],
version=1
))
full_study = {
'study': study,
'workflow': workflow,
'approvals': approvals,
}
return full_study
def create_workflow(self, workflow_name, display_name=None, study=None, category_id=None, as_user="dhf8r"):
session.flush()
@ -320,30 +299,11 @@ class BaseTest(unittest.TestCase):
def create_reference_document(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.DOCUMENT_LIST,
FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
content_type=CONTENT_TYPES['xlsx'])
file.close()
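
The xls-to-xlsx switch here is what makes the content-type assertion later in these diffs pass; the two formats carry different MIME types. The mapping being relied on, with values copied from those assertions (the key names are an assumption about CONTENT_TYPES):

CONTENT_TYPES = {
    'xls':  'application/vnd.ms-excel',
    'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
}
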
def create_approval(
self,
study=None,
workflow=None,
approver_uid=None,
status=None,
version=None,
):
study = study or self.create_study()
workflow = workflow or self.create_workflow()
approver_uid = approver_uid or self.test_uid
status = status or ApprovalStatus.PENDING.value
version = version or 1
approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status,
version=version)
session.add(approval)
session.commit()
return approval
def get_workflow_common(self, url, user):
rv = self.app.get(url,
headers=self.logged_in_headers(user),

View File

@ -16,6 +16,12 @@
OGC will upload the Non-Funded Executed Agreement after it has been negotiated by OSP contract negotiator.</bpmn:documentation>
<bpmn:extensionElements>
<camunda:formData>
<camunda:formField id="Date" label="Version Date" type="date">
<camunda:properties>
<camunda:property id="group" value="PCRApproval" />
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="file_type" type="enum" defaultValue="AD_CoCApp">
<camunda:value id="AD_CoCApp" name="Ancillary Documents / Case Report Form" />
<camunda:value id="AD_CoCAppr" name="Ancillary Documents / CoC Approval" />
@ -32,12 +38,6 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated b
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="Date" label="Version Date" type="date">
<camunda:properties>
<camunda:property id="group" value="PCRApproval" />
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>SequenceFlow_0ea9hvd</bpmn:incoming>
@ -67,4 +67,4 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated b
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>
</bpmn:definitions>

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.0">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_18biih5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
@ -8,32 +8,34 @@
<bpmn:endEvent id="EndEvent_063bpg6">
<bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
</bpmn:endEvent>
<bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Script Reference">
<bpmn:scriptTask id="Invalid_Script_Task" name="A Syntax Error">
<bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
<bpmn:script>a really bad error that should fail</bpmn:script>
<bpmn:script>x = 1
y = 2
x + y === a</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
<di:waypoint x="215" y="117" />
<di:waypoint x="290" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
<dc:Bounds x="442" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
<dc:Bounds x="290" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_18biih5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="SequenceFlow_1pnq3kg" sourceRef="StartEvent_1" targetRef="Invalid_Script_Task" />
<bpmn:endEvent id="EndEvent_063bpg6">
<bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
</bpmn:endEvent>
<bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Variable">
<bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
<bpmn:script>x = 1
y = 2
x + a == 3</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
<di:waypoint x="215" y="117" />
<di:waypoint x="290" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
<dc:Bounds x="442" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
<dc:Bounds x="290" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>
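
Between them, the two fixtures now separate the failure modes the engine has to report: invalid_script2 cannot even be parsed, while the new invalid_script3 parses and only fails at run time. The distinction in plain Python, matching the line numbers the tests assert:

# invalid_script2: '===' is rejected at compile time, before anything runs
try:
    compile("x = 1\ny = 2\nx + y === a", "<invalid_script2>", "exec")
except SyntaxError as e:
    print(type(e).__name__, e.lineno)   # SyntaxError 3

# invalid_script3: the script compiles, then line 3 fails looking up 'a'
try:
    exec(compile("x = 1\ny = 2\nx + a == 3", "<invalid_script3>", "exec"))
except NameError as e:
    print(type(e).__name__)             # NameError
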

View File

@ -1,14 +1,16 @@
import io
import json
import os
from tests.base_test import BaseTest
from crc import session, db
from crc import session, db, app
from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.data_store import DataStoreModel
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
@ -110,21 +112,24 @@ class TestFilesApi(BaseTest):
self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))
def test_set_reference_file(self):
file_name = "irb_document_types.xls"
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.xls")}
file_name = "irb_documents.xlsx"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xls, file.type)
self.assertEqual(FileType.xlsx, file.type)
self.assertTrue(file.is_reference)
self.assertEqual("application/vnd.ms-excel", file.content_type)
self.assertEqual("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", file.content_type)
self.assertEqual('dhf8r', json_data['user_uid'])
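
The reworked test uploads the real bundled spreadsheet instead of six junk bytes, which is what lets the type and content-type assertions be meaningful. The upload step, isolated as a helper (endpoint and paths copied from the diff; the helper itself is mine):

import io
import os

def put_reference_file(client, root_path, file_name, headers):
    """PUT a bundled xlsx to the reference-file endpoint under test."""
    filepath = os.path.join(root_path, 'static', 'reference', 'irb_documents.xlsx')
    with open(filepath, 'rb') as f:
        data = {'file': (io.BytesIO(f.read()), file_name)}
    return client.put('/v1.0/reference_file/%s' % file_name, data=data,
                      follow_redirects=True, content_type='multipart/form-data',
                      headers=headers)
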
def test_set_reference_file_bad_extension(self):
file_name = FileService.DOCUMENT_LIST
file_name = DocumentService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.ppt")}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
@ -132,22 +137,28 @@ class TestFilesApi(BaseTest):
def test_get_reference_file(self):
file_name = "irb_document_types.xls"
data = {'file': (io.BytesIO(b"abcdef"), "some crazy thing do not care.xls")}
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
rv = self.app.get('/v1.0/reference_file/%s' % file_name, headers=self.logged_in_headers())
self.assert_success(rv)
data_out = rv.get_data()
self.assertEqual(b"abcdef", data_out)
self.assertEqual(file_data, data_out)
def test_list_reference_files(self):
ExampleDataLoader.clean_db()
file_name = FileService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), file_name)}
file_name = DocumentService.DOCUMENT_LIST
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/v1.0/reference_file',
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
@ -160,7 +171,8 @@ class TestFilesApi(BaseTest):
def test_update_file_info(self):
self.load_example_data()
file: FileModel = session.query(FileModel).first()
self.create_reference_document()
file: FileModel = session.query(FileModel).filter(FileModel.is_reference==False).first()
file.name = "silly_new_name.bpmn"
rv = self.app.put('/v1.0/file/%i' % file.id,

View File

@ -1,4 +1,3 @@
import json
from SpiffWorkflow.bpmn.PythonScriptEngine import Box
@ -15,6 +14,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.scripts.file_data_set import FileDataSet
from crc.services.document_service import DocumentService
class TestStudyDetailsDocumentsScript(BaseTest):
@ -43,8 +43,8 @@ class TestStudyDetailsDocumentsScript(BaseTest):
# Remove the reference file.
file_model = db.session.query(FileModel). \
filter(FileModel.is_reference == True). \
filter(FileModel.name == FileService.DOCUMENT_LIST).first()
filter(FileModel.is_reference is True). \
filter(FileModel.name == DocumentService.DOCUMENT_LIST).first()
if file_model:
db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
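
One caution on the filter rewrite above: Python's 'is' tests object identity and cannot be overloaded, so FileModel.is_reference is True evaluates in Python (always False) and never reaches SQL — the query would match no rows. A runnable illustration with a bare column; == or .is_() are the spellings SQLAlchemy actually translates:

from sqlalchemy import column

is_reference = column('is_reference')
print(is_reference is True)          # False: plain identity check, no SQL involved
print(is_reference == True)          # SQL expression: is_reference = :param  # noqa: E712
print(is_reference.is_(True))        # SQL expression: is_reference IS true
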
@ -71,7 +71,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):
def test_load_lookup_data(self):
self.create_reference_document()
dict = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
dict = DocumentService.get_dictionary()
self.assertIsNotNone(dict)
def get_required_docs(self):

View File

@ -126,7 +126,7 @@ class TestStudyService(BaseTest):
self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
self.assertEqual('6', documents["UVACompl_PRCAppr"]['id'])
self.assertEqual(6, documents["UVACompl_PRCAppr"]['id'])
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_get_documents_has_file_details(self, mock_docs):

View File

@ -3,9 +3,6 @@ from tests.base_test import BaseTest
from crc.services.file_service import FileService
class TestDocumentDirectories(BaseTest):
def test_directory_list(self):

View File

@ -10,7 +10,7 @@ class TestFormFieldName(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(json_data[0]['message'],
'When populating all fields ... \nInvalid Field name: "user-title". A field ID must begin '
'Invalid Field name: "user-title". A field ID must begin '
'with a letter, and can only contain letters, numbers, and "_"')
def test_form_field_name_with_period(self):

View File

@ -10,5 +10,5 @@ class TestFormFieldType(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(json_data[0]['message'],
'When populating all fields ... \nType is missing for field "name". A field type must be provided.')
'Type is missing for field "name". A field type must be provided.')
# print('TestFormFieldType: Good Form')

View File

@ -59,7 +59,6 @@ class TestWorkflowSpecValidation(BaseTest):
app.config['PB_ENABLED'] = True
self.validate_all_loaded_workflows()
def validate_all_loaded_workflows(self):
workflows = session.query(WorkflowSpecModel).all()
errors = []
@ -71,12 +70,12 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_expression(self):
self.load_example_data()
errors = self.validate_workflow("invalid_expression")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
self.assertEqual('When populating all fields ... \nExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
self.assertIsNotNone(errors[0]['task_data'])
self.assertIn("has_bananas", errors[0]['task_data'])
@ -84,7 +83,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_validation_error(self):
self.load_example_data()
errors = self.validate_workflow("invalid_spec")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_error", errors[0]['code'])
self.assertEqual("StartEvent_1", errors[0]['task_id'])
self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])
@ -93,7 +92,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_script(self):
self.load_example_data()
errors = self.validate_workflow("invalid_script")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
#self.assertTrue("NoSuchScript" in errors[0]['message'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
@ -103,12 +102,23 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_script2(self):
self.load_example_data()
errors = self.validate_workflow("invalid_script2")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
self.assertEqual(3, errors[0]['line_number'])
self.assertEqual(9, errors[0]['offset'])
self.assertEqual("SyntaxError", errors[0]['error_type'])
self.assertEqual("A Syntax Error", errors[0]['task_name'])
self.assertEqual("invalid_script2.bpmn", errors[0]['file_name'])
def test_invalid_script3(self):
self.load_example_data()
errors = self.validate_workflow("invalid_script3")
self.assertEqual(1, len(errors))
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
self.assertEqual(3, errors[0]['line_number'])
self.assertEqual("NameError", errors[0]['error_type'])
def test_repeating_sections_correctly_populated(self):
self.load_example_data()
spec_model = self.load_test_spec('repeat_form')