Commit 2a45f2fcda
Merge branch 'dev' into add-name-error-hint-8
Pipfile (3 changed lines)
@@ -39,14 +39,13 @@ requests = "*"
 sentry-sdk = {extras = ["flask"],version = "==0.14.4"}
 sphinx = "*"
 swagger-ui-bundle = "*"
-spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"}
+spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git"}
 # spiffworkflow = {editable = true, path = "./../SpiffWorkflow"}
 webtest = "*"
 werkzeug = "*"
 xlrd = "*"
 xlsxwriter = "*"
 pygithub = "*"
 python-levenshtein = "*"
 apscheduler = "*"
 
 [requires]
Pipfile.lock (80 changed lines, generated)
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "2d74273fabb4ccda79f76e59ed2595d68a72eaa4a56bd4e04d0e7fbd9489039e"
+            "sha256": "ad259e41c4e42c8818992a6e5ce7436d35755a02e7f12688bed01e0250a3d668"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -576,11 +576,11 @@
         },
         "marshmallow": {
             "hashes": [
-                "sha256:8050475b70470cc58f4441ee92375db611792ba39ca1ad41d39cad193ea9e040",
-                "sha256:b45cde981d1835145257b4a3c5cb7b80786dcf5f50dd2990749a50c16cb48e01"
+                "sha256:77368dfedad93c3a041cbbdbce0b33fac1d8608c9e2e2288408a43ce3493d2ff",
+                "sha256:d4090ca9a36cd129126ad8b10c3982c47d4644a6e3ccb20534b512badce95f35"
             ],
             "index": "pypi",
-            "version": "==3.12.1"
+            "version": "==3.12.2"
         },
         "marshmallow-enum": {
             "hashes": [
@@ -657,34 +657,35 @@
         },
         "packaging": {
             "hashes": [
-                "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
-                "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
+                "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7",
+                "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"
             ],
-            "version": "==20.9"
+            "version": "==21.0"
         },
         "pandas": {
             "hashes": [
-                "sha256:0c34b89215f984a9e4956446e0a29330d720085efa08ea72022387ee37d8b373",
-                "sha256:0dbd125b0e44e5068163cbc9080a00db1756a5e36309329ae14fd259747f2300",
-                "sha256:1102d719038e134e648e7920672188a00375f3908f0383fd3b202fbb9d2c3a95",
-                "sha256:14abb8ea73fce8aebbb1fb44bec809163f1c55241bcc1db91c2c780e97265033",
-                "sha256:25fc8ef6c6beb51c9224284a1ad89dfb591832f23ceff78845f182de35c52356",
-                "sha256:38e7486410de23069392bdf1dc7297ae75d2d67531750753f3149c871cd1c6e3",
-                "sha256:4bfbf62b00460f78a8bc4407112965c5ab44324f34551e8e1f4cac271a07706c",
-                "sha256:78de96c1174bcfdbe8dece9c38c2d7994e407fd8bb62146bb46c61294bcc06ef",
-                "sha256:7b09293c7119ab22ab3f7f086f813ac2acbfa3bcaaaeb650f4cddfb5b9fa9be4",
-                "sha256:821d92466fcd2826656374a9b6fe4f2ec2ba5e370cce71d5a990577929d948df",
-                "sha256:9244fb0904512b074d8c6362fb13aac1da6c4db94372760ddb2565c620240264",
-                "sha256:94ca6ea3f46f44a979a38a4d5a70a88cee734f7248d7aeeed202e6b3ba485af1",
-                "sha256:a67227e17236442c6bc31c02cb713b5277b26eee204eac14b5aecba52492e3a3",
-                "sha256:c862cd72353921c102166784fc4db749f1c3b691dd017fc36d9df2c67a9afe4e",
-                "sha256:d9e6edddeac9a8e473391d2d2067bb3c9dc7ad79fd137af26a39ee425c2b4c78",
-                "sha256:e36515163829e0e95a6af10820f178dd8768102482c01872bff8ae592e508e58",
-                "sha256:f20e4b8a7909f5a0c0a9e745091e3ea18b45af9f73496a4d498688badbdac7ea",
-                "sha256:fc9215dd1dd836ff26b896654e66b2dfcf4bbb18aa4c1089a79bab527b665a90"
+                "sha256:08eeff3da6a188e24db7f292b39a8ca9e073bf841fbbeadb946b3ad5c19d843e",
+                "sha256:1ff13eed501e07e7fb26a4ea18a846b6e5d7de549b497025601fd9ccb7c1d123",
+                "sha256:522bfea92f3ef6207cadc7428bda1e7605dae0383b8065030e7b5d0266717b48",
+                "sha256:7897326cae660eee69d501cbfa950281a193fcf407393965e1bc07448e1cc35a",
+                "sha256:798675317d0e4863a92a9a6bc5bd2490b5f6fef8c17b95f29e2e33f28bef9eca",
+                "sha256:7d3cd2c99faa94d717ca00ea489264a291ad7209453dffbf059bfb7971fd3a61",
+                "sha256:823737830364d0e2af8c3912a28ba971296181a07950873492ed94e12d28c405",
+                "sha256:872aa91e0f9ca913046ab639d4181a899f5e592030d954d28c2529b88756a736",
+                "sha256:88864c1e28353b958b1f30e4193818519624ad9a1776921622a6a2a016d5d807",
+                "sha256:92835113a67cbd34747c198d41f09f4b63f6fe11ca5643baebc7ab1e30e89e95",
+                "sha256:98efc2d4983d5bb47662fe2d97b2c81b91566cb08b266490918b9c7d74a5ef64",
+                "sha256:b10d7910ae9d7920a5ff7816d794d99acbc361f7b16a0f017d4fa83ced8cb55e",
+                "sha256:c554e6c9cf2d5ea1aba5979cc837b3649539ced0e18ece186f055450c86622e2",
+                "sha256:c746876cdd8380be0c3e70966d4566855901ac9aaa5e4b9ccaa5ca5311457d11",
+                "sha256:c81b8d91e9ae861eb4406b4e0f8d4dabbc105b9c479b3d1e921fba1d35b5b62a",
+                "sha256:e6b75091fa54a53db3927b4d1bc997c23c5ba6f87acdfe1ee5a92c38c6b2ed6a",
+                "sha256:ed4fc66f23fe17c93a5d439230ca2d6b5f8eac7154198d327dbe8a16d98f3f10",
+                "sha256:f058c786e7b0a9e7fa5e0b9f4422e0ccdd3bf3aa3053c18d77ed2a459bd9a45a",
+                "sha256:fe7a549d10ca534797095586883a5c17d140d606747591258869c56e14d1b457"
             ],
             "index": "pypi",
-            "version": "==1.2.5"
+            "version": "==1.3.0"
         },
         "psycopg2-binary": {
             "hashes": [
@@ -836,13 +837,6 @@
             ],
             "version": "==1.0.4"
         },
-        "python-levenshtein": {
-            "hashes": [
-                "sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6"
-            ],
-            "index": "pypi",
-            "version": "==0.12.2"
-        },
         "pytz": {
             "hashes": [
                 "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da",
@@ -935,11 +929,11 @@
         },
         "sphinx": {
             "hashes": [
-                "sha256:b5c2ae4120bf00c799ba9b3699bc895816d272d120080fbc967292f29b52b48c",
-                "sha256:d1cb10bee9c4231f1700ec2e24a91be3f3a3aba066ea4ca9f3bbe47e59d5a1d4"
+                "sha256:5747f3c855028076fcff1e4df5e75e07c836f0ac11f7df886747231092cfe4ad",
+                "sha256:dff357e6a208eb7edb2002714733ac21a9fe597e73609ff417ab8cf0c6b4fbb8"
             ],
             "index": "pypi",
-            "version": "==4.0.2"
+            "version": "==4.0.3"
         },
         "sphinxcontrib-applehelp": {
             "hashes": [
@@ -985,7 +979,7 @@
         },
         "spiffworkflow": {
             "git": "https://github.com/sartography/SpiffWorkflow.git",
-            "ref": "109c237423e4e2645b4605b1166075546f22d272"
+            "ref": "12525258fe8172147600d5c846c04f245d3b3620"
         },
         "sqlalchemy": {
             "hashes": [
@@ -1097,11 +1091,11 @@
         },
         "xlsxwriter": {
             "hashes": [
-                "sha256:1a7fac99687020e76aa7dd0d7de4b9b576547ed748e5cd91a99d52a6df54ca16",
-                "sha256:641db6e7b4f4982fd407a3f372f45b878766098250d26963e95e50121168cbe2"
+                "sha256:15b65f02f7ecdcfb1f22794b1fcfed8e9a49e8b7414646f90347be5cbf464234",
+                "sha256:791567acccc485ba76e0b84bccced2651981171de5b47d541520416f2f9f93e3"
             ],
             "index": "pypi",
-            "version": "==1.4.3"
+            "version": "==1.4.4"
         }
     },
     "develop": {
@@ -1179,10 +1173,10 @@
         },
         "packaging": {
             "hashes": [
-                "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
-                "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
+                "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7",
+                "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"
             ],
-            "version": "==20.9"
+            "version": "==21.0"
         },
         "pbr": {
             "hashes": [
crc/api.yml (10 changed lines)
@@ -82,7 +82,7 @@ paths:
           schema :
             type : integer
       get:
-        operationId: crc.api.file.get_document_directory
+        operationId: crc.api.document.get_document_directory
         summary: Returns a directory of all files for study in a nested structure
         tags:
           - Document Categories
@@ -510,12 +510,18 @@ paths:
           description: The unique id of an existing workflow specification to validate.
           schema:
             type: string
-        - name: validate_study_id
+        - name: study_id
          in: query
          required: false
          description: Optional id of study to test under different scenarios
          schema:
            type: string
+        - name: test_until
+          in: query
+          required: false
+          description: Optional name of task to stop validating at
+          schema:
+            type: string
       get:
         operationId: crc.api.workflow.validate_workflow_specification
         summary: Loads and attempts to execute a Workflow Specification, returning a list of errors encountered
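
Example (illustrative, not part of the diff): a client call exercising the two optional query parameters defined above. The endpoint path and ids are hypothetical; only the parameter names come from api.yml.

import requests

# 'study_id' and 'test_until' are the query parameters added/renamed above.
resp = requests.get(
    "http://localhost:5000/v1.0/workflow-specification/my_spec/validate",  # path hypothetical
    params={"study_id": "42", "test_until": "Task_CollectData"},
)
print(resp.json())  # a list of ApiError payloads; empty when validation passes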
@@ -10,7 +10,9 @@ import sentry_sdk
 
 class ApiError(Exception):
     def __init__(self, code, message, status_code=400,
-                 file_name="", task_id="", task_name="", tag="", task_data = {}):
+                 file_name="", task_id="", task_name="", tag="", task_data=None, error_type="", line_number=0, offset=0):
+        if task_data is None:
+            task_data = {}
         self.status_code = status_code
         self.code = code  # a short consistent string describing the error.
         self.message = message  # A detailed message that provides more information.
@@ -18,8 +20,11 @@ class ApiError(Exception):
         self.task_name = task_name or ""  # OPTIONAL: The name of the task in the BPMN Diagram.
         self.file_name = file_name or ""  # OPTIONAL: The file that caused the error.
         self.tag = tag or ""  # OPTIONAL: The XML Tag that caused the issue.
-        self.task_data = task_data or ""  # OPTIONAL: A snapshot of data connected to the task when error ocurred.
-        if hasattr(g,'user'):
+        self.task_data = task_data or ""  # OPTIONAL: A snapshot of data connected to the task when error occurred.
+        self.line_number = line_number
+        self.offset = offset
+        self.error_type = error_type
+        if hasattr(g, 'user'):
             user = g.user.uid
         else:
             user = 'Unknown'
@@ -29,12 +34,16 @@ class ApiError(Exception):
         Exception.__init__(self, self.message)
 
     @classmethod
-    def from_task(cls, code, message, task, status_code=400):
+    def from_task(cls, code, message, task, status_code=400, line_number=0, offset=0, error_type="", error_line=""):
         """Constructs an API Error with details pulled from the current task."""
         instance = cls(code, message, status_code=status_code)
         instance.task_id = task.task_spec.name or ""
         instance.task_name = task.task_spec.description or ""
         instance.file_name = task.workflow.spec.file or ""
+        instance.line_number = line_number
+        instance.offset = offset
+        instance.error_type = error_type
+        instance.error_line = error_line
 
         # Fixme: spiffworkflow is doing something weird where task ends up referenced in the data in some cases.
         if "task" in task.data:
@@ -61,7 +70,11 @@ class ApiError(Exception):
         so consolidating the code, and doing the best things
         we can with the data we have."""
         if isinstance(exp, WorkflowTaskExecException):
-            return ApiError.from_task(code, message, exp.task)
+            return ApiError.from_task(code, message, exp.task, line_number=exp.line_number,
+                                      offset=exp.offset,
+                                      error_type=exp.exception.__class__.__name__,
+                                      error_line=exp.error_line)
 
         else:
             return ApiError.from_task_spec(code, message, exp.sender)
 
@@ -69,7 +82,7 @@ class ApiError(Exception):
 class ApiErrorSchema(ma.Schema):
     class Meta:
         fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
-                  "task_data", "task_user", "hint")
+                  "task_data", "task_user", "hint", "line_number", "offset", "error_type", "error_line")
 
 
 @app.errorhandler(ApiError)
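
Example (illustrative, not part of the diff): with the fields added above, ApiErrorSchema can now serialize position details for a failed script task. A hypothetical payload, shaped by the Meta.fields tuple:

# All values below are invented for illustration.
error_payload = {
    "code": "workflow_validation_exception",
    "message": "something you are referencing does not exist: x = unknwon_var",
    "task_id": "Task_Script",
    "task_name": "Run Script",
    "task_data": {},
    "hint": "Did you mean 'unknown_var'?",
    "line_number": 3,
    "offset": 0,
    "error_type": "NameError",
    "error_line": "x = unknwon_var",
}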
crc/api/document.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+from crc.models.api_models import DocumentDirectorySchema
+from crc.models.file import File
+from crc.services.document_service import DocumentService
+from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
+
+
+def get_document_directory(study_id, workflow_id=None):
+    """
+    return a nested list of files arranged according to the category hierarchy
+    defined in the doc dictionary
+    """
+    file_models = FileService.get_files_for_study(study_id=study_id)
+    doc_dict = DocumentService.get_dictionary()
+    files = (File.from_models(model, FileService.get_file_data(model.id), doc_dict) for model in file_models)
+    directory = DocumentService.get_directory(doc_dict, files, workflow_id)
+
+    return DocumentDirectorySchema(many=True).dump(directory)
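
Usage sketch (assumes a Flask app context and a seeded study; not part of the diff):

from crc.api.document import get_document_directory

# Returns DocumentDirectorySchema dicts nested category1 -> category2 -> category3 -> file.
directory = get_document_directory(study_id=1, workflow_id=None)
for entry in directory:
    print(entry.get('level'), entry.get('filecount'))  # field names assumed from the DocumentDirectory model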
@@ -7,71 +7,15 @@ from flask import send_file
 from crc import session
 from crc.api.common import ApiError
 from crc.api.user import verify_token
-from crc.models.api_models import DocumentDirectory, DocumentDirectorySchema
 from crc.models.file import FileSchema, FileModel, File, FileModelSchema, FileDataModel, FileType
 from crc.models.workflow import WorkflowSpecModel
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 
 
-def ensure_exists(output, categories, expanded):
-    """
-    This is a recursive function, it expects a list of
-    levels with a file object at the end (kinda like duck,duck,duck,goose)
-
-    for each level, it makes sure that level is already in the structure and if it is not
-    it will add it
-
-    function terminates upon getting an entry that is a file object ( or really anything but string)
-    """
-    current_item = categories[0]
-    found = False
-    if isinstance(current_item, str):
-        for item in output:
-            if item.level == current_item:
-                found = True
-                item.filecount = item.filecount + 1
-                item.expanded = expanded | item.expanded
-                ensure_exists(item.children, categories[1:], expanded)
-        if not found:
-            new_level = DocumentDirectory(level=current_item)
-            new_level.filecount = 1
-            new_level.expanded = expanded
-            output.append(new_level)
-            ensure_exists(new_level.children, categories[1:], expanded)
-    else:
-        new_level = DocumentDirectory(file=current_item)
-        new_level.expanded = expanded
-        output.append(new_level)
-
-
-def get_document_directory(study_id, workflow_id=None):
-    """
-    return a nested list of files arranged according to the category hirearchy
-    defined in the doc dictionary
-    """
-    output = []
-    doc_dict = FileService.get_doc_dictionary()
-    file_models = FileService.get_files_for_study(study_id=study_id)
-    files = (to_file_api(model) for model in file_models)
-    for file in files:
-        if file.irb_doc_code in doc_dict:
-            doc_code = doc_dict[file.irb_doc_code]
-        else:
-            doc_code = {'category1': "Unknown", 'category2': '', 'category3': ''}
-        if workflow_id:
-            expand = file.workflow_id == int(workflow_id)
-        else:
-            expand = False
-        print(expand)
-        categories = [x for x in [doc_code['category1'],doc_code['category2'],doc_code['category3'],file] if x != '']
-        ensure_exists(output, categories, expanded=expand)
-    return DocumentDirectorySchema(many=True).dump(output)
-
-
 def to_file_api(file_model):
     """Converts a FileModel object to something we can return via the api"""
     return File.from_models(file_model, FileService.get_file_data(file_model.id),
-                            FileService.get_doc_dictionary())
+                            DocumentService.get_dictionary())
 
 
 def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None,study_id=None):
@@ -46,22 +46,15 @@ def get_workflow_specification(spec_id):
     return WorkflowSpecModelSchema().dump(spec)
 
 
-def validate_workflow_specification(spec_id, validate_study_id=None):
-    errors = {}
+def validate_workflow_specification(spec_id, study_id=None, test_until=None):
     try:
-        WorkflowService.test_spec(spec_id, validate_study_id)
+        WorkflowService.test_spec(spec_id, study_id, test_until)
+        WorkflowService.test_spec(spec_id, study_id, test_until, required_only=True)
     except ApiError as ae:
-        ae.message = "When populating all fields ... \n" + ae.message
-        errors['all'] = ae
-    try:
-        # Run the validation twice, the second time, just populate the required fields.
-        WorkflowService.test_spec(spec_id, validate_study_id, required_only=True)
-    except ApiError as ae:
-        ae.message = "When populating only required fields ... \n" + ae.message
-        errors['required'] = ae
-    interpreted_errors = ValidationErrorService.interpret_validation_errors(errors)
-    return ApiErrorSchema(many=True).dump(interpreted_errors)
+        error = ae
+        error = ValidationErrorService.interpret_validation_error(error)
+        return ApiErrorSchema(many=True).dump([error])
+    return []
 
 def update_workflow_specification(spec_id, body):
     if spec_id is None:
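
The net effect of this hunk: validation now runs twice inside one try block (all fields, then required-only) and returns at most one interpreted error. A self-contained sketch of the control flow, with stand-ins for the crc internals (all names hypothetical):

class ValidationError(Exception):  # stand-in for crc's ApiError
    hint = None

def run_spec(spec_id, study_id=None, test_until=None, required_only=False):
    raise ValidationError("pretend the spec failed")  # stand-in for WorkflowService.test_spec

def interpret(error):  # stand-in for ValidationErrorService.interpret_validation_error
    error.hint = 'Add a Condition Type to your gateway path.'
    return error

def validate(spec_id, study_id=None, test_until=None):
    try:
        run_spec(spec_id, study_id, test_until)
        run_spec(spec_id, study_id, test_until, required_only=True)
    except ValidationError as ae:
        return [interpret(ae)]
    return []

print(validate("my_spec"))  # one hinted error, or [] on success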
@@ -1,15 +1,14 @@
 import enum
-from typing import cast
 
-from marshmallow import INCLUDE, EXCLUDE, fields, Schema
+from marshmallow import INCLUDE, EXCLUDE, Schema
 from marshmallow_enum import EnumField
 from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
 from sqlalchemy import func, Index
 from sqlalchemy.dialects.postgresql import UUID
 from sqlalchemy.orm import deferred, relationship
-from crc.models.data_store import DataStoreModel  # this is needed by the relationship
 
 from crc import db, ma
+from crc.models.data_store import DataStoreModel
 
 
 class FileType(enum.Enum):
@@ -43,7 +42,7 @@ CONTENT_TYPES = {
     "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
     "gif": "image/gif",
     "jpg": "image/jpeg",
-    "md" : "text/plain",
+    "md": "text/plain",
     "pdf": "application/pdf",
     "png": "image/png",
     "ppt": "application/vnd.ms-powerpoint",
@@ -71,7 +70,6 @@ class FileDataModel(db.Model):
     file_model = db.relationship("FileModel", foreign_keys=[file_model_id])
 
 
-
 class FileModel(db.Model):
     __tablename__ = 'file'
     id = db.Column(db.Integer, primary_key=True)
@@ -79,18 +77,19 @@ class FileModel(db.Model):
     type = db.Column(db.Enum(FileType))
     is_status = db.Column(db.Boolean)
     content_type = db.Column(db.String)
-    is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
-    primary = db.Column(db.Boolean, nullable=False, default=False) # Is this the primary BPMN in a workflow?
-    primary_process_id = db.Column(db.String, nullable=True) # An id in the xml of BPMN documents, critical for primary BPMN.
+    is_reference = db.Column(db.Boolean, nullable=False, default=False)  # A global reference file.
+    primary = db.Column(db.Boolean, nullable=False, default=False)  # Is this the primary BPMN in a workflow?
+    primary_process_id = db.Column(db.String, nullable=True)  # An id in the xml of BPMN documents, for primary BPMN.
     workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
     workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
-    irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
+    irb_doc_code = db.Column(db.String, nullable=True)  # Code reference to the irb_documents.xlsx reference file.
     # A request was made to delete the file, but we can't because there are
     # active approvals or running workflows that depend on it. So we archive
     # it instead, hide it in the interface.
     is_review = db.Column(db.Boolean, default=False, nullable=True)
     archived = db.Column(db.Boolean, default=False, nullable=False)
-    data_stores = relationship("DataStoreModel", cascade="all,delete", backref="file")
+    data_stores = relationship(DataStoreModel, cascade="all,delete", backref="file")
 
 
 class File(object):
     @classmethod
@@ -107,7 +106,7 @@ class File(object):
         instance.workflow_id = model.workflow_id
         instance.irb_doc_code = model.irb_doc_code
         instance.type = model.type
-        if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
+        if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
             instance.document = doc_dictionary[model.irb_doc_code]
         else:
             instance.document = {}
@@ -147,7 +146,6 @@ class FileSchema(Schema):
     type = EnumField(FileType)
 
 
-
 class LookupFileModel(db.Model):
     """Gives us a quick way to tell what kind of lookup is set on a form field.
     Connected to the file data model, so that if a new version of the same file is
@@ -159,7 +157,8 @@ class LookupFileModel(db.Model):
     field_id = db.Column(db.String)
     is_ldap = db.Column(db.Boolean)  # Allows us to run an ldap query instead of a db lookup.
     file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id'))
-    dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan")
+    dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model",
+                                   cascade="all, delete, delete-orphan")
 
 
 class LookupDataModel(db.Model):
@@ -169,7 +168,7 @@ class LookupDataModel(db.Model):
     value = db.Column(db.String)
     label = db.Column(db.String)
     # In the future, we might allow adding an additional "search" column if we want to search things not in label.
-    data = db.Column(db.JSON) # all data for the row is stored in a json structure here, but not searched presently.
+    data = db.Column(db.JSON)  # all data for the row is stored in a json structure here, but not searched presently.
 
     # Assure there is a searchable index on the label column, so we can get fast results back.
     # query with:
@@ -192,7 +191,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema):
         load_instance = True
         include_relationships = False
         include_fk = False  # Includes foreign keys
-        exclude = ['id'] # Do not include the id field, it should never be used via the API.
+        exclude = ['id']  # Do not include the id field, it should never be used via the API.
 
 
 class SimpleFileSchema(ma.Schema):
@@ -2,6 +2,7 @@ from crc import session
 from crc.api.common import ApiError
 from crc.models.file import FileModel
 from crc.scripts.script import Script
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 
 
@@ -9,7 +10,7 @@ class DeleteFile(Script):
 
     @staticmethod
     def process_document_deletion(doc_code, workflow_id, task):
-        if FileService.is_allowed_document(doc_code):
+        if DocumentService.is_allowed_document(doc_code):
             result = session.query(FileModel).filter(
                 FileModel.workflow_id == workflow_id, FileModel.irb_doc_code == doc_code).all()
             if isinstance(result, list) and len(result) > 0 and isinstance(result[0], FileModel):
@@ -3,6 +3,7 @@ from flask import g
 from crc.api.common import ApiError
 from crc.services.data_store_service import DataStoreBase
 from crc.scripts.script import Script
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 
 
@@ -17,17 +18,22 @@ class FileDataSet(Script, DataStoreBase):
             del(kwargs['file_id'])
         return True
 
-    def validate_kw_args(self,**kwargs):
-        if kwargs.get('key',None) is None:
+    def validate_kw_args(self, **kwargs):
+        if kwargs.get('key', None) is None:
             raise ApiError(code="missing_argument",
-                          message=f"The 'file_data_get' script requires a keyword argument of 'key'")
+                           message=f"The 'file_data_get' script requires a keyword argument of 'key'")
-        if kwargs.get('file_id',None) is None:
+        if kwargs.get('file_id', None) is None:
             raise ApiError(code="missing_argument",
                            message=f"The 'file_data_get' script requires a keyword argument of 'file_id'")
-        if kwargs.get('value',None) is None:
+        if kwargs.get('value', None) is None:
             raise ApiError(code="missing_argument",
                            message=f"The 'file_data_get' script requires a keyword argument of 'value'")
+        if kwargs['key'] == 'irb_code' and not DocumentService.is_allowed_document(kwargs.get('value')):
+            raise ApiError("invalid_form_field_key",
+                           "When setting an irb_code, the form field id must match a known document in the "
+                           "irb_docunents.xslx reference file. This code is not found in that file '%s'" %
+                           kwargs.get('value'))
 
         return True
@@ -10,6 +10,7 @@ from crc.models.protocol_builder import ProtocolBuilderInvestigatorType
 from crc.models.study import StudyModel, StudySchema
 from crc.api import workflow as workflow_api
 from crc.scripts.script import Script
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 from crc.services.protocol_builder import ProtocolBuilderService
 from crc.services.study_service import StudyService
@@ -168,8 +169,8 @@ Please note this is just a few examples, ALL known document types are returned i
         """For validation only, pretend no results come back from pb"""
         self.check_args(args, 2)
         # Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.)
-        FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
-        FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST)
+        FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
+        FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
         # we call the real do_task so we can
         # seed workflow validations with settings from studies in PB Mock
         # in order to test multiple paths thru the workflow
crc/services/document_service.py (new file, 98 lines)
@@ -0,0 +1,98 @@
+from crc.api.common import ApiError
+from crc.models.api_models import DocumentDirectory
+from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
+
+
+class DocumentService(object):
+    """The document service provides details about the types of documents that can be uploaded to a workflow.
+    This metadata about different document types is managed in an Excel spreadsheet, which can be uploaded at any
+    time to change which documents are accepted, and it allows us to categorize these documents. At a minimum,
+    the spreadsheet should contain the columns 'code', 'category1', 'category2', 'category3', 'description' and 'id',
+    code is required for all rows in the table, the other fields are optional. """
+
+    DOCUMENT_LIST = "irb_documents.xlsx"
+
+    @staticmethod
+    def is_allowed_document(code):
+        doc_dict = DocumentService.get_dictionary()
+        return code in doc_dict
+
+    @staticmethod
+    def verify_doc_dictionary(dd):
+        """
+        We are currently getting structured information from an XLS file, if someone accidentally
+        changes a header we will have problems later, so we will verify we have the headers we need
+        here
+        """
+        required_fields = ['category1', 'category2', 'category3', 'description']
+
+        # we only need to check the first item, as all of the keys should be the same
+        key = list(dd.keys())[0]
+        for field in required_fields:
+            if field not in dd[key].keys():
+                raise ApiError(code="Invalid document list %s" % DocumentService.DOCUMENT_LIST,
+                               message='Please check the headers in %s' % DocumentService.DOCUMENT_LIST)
+
+    @staticmethod
+    def get_dictionary():
+        """Returns a dictionary of document details keyed on the doc_code."""
+        file_data = FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
+        lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'description')
+        doc_dict = {}
+        for lookup_data in lookup_model.dependencies:
+            doc_dict[lookup_data.value] = lookup_data.data
+        return doc_dict
+
+    @staticmethod
+    def get_directory(doc_dict, files, workflow_id):
+        """Returns a list of directories, hierarchically nested by category, with files at the deepest level.
+        Empty directories are not include."""
+        directory = []
+        if files:
+            for file in files:
+                if file.irb_doc_code in doc_dict:
+                    doc_code = doc_dict[file.irb_doc_code]
+                else:
+                    doc_code = {'category1': "Unknown", 'category2': None, 'category3': None}
+                if workflow_id:
+                    expand = file.workflow_id == int(workflow_id)
+                else:
+                    expand = False
+                print(expand)
+                categories = [x for x in [doc_code['category1'], doc_code['category2'], doc_code['category3'], file] if x]
+                DocumentService.ensure_exists(directory, categories, expanded=expand)
+        return directory
+
+    @staticmethod
+    def ensure_exists(output, categories, expanded):
+        """
+        This is a recursive function, it expects a list of
+        levels with a file object at the end (kinda like duck,duck,duck,goose)
+
+        for each level, it makes sure that level is already in the structure and if it is not
+        it will add it
+
+        function terminates upon getting an entry that is a file object ( or really anything but string)
+        """
+        current_item = categories[0]
+        found = False
+        if isinstance(current_item, str):
+            for item in output:
+                if item.level == current_item:
+                    found = True
+                    item.filecount = item.filecount + 1
+                    item.expanded = expanded | item.expanded
+                    DocumentService.ensure_exists(item.children, categories[1:], expanded)
+            if not found:
+                new_level = DocumentDirectory(level=current_item)
+                new_level.filecount = 1
+                new_level.expanded = expanded
+                output.append(new_level)
+                DocumentService.ensure_exists(new_level.children, categories[1:], expanded)
+            else:
+                print("Found it")
+        else:
+            new_level = DocumentDirectory(file=current_item)
+            new_level.expanded = expanded
+            output.append(new_level)
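
Toy illustration (not part of the diff) of the list that get_directory hands to the 'duck, duck, goose' recursion in ensure_exists; a plain string stands in for the real File object:

doc_code = {'category1': 'Core', 'category2': 'Protocol', 'category3': None}
file_name = 'protocol.docx'  # stands in for a File instance

# Falsy categories are filtered out, so the path collapses to Core/Protocol/protocol.docx.
categories = [x for x in [doc_code['category1'], doc_code['category2'],
                          doc_code['category3'], file_name] if x]
print(categories)  # ['Core', 'Protocol', 'protocol.docx']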
@@ -1,6 +1,5 @@
 import re
 
-generic_message = """Workflow validation failed. For more information about the error, see below."""
 
 # known_errors is a dictionary of errors from validation that we want to give users a hint for solving their problem.
 # The key is the known error, or part of the known error. It is a string.
@@ -14,7 +13,7 @@ generic_message = """Workflow validation failed. For more information about the
 
 # I know this explanation is confusing. If you have ideas for clarification, pull request welcome.
 
-known_errors = {'Error is Non-default exclusive outgoing sequence flow without condition':
+known_errors = {'Non-default exclusive outgoing sequence flow without condition':
                     {'hint': 'Add a Condition Type to your gateway path.'},
 
                 'Could not set task title on task .*':
@@ -29,37 +28,16 @@ class ValidationErrorService(object):
     Validation is run twice,
     once where we try to fill in all form fields
     and a second time where we only fill in the required fields.
 
     We get a list that contains possible errors from the validation."""
 
     @staticmethod
-    def interpret_validation_errors(errors):
-        if len(errors) == 0:
-            return ()
-
-        interpreted_errors = []
-
-        for error_type in ['all', 'required']:
-            if error_type in errors:
-                hint = generic_message
-                for known_key in known_errors:
-                    regex = re.compile(known_key)
-                    result = regex.search(errors[error_type].message)
-                    if result is not None:
-                        if 'hint' in known_errors[known_key]:
-                            if 'groups' in known_errors[known_key]:
-                                caught = {}
-
-                                for group in known_errors[known_key]['groups']:
-                                    group_id = known_errors[known_key]['groups'][group]
-                                    group_value = result.groups()[group_id]
-                                    caught[group] = group_value
-
-                                hint = known_errors[known_key]['hint'].format(**caught)
-                            else:
-                                hint = known_errors[known_key]['hint']
-
-                        errors[error_type].hint = hint
-                        interpreted_errors.append(errors[error_type])
-
-        return interpreted_errors
+    def interpret_validation_error(error):
+        if error is None:
+            return
+        for known_key in known_errors:
+            regex = re.compile(known_key)
+            result = regex.search(error.message)
+            if result is not None:
+                if 'hint' in known_errors[known_key]:
+                    error.hint = known_errors[known_key]['hint']
+        return error
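
Each known_errors key is treated as a regular expression and searched against the error message; a match contributes its hint. A minimal standalone sketch (message text invented):

import re

known_errors = {'Non-default exclusive outgoing sequence flow without condition':
                    {'hint': 'Add a Condition Type to your gateway path.'}}

message = "Non-default exclusive outgoing sequence flow without condition on Gateway_1"
for known_key, details in known_errors.items():
    if re.compile(known_key).search(message) and 'hint' in details:
        print(details['hint'])  # Add a Condition Type to your gateway path.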
@@ -10,8 +10,6 @@ from lxml import etree
 
 from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
 from lxml.etree import XMLSyntaxError
-from pandas import ExcelFile
-from pandas._libs.missing import NA
 from sqlalchemy import desc
 from sqlalchemy.exc import IntegrityError
 
@@ -38,34 +36,6 @@ def camel_to_snake(camel):
 
 
 class FileService(object):
     """Provides consistent management and rules for storing, retrieving and processing files."""
-    DOCUMENT_LIST = "irb_documents.xlsx"
-    INVESTIGATOR_LIST = "investigators.xlsx"
-
-    __doc_dictionary = None
-
-    @staticmethod
-    def verify_doc_dictionary(dd):
-        """
-        We are currently getting structured information from an XLS file, if someone accidentally
-        changes a header we will have problems later, so we will verify we have the headers we need
-        here
-        """
-        required_fields = ['category1','category2','category3','description']
-
-        # we only need to check the first item, as all of the keys should be the same
-        key = list(dd.keys())[0]
-        for field in required_fields:
-            if field not in dd[key].keys():
-                raise ApiError(code="Invalid document list %s"%FileService.DOCUMENT_LIST,
-                               message='Please check the headers in %s'%FileService.DOCUMENT_LIST)
-
-    @staticmethod
-    def get_doc_dictionary():
-        if not FileService.__doc_dictionary:
-            FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
-            FileService.verify_doc_dictionary(FileService.__doc_dictionary)
-        return FileService.__doc_dictionary
-
     @staticmethod
     def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
@@ -88,10 +58,7 @@ class FileService(object):
 
         return FileService.update_file(file_model, binary_data, content_type)
 
-    @staticmethod
-    def is_allowed_document(code):
-        doc_dict = FileService.get_doc_dictionary()
-        return code in doc_dict
-
 
     @staticmethod
     @cache
@@ -104,12 +71,6 @@ class FileService(object):
     def update_irb_code(file_id, irb_doc_code):
         """Create a new file and associate it with the workflow
         Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document."""
-        if not FileService.is_allowed_document(irb_doc_code):
-            raise ApiError("invalid_form_field_key",
-                           "When uploading files, the form field id must match a known document in the "
-                           "irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code)
-
-        """ """
         file_model = session.query(FileModel)\
             .filter(FileModel.id == file_id).first()
         if file_model is None:
@@ -137,28 +98,6 @@ class FileService(object):
             )
         return FileService.update_file(file_model, binary_data, content_type)
 
-    @staticmethod
-    def get_reference_data(reference_file_name, index_column, int_columns=[]):
-        """ Opens a reference file (assumes that it is xls file) and returns the data as a
-        dictionary, each row keyed on the given index_column name. If there are columns
-        that should be represented as integers, pass these as an array of int_columns, lest
-        you get '1.0' rather than '1'
-        fixme: This is stupid stupid slow. Place it in the database and just check if it is up to date."""
-        data_model = FileService.get_reference_file_data(reference_file_name)
-        xls = ExcelFile(data_model.data, engine='openpyxl')
-        df = xls.parse(xls.sheet_names[0])
-        df = df.convert_dtypes()
-        df = pd.DataFrame(df).dropna(how='all')  # Drop null rows
-        df = pd.DataFrame(df).replace({NA: None})  # replace NA with None.
-
-        for c in int_columns:
-            df[c] = df[c].fillna(0)
-            df = df.astype({c: 'Int64'})
-        df = df.fillna('')
-        df = df.applymap(str)
-        df = df.set_index(index_column)
-        return json.loads(df.to_json(orient='index'))
-
     @staticmethod
     def get_workflow_files(workflow_id):
         """Returns all the file models associated with a running workflow."""
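
The deleted get_reference_data leaned on one pandas idiom worth keeping in mind: casting to the nullable 'Int64' dtype so integer ids do not come back as '1.0'. In isolation:

import pandas as pd

df = pd.DataFrame({'id': [1.0, None]})  # floats, as numeric xlsx cells are read
df['id'] = df['id'].fillna(0)
df = df.astype({'id': 'Int64'})         # nullable integer dtype
print(df['id'].astype(str).tolist())    # ['1', '0'] rather than ['1.0', '0.0']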
@@ -12,7 +12,7 @@ from sqlalchemy.sql.functions import GenericFunction
 from crc import db
 from crc.api.common import ApiError
 from crc.models.api_models import Task
-from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
+from crc.models.file import FileModel, FileDataModel, LookupFileModel, LookupDataModel
 from crc.models.workflow import WorkflowModel, WorkflowSpecDependencyFile
 from crc.services.file_service import FileService
 from crc.services.ldap_service import LdapService
@@ -25,11 +25,14 @@ class TSRank(GenericFunction):
 
 
 class LookupService(object):
-    """Provides tools for doing lookups for auto-complete fields.
-    This can currently take two forms:
+    """Provides tools for doing lookups for auto-complete fields, and rapid access to any
+    uploaded spreadsheets.
+    This can currently take three forms:
     1) Lookup from spreadsheet data associated with a workflow specification.
        in which case we store the spreadsheet data in a lookup table with full
        text indexing enabled, and run searches against that table.
+    2) Lookup from spreadsheet data associated with a specific file. This allows us
+       to get a lookup model for a specific file object, such as a reference file.
     2) Lookup from LDAP records. In which case we call out to an external service
        to pull back detailed records and return them.
 
@@ -44,6 +47,14 @@ class LookupService(object):
         workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
         return LookupService.__get_lookup_model(workflow, spiff_task.task_spec.name, field.id)
 
+    @staticmethod
+    def get_lookup_model_for_file_data(file_data: FileDataModel, value_column, label_column):
+        lookup_model = db.session.query(LookupFileModel).filter(LookupFileModel.file_data_model_id == file_data.id).first()
+        if not lookup_model:
+            logging.warning("!!!! Making a very expensive call to update the lookup model.")
+            lookup_model = LookupService.build_lookup_table(file_data, value_column, label_column)
+        return lookup_model
+
     @staticmethod
     def __get_lookup_model(workflow, task_spec_id, field_id):
         lookup_model = db.session.query(LookupFileModel) \
@@ -139,7 +150,8 @@ class LookupService(object):
         return lookup_model
 
     @staticmethod
-    def build_lookup_table(data_model: FileDataModel, value_column, label_column, workflow_spec_id, task_spec_id, field_id):
+    def build_lookup_table(data_model: FileDataModel, value_column, label_column,
+                           workflow_spec_id=None, task_spec_id=None, field_id=None):
         """ In some cases the lookup table can be very large. This method will add all values to the database
         in a way that can be searched and returned via an api call - rather than sending the full set of
         options along with the form. It will only open the file and process the options if something has
@@ -147,8 +159,9 @@ class LookupService(object):
         xls = ExcelFile(data_model.data, engine='openpyxl')
         df = xls.parse(xls.sheet_names[0])  # Currently we only look at the fist sheet.
         df = df.convert_dtypes()
+        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]  # Drop unnamed columns.
         df = pd.DataFrame(df).dropna(how='all')  # Drop null rows
-        df = pd.DataFrame(df).replace({NA: None})
+        df = pd.DataFrame(df).replace({NA: ''})
 
         if value_column not in df:
             raise ApiError("invalid_enum",
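
Usage sketch (app context and a seeded reference file assumed; not part of the diff): the new entry point mirrors what DocumentService.get_dictionary does above.

from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService

file_data = FileService.get_reference_file_data("irb_documents.xlsx")
lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'description')
doc_dict = {d.value: d.data for d in lookup_model.dependencies}  # one entry per doc code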
@@ -22,13 +22,16 @@ from crc.models.study import StudyModel, Study, StudyStatus, Category, WorkflowM
 from crc.models.task_event import TaskEventModel, TaskEvent
 from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
     WorkflowStatus, WorkflowSpecDependencyFile
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 from crc.services.ldap_service import LdapService
+from crc.services.lookup_service import LookupService
 from crc.services.protocol_builder import ProtocolBuilderService
 from crc.services.workflow_processor import WorkflowProcessor
 
 
 class StudyService(object):
     """Provides common tools for working with a Study"""
+    INVESTIGATOR_LIST = "investigators.xlsx"  # A reference document containing details about what investigators to show, and when.
 
     @staticmethod
     def get_studies_for_user(user):
@@ -77,7 +80,7 @@ class StudyService(object):
             workflow_metas = StudyService._get_workflow_metas(study_id)
             files = FileService.get_files_for_study(study.id)
             files = (File.from_models(model, FileService.get_file_data(model.id),
-                                      FileService.get_doc_dictionary()) for model in files)
+                                      DocumentService.get_dictionary()) for model in files)
             study.files = list(files)
             # Calling this line repeatedly is very very slow. It creates the
             # master spec and runs it. Don't execute this for Abandoned studies, as
@@ -265,14 +268,14 @@ class StudyService(object):
 
         # Loop through all known document types, get the counts for those files,
         # and use pb_docs to mark those as required.
-        doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
+        doc_dictionary = DocumentService.get_dictionary()
 
         documents = {}
         for code, doc in doc_dictionary.items():
 
-            if ProtocolBuilderService.is_enabled():
-                doc['required'] = False
+            if ProtocolBuilderService.is_enabled() and doc['id']:
                 pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
+                doc['required'] = False
                 if pb_data:
                     doc['required'] = True
 
@@ -282,7 +285,7 @@ class StudyService(object):
             # Make a display name out of categories
             name_list = []
             for cat_key in ['category1', 'category2', 'category3']:
-                if doc[cat_key] not in ['', 'NULL']:
+                if doc[cat_key] not in ['', 'NULL', None]:
                     name_list.append(doc[cat_key])
             doc['display_name'] = ' / '.join(name_list)
 
@@ -319,12 +322,22 @@ class StudyService(object):
             documents[code] = doc
         return Box(documents)
 
+    @staticmethod
+    def get_investigator_dictionary():
+        """Returns a dictionary of document details keyed on the doc_code."""
+        file_data = FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
+        lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'label')
+        doc_dict = {}
+        for lookup_data in lookup_model.dependencies:
+            doc_dict[lookup_data.value] = lookup_data.data
+        return doc_dict
+
     @staticmethod
     def get_investigators(study_id, all=False):
         """Convert array of investigators from protocol builder into a dictionary keyed on the type. """
 
         # Loop through all known investigator types as set in the reference file
-        inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
+        inv_dictionary = StudyService.get_investigator_dictionary()
 
         # Get PB required docs
         pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id)
@@ -52,36 +52,16 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
             workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
         else:
             workflow_id = None
 
         try:
             if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
-                augmentMethods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
+                augment_methods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
             else:
-                augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
-
-            super().execute(task, script, data, externalMethods=augmentMethods)
-        except SyntaxError as e:
-            raise ApiError('syntax_error',
-                           f'Something is wrong with your python script '
-                           f'please correct the following:'
-                           f' {script}, {e.msg}')
-        except NameError as e:
-            def get_most_similar(task_data, name_error):
-                bad_variable = str(name_error)[6:-16]
-                highest_ratio = 0
-                most_similar = None
-                for item in task_data:
-                    ratio = SequenceMatcher(None, item, bad_variable).ratio()
-                    if ratio > highest_ratio:
-                        most_similar = item
-                        highest_ratio = ratio
-                return most_similar, int(highest_ratio*100)
-            most_similar, highest_ratio = get_most_similar(data, e)
-            error_message = f'something you are referencing does not exist: {script}, {e}.'
-            if highest_ratio > 50:
-                error_message += f' Did you mean \'{most_similar}\'?'
-            raise ApiError('name_error', error_message)
-
+                augment_methods = Script.generate_augmented_list(task, study_id, workflow_id)
+            super().execute(task, script, data, external_methods=augment_methods)
         except WorkflowException as e:
             raise e
         except Exception as e:
             raise WorkflowTaskExecException(task, f' {script}, {e}', e)
 
     def evaluate_expression(self, task, expression):
         """
@@ -100,7 +80,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
             else:
                 augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
             exp, valid = self.validateExpression(expression)
-            return self._eval(exp, externalMethods=augmentMethods, **task.data)
+            return self._eval(exp, external_methods=augmentMethods, **task.data)
 
         except Exception as e:
             raise WorkflowTaskExecException(task,
@@ -345,8 +325,8 @@ class WorkflowProcessor(object):
             spec = parser.get_spec(process_id)
         except ValidationException as ve:
             raise ApiError(code="workflow_validation_error",
-                           message="Failed to parse Workflow Specification '%s'. \n" % workflow_spec_id +
-                                   "Error is %s. \n" % str(ve),
+                           message="Failed to parse the Workflow Specification. " +
+                                   "Error is '%s.'" % str(ve),
                            file_name=ve.filename,
                            task_id=ve.id,
                            tag=ve.tag)
@@ -378,10 +358,10 @@ class WorkflowProcessor(object):
     def get_status(self):
         return self.status_of(self.bpmn_workflow)
 
-    def do_engine_steps(self):
+    def do_engine_steps(self, exit_at = None):
         try:
             self.bpmn_workflow.refresh_waiting_tasks()
-            self.bpmn_workflow.do_engine_steps()
+            self.bpmn_workflow.do_engine_steps(exit_at = exit_at)
         except WorkflowTaskExecException as we:
             raise ApiError.from_task("task_error", str(we), we.task)
 
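
The deleted NameError handler built its "Did you mean ...?" hint with difflib.SequenceMatcher. For reference, a standalone sketch of the same technique, using the 50% threshold from the removed code:

from difflib import SequenceMatcher

def most_similar(candidates, bad_name):
    best, best_ratio = None, 0.0
    for item in candidates:
        ratio = SequenceMatcher(None, item, bad_name).ratio()
        if ratio > best_ratio:
            best, best_ratio = item, ratio
    return best, int(best_ratio * 100)

name, score = most_similar(['study_id', 'investigators'], 'studyid')
if score > 50:
    print(f"Did you mean '{name}'?")  # Did you mean 'study_id'?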
@@ -30,6 +30,7 @@ from crc.models.study import StudyModel
 from crc.models.task_event import TaskEventModel
 from crc.models.user import UserModel, UserModelSchema
 from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
+from crc.services.document_service import DocumentService
 from crc.services.file_service import FileService
 from crc.services.lookup_service import LookupService
 from crc.services.study_service import StudyService
@@ -97,20 +98,25 @@ class WorkflowService(object):
     def do_waiting():
         records = db.session.query(WorkflowModel).filter(WorkflowModel.status==WorkflowStatus.waiting).all()
         for workflow_model in records:
-            print('processing workflow %d'%workflow_model.id)
-            processor = WorkflowProcessor(workflow_model)
-            processor.bpmn_workflow.refresh_waiting_tasks()
-            processor.bpmn_workflow.do_engine_steps()
-            processor.save()
-
+            # fixme: Try catch with a very explicit error about the study, workflow and task that failed.
+            try:
+                app.logger.info('Processing workflow %s' % workflow_model.id)
+                processor = WorkflowProcessor(workflow_model)
+                processor.bpmn_workflow.refresh_waiting_tasks()
+                processor.bpmn_workflow.do_engine_steps()
+                processor.save()
+            except:
+                app.logger.error('Failed to process workflow')
 
     @staticmethod
     @timeit
-    def test_spec(spec_id, validate_study_id=None, required_only=False):
+    def test_spec(spec_id, validate_study_id=None, test_until=None, required_only=False):
         """Runs a spec through it's paces to see if it results in any errors.
         Not fool-proof, but a good sanity check. Returns the final data
         output form the last task if successful.
 
+        test_until
+
         required_only can be set to true, in which case this will run the
         spec, only completing the required fields, rather than everything.
         """
@@ -118,36 +124,44 @@ class WorkflowService(object):
         workflow_model = WorkflowService.make_test_workflow(spec_id, validate_study_id)
         try:
             processor = WorkflowProcessor(workflow_model, validate_only=True)
 
             count = 0
 
             while not processor.bpmn_workflow.is_completed():
-                processor.bpmn_workflow.get_deep_nav_list()  # Assure no errors with navigation.
-                processor.bpmn_workflow.do_engine_steps()
-                tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
-                for task in tasks:
-                    if task.task_spec.lane is not None and task.task_spec.lane not in task.data:
-                        raise ApiError.from_task("invalid_role",
-                                                 f"This task is in a lane called '{task.task_spec.lane}', The "
-                                                 f" current task data must have information mapping this role to "
-                                                 f" a unique user id.", task)
-                    task_api = WorkflowService.spiff_task_to_api_task(
-                        task,
-                        add_docs_and_forms=True)  # Assure we try to process the documentation, and raise those errors.
-                    # make sure forms have a form key
-                    if hasattr(task_api, 'form') and task_api.form is not None and task_api.form.key == '':
-                        raise ApiError(code='missing_form_key',
-                                       message='Forms must include a Form Key.',
-                                       task_id=task.id,
-                                       task_name=task.get_name())
-                    WorkflowService._process_documentation(task)
-                    WorkflowService.populate_form_with_random_data(task, task_api, required_only)
-                    processor.complete_task(task)
-                    count += 1
-                    if count >= 100:
-                        raise ApiError.from_task(code='validation_loop',
-                                                 message=f'There appears to be an infinite loop in the validation. Task is {task.task_spec.description}',
-                                                 task=task)
+                processor.bpmn_workflow.get_deep_nav_list()  # Assure no errors with navigation.
+                exit_task = processor.bpmn_workflow.do_engine_steps(exit_at=test_until)
+                if (exit_task != None):
+                    raise ApiError.from_task("validation_break",
+                                             f"The validation has been exited early on task '{exit_task.task_spec.name}' and was parented by ",
+                                             exit_task.parent)
+                tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
+                for task in tasks:
+                    if task.task_spec.lane is not None and task.task_spec.lane not in task.data:
+                        raise ApiError.from_task("invalid_role",
+                                                 f"This task is in a lane called '{task.task_spec.lane}', The "
+                                                 f" current task data must have information mapping this role to "
+                                                 f" a unique user id.", task)
+                    task_api = WorkflowService.spiff_task_to_api_task(
+                        task,
+                        add_docs_and_forms=True)  # Assure we try to process the documentation, and raise those errors.
+                    # make sure forms have a form key
+                    if hasattr(task_api, 'form') and task_api.form is not None and task_api.form.key == '':
+                        raise ApiError(code='missing_form_key',
+                                       message='Forms must include a Form Key.',
+                                       task_id=task.id,
+                                       task_name=task.get_name())
+                    WorkflowService.populate_form_with_random_data(task, task_api, required_only)
+                    processor.complete_task(task)
+                    if test_until == task.task_spec.name:
+                        raise ApiError.from_task("validation_break",
+                                                 f"The validation has been exited early on task '{task.task_spec.name}' and was parented by ",
+                                                 task.parent)
+                    count += 1
+                    if count >= 100:
+                        raise ApiError.from_task(code='validation_loop',
+                                                 message=f'There appears to be an infinite loop in the validation. Task is {task.task_spec.description}',
+                                                 task=task)
+            WorkflowService._process_documentation(processor.bpmn_workflow.last_task.parent.parent)
 
         except WorkflowException as we:
             raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
         finally:
@@ -424,7 +438,7 @@ class WorkflowService(object):
             doc_code = WorkflowService.evaluate_property('doc_code', field, task)
             file_model = FileModel(name="test.png",
                                    irb_doc_code = field.id)
-            doc_dict = FileService.get_doc_dictionary()
+            doc_dict = DocumentService.get_dictionary()
             file = File.from_models(file_model, None, doc_dict)
             return FileSchema().dump(file)
         elif field.type == 'files':
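
Sketch of exercising the new test_until early exit (app context assumed; spec and task names hypothetical):

from crc.api.common import ApiError
from crc.services.workflow_service import WorkflowService

try:
    WorkflowService.test_spec('my_spec', None, test_until='Task_CollectData')
except ApiError as ae:
    print(ae.code)  # 'validation_break' once validation reaches Task_CollectData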
@@ -61,7 +61,6 @@ python-box==5.2.0
 python-dateutil==2.8.1
 python-docx==0.8.10
 python-editor==1.0.4
-python-levenshtein==0.12.0
 pytz==2020.4
 pyyaml==5.4
 recommonmark==0.6.0
@ -7,7 +7,9 @@ from crc.models.file import CONTENT_TYPES
from crc.models.ldap import LdapModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.study_service import StudyService


class ExampleDataLoader:
@ -315,14 +317,14 @@ class ExampleDataLoader:
    def load_reference_documents(self):
        file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        file = open(file_path, "rb")
        FileService.add_reference_file(FileService.DOCUMENT_LIST,
        FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
                                       binary_data=file.read(),
                                       content_type=CONTENT_TYPES['xls'])
        file.close()

        file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
        file = open(file_path, "rb")
        FileService.add_reference_file(FileService.INVESTIGATOR_LIST,
        FileService.add_reference_file(StudyService.INVESTIGATOR_LIST,
                                       binary_data=file.read(),
                                       content_type=CONTENT_TYPES['xls'])
        file.close()
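
The open/read/close pairs above leak the file handle if add_reference_file raises; a with-block closes it unconditionally. A sketch using only the names already shown in this hunk:

    with open(file_path, "rb") as f:
        FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
                                       binary_data=f.read(),
                                       content_type=CONTENT_TYPES['xls'])
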
@ -2,6 +2,7 @@
# IMPORTANT - Environment must be loaded before app, models, etc....
import os


os.environ["TESTING"] = "true"

import json
@ -23,6 +24,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.user_service import UserService
from crc.services.workflow_service import WorkflowService
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader

# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@ -138,8 +140,7 @@ class BaseTest(unittest.TestCase):
        delete everything that matters in the local database - this is used to
        test ground zero copy of workflow specs.
        """
        session.execute("delete from workflow; delete from file_data; delete from file; delete from workflow_spec;")
        session.commit()
        ExampleDataLoader.clean_db()
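        # Note (aside, not part of the change): the raw SQL above deletes the
        # child tables (workflow, file_data, file) before workflow_spec so that
        # foreign-key constraints are satisfied; reordering those deletes would
        # fail on a database that enforces the constraints.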

    def load_example_data(self, use_crc_data=False, use_rrt_data=False):
        """use_crc_data will cause this to load the mammoth collection of documents
@ -282,28 +283,6 @@ class BaseTest(unittest.TestCase):
        session.commit()
        return study

    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
                                         workflow_spec_name="random_fact"):
        study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
        approvals = []

        for i in range(len(approver_uids)):
            approvals.append(self.create_approval(
                study=study,
                workflow=workflow,
                approver_uid=approver_uids[i],
                status=statuses[i],
                version=1
            ))

        full_study = {
            'study': study,
            'workflow': workflow,
            'approvals': approvals,
        }

        return full_study

    def create_workflow(self, workflow_name, display_name=None, study=None, category_id=None, as_user="dhf8r"):
        session.flush()
@ -320,30 +299,11 @@ class BaseTest(unittest.TestCase):
    def create_reference_document(self):
        file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        file = open(file_path, "rb")
        FileService.add_reference_file(FileService.DOCUMENT_LIST,
        FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
                                       binary_data=file.read(),
                                       content_type=CONTENT_TYPES['xls'])
                                       content_type=CONTENT_TYPES['xlsx'])
        file.close()
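        # Note (aside, not part of the change): the content-type fix above
        # matters because .xlsx is not "application/vnd.ms-excel"; the
        # TestFilesApi changes later in this diff assert the Open XML type,
        # "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet".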

    def create_approval(
            self,
            study=None,
            workflow=None,
            approver_uid=None,
            status=None,
            version=None,
    ):
        study = study or self.create_study()
        workflow = workflow or self.create_workflow()
        approver_uid = approver_uid or self.test_uid
        status = status or ApprovalStatus.PENDING.value
        version = version or 1
        approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status,
                                 version=version)
        session.add(approval)
        session.commit()
        return approval

    def get_workflow_common(self, url, user):
        rv = self.app.get(url,
                          headers=self.logged_in_headers(user),
@ -16,6 +16,12 @@
OGC will upload the Non-Funded Executed Agreement after it has been negotiated by OSP contract negotiator.</bpmn:documentation>
      <bpmn:extensionElements>
        <camunda:formData>
          <camunda:formField id="Date" label="Version Date" type="date">
            <camunda:properties>
              <camunda:property id="group" value="PCRApproval" />
              <camunda:property id="file_data" value="Some_File" />
            </camunda:properties>
          </camunda:formField>
          <camunda:formField id="file_type" type="enum" defaultValue="AD_CoCApp">
            <camunda:value id="AD_CoCApp" name="Ancillary Documents / Case Report Form" />
            <camunda:value id="AD_CoCAppr" name="Ancillary Documents / CoC Approval" />
@ -32,12 +38,6 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated b
              <camunda:property id="file_data" value="Some_File" />
            </camunda:properties>
          </camunda:formField>
          <camunda:formField id="Date" label="Version Date" type="date">
            <camunda:properties>
              <camunda:property id="group" value="PCRApproval" />
              <camunda:property id="file_data" value="Some_File" />
            </camunda:properties>
          </camunda:formField>
        </camunda:formData>
      </bpmn:extensionElements>
      <bpmn:incoming>SequenceFlow_0ea9hvd</bpmn:incoming>
@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.0">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
  <bpmn:process id="Process_18biih5" isExecutable="true">
    <bpmn:startEvent id="StartEvent_1">
      <bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
@ -8,32 +8,34 @@
    <bpmn:endEvent id="EndEvent_063bpg6">
      <bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
    </bpmn:endEvent>
    <bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Script Reference">
    <bpmn:scriptTask id="Invalid_Script_Task" name="A Syntax Error">
      <bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
      <bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
      <bpmn:script>a really bad error that should fail</bpmn:script>
      <bpmn:script>x = 1
y = 2
x + y === a</bpmn:script>
    </bpmn:scriptTask>
    <bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
  </bpmn:process>
  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
        <dc:Bounds x="179" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
        <di:waypoint x="390" y="117" />
        <di:waypoint x="442" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
        <di:waypoint x="215" y="117" />
        <di:waypoint x="290" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
        <dc:Bounds x="179" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
        <dc:Bounds x="442" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
        <dc:Bounds x="290" y="77" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
        <di:waypoint x="390" y="117" />
        <di:waypoint x="442" y="117" />
      </bpmndi:BPMNEdge>
    </bpmndi:BPMNPlane>
  </bpmndi:BPMNDiagram>
</bpmn:definitions>
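
The rewritten script above now fails at compile time ('===' is not a Python operator), which is what lets the validator report a line number and offset; the invalid_script3 file added below compiles but only fails at run time. A standalone sketch of the compile-time half, in plain Python rather than project code:

    try:
        compile("x = 1\ny = 2\nx + y === a", "<script>", "exec")
    except SyntaxError as err:
        print(err.lineno, type(err).__name__)  # 3 SyntaxError (err.offset varies by Python version)
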
41
tests/data/invalid_script3/invalid_script3.bpmn
Normal file
@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
  <bpmn:process id="Process_18biih5" isExecutable="true">
    <bpmn:startEvent id="StartEvent_1">
      <bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
    </bpmn:startEvent>
    <bpmn:sequenceFlow id="SequenceFlow_1pnq3kg" sourceRef="StartEvent_1" targetRef="Invalid_Script_Task" />
    <bpmn:endEvent id="EndEvent_063bpg6">
      <bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
    </bpmn:endEvent>
    <bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Variable">
      <bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
      <bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
      <bpmn:script>x = 1
y = 2
x + a == 3</bpmn:script>
    </bpmn:scriptTask>
    <bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
  </bpmn:process>
  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
      <bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
        <di:waypoint x="390" y="117" />
        <di:waypoint x="442" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
        <di:waypoint x="215" y="117" />
        <di:waypoint x="290" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
        <dc:Bounds x="179" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
        <dc:Bounds x="442" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
        <dc:Bounds x="290" y="77" width="100" height="80" />
      </bpmndi:BPMNShape>
    </bpmndi:BPMNPlane>
  </bpmndi:BPMNDiagram>
</bpmn:definitions>
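
This script compiles cleanly but references the undefined name 'a', so the failure surfaces only when the script runs. A standalone sketch in plain Python:

    try:
        exec("x = 1\ny = 2\nx + a == 3", {})
    except NameError as err:
        print(type(err).__name__, err)  # NameError name 'a' is not defined
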
@ -1,14 +1,16 @@
import io
import json
import os

from tests.base_test import BaseTest

from crc import session, db
from crc import session, db, app
from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.data_store import DataStoreModel
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader

@ -110,20 +112,23 @@ class TestFilesApi(BaseTest):
        self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))

    def test_set_reference_file(self):
        file_name = "irb_document_types.xls"
        data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.xls")}
        file_name = "irb_documents.xlsx"
        filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        with open(filepath, 'rb') as myfile:
            file_data = myfile.read()
        data = {'file': (io.BytesIO(file_data), file_name)}
        rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
                          content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assert_success(rv)
        self.assertIsNotNone(rv.get_data())
        json_data = json.loads(rv.get_data(as_text=True))
        file = FileModelSchema().load(json_data, session=session)
        self.assertEqual(FileType.xls, file.type)
        self.assertEqual(FileType.xlsx, file.type)
        self.assertTrue(file.is_reference)
        self.assertEqual("application/vnd.ms-excel", file.content_type)
        self.assertEqual("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", file.content_type)

    def test_set_reference_file_bad_extension(self):
        file_name = FileService.DOCUMENT_LIST
        file_name = DocumentService.DOCUMENT_LIST
        data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.ppt")}
        rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
                          content_type='multipart/form-data', headers=self.logged_in_headers())
@ -131,22 +136,28 @@ class TestFilesApi(BaseTest):

    def test_get_reference_file(self):
        file_name = "irb_document_types.xls"
        data = {'file': (io.BytesIO(b"abcdef"), "some crazy thing do not care.xls")}
        filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        with open(filepath, 'rb') as myfile:
            file_data = myfile.read()
        data = {'file': (io.BytesIO(file_data), file_name)}
        rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
                          content_type='multipart/form-data', headers=self.logged_in_headers())
        rv = self.app.get('/v1.0/reference_file/%s' % file_name, headers=self.logged_in_headers())
        self.assert_success(rv)
        data_out = rv.get_data()
        self.assertEqual(b"abcdef", data_out)
        self.assertEqual(file_data, data_out)

    def test_list_reference_files(self):
        ExampleDataLoader.clean_db()

        file_name = FileService.DOCUMENT_LIST
        data = {'file': (io.BytesIO(b"abcdef"), file_name)}
        file_name = DocumentService.DOCUMENT_LIST
        filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        with open(filepath, 'rb') as myfile:
            file_data = myfile.read()
        data = {'file': (io.BytesIO(file_data), file_name)}
        rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
                          content_type='multipart/form-data', headers=self.logged_in_headers())

        self.assert_success(rv)
        rv = self.app.get('/v1.0/reference_file',
                          follow_redirects=True,
                          content_type="application/json", headers=self.logged_in_headers())
@ -159,7 +170,8 @@ class TestFilesApi(BaseTest):

    def test_update_file_info(self):
        self.load_example_data()
        file: FileModel = session.query(FileModel).first()
        self.create_reference_document()
        file: FileModel = session.query(FileModel).filter(FileModel.is_reference==False).first()
        file.name = "silly_new_name.bpmn"

        rv = self.app.put('/v1.0/file/%i' % file.id,
@ -1,4 +1,3 @@
import json

from SpiffWorkflow.bpmn.PythonScriptEngine import Box

@ -15,6 +14,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.scripts.file_data_set import FileDataSet
from crc.services.document_service import DocumentService


class TestStudyDetailsDocumentsScript(BaseTest):
@ -43,8 +43,8 @@ class TestStudyDetailsDocumentsScript(BaseTest):

        # Remove the reference file.
        file_model = db.session.query(FileModel). \
            filter(FileModel.is_reference == True). \
            filter(FileModel.name == FileService.DOCUMENT_LIST).first()
            filter(FileModel.is_reference is True). \
            filter(FileModel.name == DocumentService.DOCUMENT_LIST).first()
        if file_model:
            db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
            db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
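
A caution on the hunk above: with SQLAlchemy columns, Python's `is` does not build SQL; `FileModel.is_reference is True` evaluates to a plain Python False, so the rewritten filter matches nothing. The usual spelling for a boolean column test is the `is_()` operator, sketched here with the same names used in the hunk:

    file_model = db.session.query(FileModel). \
        filter(FileModel.is_reference.is_(True)). \
        filter(FileModel.name == DocumentService.DOCUMENT_LIST).first()
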
@ -71,7 +71,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):

    def test_load_lookup_data(self):
        self.create_reference_document()
        dict = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
        dict = DocumentService.get_dictionary()
        self.assertIsNotNone(dict)

    def get_required_docs(self):
@ -126,7 +126,7 @@ class TestStudyService(BaseTest):
        self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
        self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
        self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
        self.assertEqual('6', documents["UVACompl_PRCAppr"]['id'])
        self.assertEqual(6, documents["UVACompl_PRCAppr"]['id'])

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    def test_get_documents_has_file_details(self, mock_docs):
@ -3,9 +3,6 @@ from tests.base_test import BaseTest
from crc.services.file_service import FileService





class TestDocumentDirectories(BaseTest):

    def test_directory_list(self):
@ -10,7 +10,7 @@ class TestFormFieldName(BaseTest):

        json_data = json.loads(rv.get_data(as_text=True))
        self.assertEqual(json_data[0]['message'],
                         'When populating all fields ... \nInvalid Field name: "user-title". A field ID must begin '
                         'Invalid Field name: "user-title". A field ID must begin '
                         'with a letter, and can only contain letters, numbers, and "_"')

    def test_form_field_name_with_period(self):
@ -10,5 +10,5 @@ class TestFormFieldType(BaseTest):

        json_data = json.loads(rv.get_data(as_text=True))
        self.assertEqual(json_data[0]['message'],
                         'When populating all fields ... \nType is missing for field "name". A field type must be provided.')
                         'Type is missing for field "name". A field type must be provided.')
        # print('TestFormFieldType: Good Form')
@ -9,4 +9,4 @@ class TestNameErrorHint(BaseTest):
        spec_model = self.load_test_spec('script_with_name_error')
        rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
        json_data = json.loads(rv.get_data(as_text=True))
        self.assertIn('Did you mean \'spam\'', json_data[0]['message'])
        self.assertIn('Did you mean \'[\'spam\'', json_data[0]['message'])
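
The assertion change above reflects the hint now carrying a list of candidate names rather than a single name. One way such a hint can be produced is with the standard library's difflib; this sketch is illustrative and not necessarily how SpiffWorkflow builds its message:

    import difflib

    def did_you_mean(bad_name, known_names):
        # Rank known names by similarity to the one that raised the NameError.
        return difflib.get_close_matches(bad_name, known_names, n=3)

    print(did_you_mean('span', ['spam', 'eggs']))  # ['spam']
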
@ -59,7 +59,6 @@ class TestWorkflowSpecValidation(BaseTest):
        app.config['PB_ENABLED'] = True
        self.validate_all_loaded_workflows()


    def validate_all_loaded_workflows(self):
        workflows = session.query(WorkflowSpecModel).all()
        errors = []
@ -71,12 +70,12 @@ class TestWorkflowSpecValidation(BaseTest):
    def test_invalid_expression(self):
        self.load_example_data()
        errors = self.validate_workflow("invalid_expression")
        self.assertEqual(2, len(errors))
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_validation_exception", errors[0]['code'])
        self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
        self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
        self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
        self.assertEqual('When populating all fields ... \nExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
        self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
                         'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
        self.assertIsNotNone(errors[0]['task_data'])
        self.assertIn("has_bananas", errors[0]['task_data'])
@ -84,7 +83,7 @@ class TestWorkflowSpecValidation(BaseTest):
    def test_validation_error(self):
        self.load_example_data()
        errors = self.validate_workflow("invalid_spec")
        self.assertEqual(2, len(errors))
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_validation_error", errors[0]['code'])
        self.assertEqual("StartEvent_1", errors[0]['task_id'])
        self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])
@ -93,7 +92,7 @@ class TestWorkflowSpecValidation(BaseTest):
    def test_invalid_script(self):
        self.load_example_data()
        errors = self.validate_workflow("invalid_script")
        self.assertEqual(2, len(errors))
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_validation_exception", errors[0]['code'])
        #self.assertTrue("NoSuchScript" in errors[0]['message'])
        self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
@ -103,12 +102,23 @@ class TestWorkflowSpecValidation(BaseTest):
    def test_invalid_script2(self):
        self.load_example_data()
        errors = self.validate_workflow("invalid_script2")
        self.assertEqual(2, len(errors))
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_validation_exception", errors[0]['code'])
        self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
        self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
        self.assertEqual(3, errors[0]['line_number'])
        self.assertEqual(9, errors[0]['offset'])
        self.assertEqual("SyntaxError", errors[0]['error_type'])
        self.assertEqual("A Syntax Error", errors[0]['task_name'])
        self.assertEqual("invalid_script2.bpmn", errors[0]['file_name'])

    def test_invalid_script3(self):
        self.load_example_data()
        errors = self.validate_workflow("invalid_script3")
        self.assertEqual(1, len(errors))
        self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
        self.assertEqual(3, errors[0]['line_number'])
        self.assertEqual("NameError", errors[0]['error_type'])

    def test_repeating_sections_correctly_populated(self):
        self.load_example_data()
        spec_model = self.load_test_spec('repeat_form')