Vastly more informative ApiError model that provides details on the underlying task where the error occured.
Added a validate_workflow_specification endpoint that allows you to check if the workflow will execute from beginning to end using random data. Minor fixes to existing bpmns to allow them to pass. All scripts must include a "do_task_validate_only" that restricts external calls and database modifications, but performs as much logic as possible.
This commit is contained in:
parent
6c832829b0
commit
c7d2c28178
|
@ -179,10 +179,10 @@
|
|||
},
|
||||
"configparser": {
|
||||
"hashes": [
|
||||
"sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c",
|
||||
"sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"
|
||||
"sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1",
|
||||
"sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd"
|
||||
],
|
||||
"version": "==4.0.2"
|
||||
"version": "==5.0.0"
|
||||
},
|
||||
"connexion": {
|
||||
"extras": [
|
||||
|
@ -322,10 +322,10 @@
|
|||
},
|
||||
"httpretty": {
|
||||
"hashes": [
|
||||
"sha256:66216f26b9d2c52e81808f3e674a6fb65d4bf719721394a1a9be926177e55fbe"
|
||||
"sha256:24a6fd2fe1c76e94801b74db8f52c0fb42718dc4a199a861b305b1a492b9d868"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.9.7"
|
||||
"version": "==1.0.2"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
|
@ -343,11 +343,11 @@
|
|||
},
|
||||
"importlib-metadata": {
|
||||
"hashes": [
|
||||
"sha256:0095bf45caca7a93685cbb9e5ef49f0ed37f848639df8f4684f07229aa7a8322",
|
||||
"sha256:dd381cddc02a58a23667ef675164ad70848d82966d3a8fddea96dcfb51064803"
|
||||
"sha256:298a914c82144c6b3b06c568a8973b89ad2176685f43cd1ea9ba968307300fa9",
|
||||
"sha256:dfc83688553a91a786c6c91eeb5f3b1d31f24d71877bbd94ecbf5484e57690a2"
|
||||
],
|
||||
"markers": "python_version < '3.8'",
|
||||
"version": "==1.5.1"
|
||||
"version": "==1.5.2"
|
||||
},
|
||||
"inflection": {
|
||||
"hashes": [
|
||||
|
@ -769,7 +769,7 @@
|
|||
"spiffworkflow": {
|
||||
"editable": true,
|
||||
"git": "https://github.com/sartography/SpiffWorkflow.git",
|
||||
"ref": "c8240e44e62f54026b993eaaf027c7978f11726e"
|
||||
"ref": "4e8f4d7ab9da27e7191997019634eb968e0a11e4"
|
||||
},
|
||||
"sqlalchemy": {
|
||||
"hashes": [
|
||||
|
@ -863,11 +863,11 @@
|
|||
},
|
||||
"importlib-metadata": {
|
||||
"hashes": [
|
||||
"sha256:0095bf45caca7a93685cbb9e5ef49f0ed37f848639df8f4684f07229aa7a8322",
|
||||
"sha256:dd381cddc02a58a23667ef675164ad70848d82966d3a8fddea96dcfb51064803"
|
||||
"sha256:298a914c82144c6b3b06c568a8973b89ad2176685f43cd1ea9ba968307300fa9",
|
||||
"sha256:dfc83688553a91a786c6c91eeb5f3b1d31f24d71877bbd94ecbf5484e57690a2"
|
||||
],
|
||||
"markers": "python_version < '3.8'",
|
||||
"version": "==1.5.1"
|
||||
"version": "==1.5.2"
|
||||
},
|
||||
"more-itertools": {
|
||||
"hashes": [
|
||||
|
|
32
crc/api.yml
32
crc/api.yml
|
@ -302,12 +302,34 @@ paths:
|
|||
responses:
|
||||
'204':
|
||||
description: The workflow specification has been removed.
|
||||
/workflow-specification/{spec_id}/validate:
|
||||
parameters:
|
||||
- name: spec_id
|
||||
in: path
|
||||
required: false
|
||||
description: The unique id of an existing workflow specification to validate.
|
||||
schema:
|
||||
type: string
|
||||
get:
|
||||
operationId: crc.api.workflow.validate_workflow_specification
|
||||
summary: Loads and attempts to execute a Workflow Specification, returning a list of errors encountered
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
responses:
|
||||
'200':
|
||||
description: Workflow specification.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/workflow-specification-category:
|
||||
get:
|
||||
operationId: crc.api.workflow.list_workflow_spec_categories
|
||||
summary: Provides a list of categories that can be added to a workflow spec.
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
- Workflow Specification Category
|
||||
responses:
|
||||
'200':
|
||||
description: An array of workflow specification categories
|
||||
|
@ -321,7 +343,7 @@ paths:
|
|||
operationId: crc.api.workflow.add_workflow_spec_category
|
||||
summary: Creates a new workflow spec category with the given parameters.
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
- Workflow Specification Category
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
|
@ -346,7 +368,7 @@ paths:
|
|||
operationId: crc.api.workflow.get_workflow_spec_category
|
||||
summary: Returns a single workflow spec category
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
- Workflow Specification Category
|
||||
responses:
|
||||
'200':
|
||||
description: Workflow spec category.
|
||||
|
@ -358,7 +380,7 @@ paths:
|
|||
operationId: crc.api.workflow.update_workflow_spec_category
|
||||
summary: Modifies an existing workflow spec category with the given parameters.
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
- Workflow Specification Category
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
|
@ -375,7 +397,7 @@ paths:
|
|||
operationId: crc.api.workflow.delete_workflow_spec_category
|
||||
summary: Removes an existing workflow spec category
|
||||
tags:
|
||||
- Workflow Specifications
|
||||
- Workflow Specification Category
|
||||
responses:
|
||||
'204':
|
||||
description: The workflow spec category has been removed.
|
||||
|
|
|
@ -2,16 +2,40 @@ from crc import ma, app
|
|||
|
||||
|
||||
class ApiError(Exception):
|
||||
def __init__(self, code, message, status_code=400):
|
||||
def __init__(self, code, message, status_code=400,
|
||||
file_name="", task_id="", task_name="", tag=""):
|
||||
self.status_code = status_code
|
||||
self.code = code
|
||||
self.message = message
|
||||
self.code = code # a short consistent string describing the error.
|
||||
self.message = message # A detailed message that provides more information.
|
||||
self.task_id = task_id or "" # OPTIONAL: The id of the task in the BPMN Diagram.
|
||||
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
|
||||
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
|
||||
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
|
||||
Exception.__init__(self, self.message)
|
||||
|
||||
@classmethod
|
||||
def from_task(cls, code, message, task, status_code=400):
|
||||
"""Constructs an API Error with details pulled from the current task."""
|
||||
instance = cls(code, message, status_code=status_code)
|
||||
instance.task_id = task.task_spec.name or ""
|
||||
instance.task_name = task.task_spec.description or ""
|
||||
instance.file_name = task.workflow.spec.file or ""
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def from_task_spec(cls, code, message, task_spec, status_code=400):
|
||||
"""Constructs an API Error with details pulled from the current task."""
|
||||
instance = cls(code, message, status_code=status_code)
|
||||
instance.task_id = task_spec.name or ""
|
||||
instance.task_name = task_spec.description or ""
|
||||
if task_spec._wf_spec:
|
||||
instance.file_name = task_spec._wf_spec.file
|
||||
return instance
|
||||
|
||||
|
||||
class ApiErrorSchema(ma.Schema):
|
||||
class Meta:
|
||||
fields = ("code", "message")
|
||||
fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id")
|
||||
|
||||
|
||||
@app.errorhandler(ApiError)
|
||||
|
|
|
@ -36,6 +36,17 @@ def get_workflow_specification(spec_id):
|
|||
return WorkflowSpecModelSchema().dump(spec)
|
||||
|
||||
|
||||
def validate_workflow_specification(spec_id):
|
||||
|
||||
errors = []
|
||||
try:
|
||||
WorkflowProcessor.test_spec(spec_id)
|
||||
except ApiError as ae:
|
||||
errors.append(ae)
|
||||
return ApiErrorSchema(many=True).dump(errors)
|
||||
|
||||
|
||||
|
||||
def update_workflow_specification(spec_id, body):
|
||||
if spec_id is None:
|
||||
raise ApiError('unknown_spec', 'Please provide a valid Workflow Spec ID.')
|
||||
|
@ -104,6 +115,7 @@ def get_workflow(workflow_id, soft_reset=False, hard_reset=False):
|
|||
return WorkflowApiSchema().dump(workflow_api_model)
|
||||
|
||||
|
||||
|
||||
def delete(workflow_id):
|
||||
session.query(WorkflowModel).filter_by(id=workflow_id).delete()
|
||||
session.commit()
|
||||
|
|
|
@ -26,7 +26,23 @@ Takes two arguments:
|
|||
2. The 'code' of the IRB Document as set in the irb_documents.xlsx file."
|
||||
"""
|
||||
|
||||
def do_task_validate_only(self, task, study_id, *args, **kwargs):
|
||||
"""For validation only, process the template, but do not store it in the database."""
|
||||
self.process_template(task, study_id, *args, **kwargs)
|
||||
|
||||
def do_task(self, task, study_id, *args, **kwargs):
|
||||
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
|
||||
final_document_stream = self.process_template(task, study_id, *args, **kwargs)
|
||||
|
||||
file_name = args[0]
|
||||
irb_doc_code = args[1]
|
||||
FileService.add_task_file(study_id=study_id, workflow_id=workflow_id, task_id=task.id,
|
||||
name=file_name,
|
||||
content_type=CONTENT_TYPES['docx'],
|
||||
binary_data=final_document_stream.read(),
|
||||
irb_doc_code=irb_doc_code)
|
||||
|
||||
def process_template(self, task, study_id, *args, **kwargs):
|
||||
"""Entry point, mostly worried about wiring it all up."""
|
||||
if len(args) != 2:
|
||||
raise ApiError(code="missing_argument",
|
||||
|
@ -34,10 +50,9 @@ Takes two arguments:
|
|||
"the name of the docx template to use. The second "
|
||||
"argument is a code for the document, as "
|
||||
"set in the reference document %s. " % FileService.IRB_PRO_CATEGORIES_FILE)
|
||||
file_name = args[0]
|
||||
irb_doc_code = args[1]
|
||||
workflow_spec_model = self.find_spec_model_in_db(task.workflow)
|
||||
task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
|
||||
file_name = args[0]
|
||||
|
||||
if task_study_id != study_id:
|
||||
raise ApiError(code="invalid_argument",
|
||||
|
@ -54,18 +69,12 @@ Takes two arguments:
|
|||
|
||||
if file_data_model is None:
|
||||
raise ApiError(code="file_missing",
|
||||
message="Can not find a file called '%s' "
|
||||
"within workflow specification '%s'") % (args[0], workflow_spec_model.id)
|
||||
message="Can not find a file called '%s' within workflow specification '%s'"
|
||||
% (args[0], workflow_spec_model.id))
|
||||
|
||||
final_document_stream = self.make_template(BytesIO(file_data_model.data), task.data)
|
||||
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
|
||||
FileService.add_task_file(study_id=study_id, workflow_id=workflow_id, task_id=task.id,
|
||||
name=file_name,
|
||||
content_type=CONTENT_TYPES['docx'],
|
||||
binary_data=final_document_stream.read(),
|
||||
irb_doc_code=irb_doc_code)
|
||||
|
||||
print("Complete Task was called with %s" % str(args))
|
||||
return self.make_template(BytesIO(file_data_model.data), task.data)
|
||||
|
||||
|
||||
def make_template(self, binary_stream, context):
|
||||
doc = DocxTemplate(binary_stream)
|
||||
|
|
|
@ -20,6 +20,9 @@ class FactService(Script):
|
|||
response = requests.get('https://api.chucknorris.io/jokes/random')
|
||||
return response.json()['value']
|
||||
|
||||
def do_task_validate_only(self, task, study_id, **kwargs):
|
||||
self.do_task(task, study_id, **kwargs)
|
||||
|
||||
def do_task(self, task, study_id, **kwargs):
|
||||
print(task.data)
|
||||
|
||||
|
|
|
@ -1,8 +1,4 @@
|
|||
from pandas import ExcelFile
|
||||
|
||||
from crc import session, ma
|
||||
from crc.api.common import ApiError
|
||||
from crc.models.study import StudyModel, StudyModelSchema
|
||||
from crc.scripts.script import Script, ScriptValidationError
|
||||
from crc.services.file_service import FileService
|
||||
from crc.services.protocol_builder import ProtocolBuilderService
|
||||
|
@ -38,18 +34,23 @@ For example:
|
|||
}
|
||||
```
|
||||
"""
|
||||
|
||||
def do_task_validate_only(self, task, study_id, *args, **kwargs):
|
||||
"""For validation only, pretend no results come back from pb"""
|
||||
pb_docs = []
|
||||
self.get_required_docs(study_id, pb_docs)
|
||||
task.data["required_docs"] = self.get_required_docs(study_id, pb_docs)
|
||||
|
||||
def do_task(self, task, study_id, *args, **kwargs):
|
||||
"""Takes data from the protocol builder, and merges it with data from the IRB Pro Categories
|
||||
spreadsheet to return pertinent details about the required documents."""
|
||||
self.get_required_docs(study_id)
|
||||
task.data["required_docs"] = self.get_required_docs(study_id)
|
||||
pb_docs = self.pb.get_required_docs(study_id)
|
||||
self.get_required_docs(study_id, pb_docs)
|
||||
task.data["required_docs"] = self.get_required_docs(study_id, pb_docs)
|
||||
|
||||
def get_required_docs(self, study_id):
|
||||
def get_required_docs(self, study_id, pb_docs):
|
||||
"""Takes data from the protocol builder, and merges it with data from the IRB Pro Categories spreadsheet to return
|
||||
pertinant details about the required documents."""
|
||||
pb_docs = self.pb.get_required_docs(study_id)
|
||||
|
||||
doc_dictionary = FileService.get_file_reference_dictionary()
|
||||
required_docs = {}
|
||||
for doc in pb_docs:
|
||||
|
|
|
@ -15,9 +15,15 @@ class Script:
|
|||
|
||||
def do_task(self, task, study_id, **kwargs):
|
||||
raise ApiError("invalid_script",
|
||||
"This is an internal error. The script you are trying to execute " +
|
||||
"This is an internal error. The script you are trying to execute '%s' " % self.__class__.__name__ +
|
||||
"does not properly implement the do_task function.")
|
||||
|
||||
def do_task_validate_only(self, task, study_id, **kwargs):
|
||||
raise ApiError("invalid_script",
|
||||
"This is an internal error. The script you are trying to execute '%s' " % self.__class__.__name__ +
|
||||
"does must provide a validate_only option that mimics the do_task, " +
|
||||
"but does not make external calls or database updates." )
|
||||
|
||||
def validate(self):
|
||||
"""Override this method to perform an early check that the script has access to
|
||||
everything it needs to properly process requests.
|
||||
|
|
|
@ -3,6 +3,7 @@ from crc.api.common import ApiError
|
|||
from crc.models.study import StudyModel, StudyModelSchema
|
||||
from crc.scripts.script import Script
|
||||
from crc.services.protocol_builder import ProtocolBuilderService
|
||||
from crc.services.workflow_processor import WorkflowProcessor
|
||||
|
||||
|
||||
class StudyInfo(Script):
|
||||
|
@ -20,11 +21,14 @@ class StudyInfo(Script):
|
|||
this study.
|
||||
"""
|
||||
|
||||
def do_task_validate_only(self, task, study_id, *args, **kwargs):
|
||||
"""For validation only, pretend no results come back from pb"""
|
||||
self.check_args(args)
|
||||
|
||||
|
||||
def do_task(self, task, study_id, *args, **kwargs):
|
||||
if len(args) != 1 or (args[0] not in StudyInfo.type_options):
|
||||
raise ApiError(code="missing_argument",
|
||||
message="The StudyInfo script requires a single argument which must be "
|
||||
"one of %s" % ",".join(StudyInfo.type_options))
|
||||
self.check_args(args)
|
||||
|
||||
cmd = args[0]
|
||||
study_info = {}
|
||||
if "study" in task.data:
|
||||
|
@ -39,3 +43,10 @@ class StudyInfo(Script):
|
|||
if cmd == 'details':
|
||||
study_info["details"] = self.pb.get_study_details(study_id)
|
||||
task.data["study"] = study_info
|
||||
|
||||
|
||||
def check_args(self, args):
|
||||
if len(args) != 1 or (args[0] not in StudyInfo.type_options):
|
||||
raise ApiError(code="missing_argument",
|
||||
message="The StudyInfo script requires a single argument which must be "
|
||||
"one of %s" % ",".join(StudyInfo.type_options))
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
import json
|
||||
import re
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
import xml.etree.ElementTree as ElementTree
|
||||
|
||||
from SpiffWorkflow import Task as SpiffTask, Workflow
|
||||
from SpiffWorkflow import Task as SpiffTask
|
||||
from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine
|
||||
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
|
||||
from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
|
||||
|
@ -10,9 +12,10 @@ from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
|
|||
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
|
||||
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
|
||||
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
|
||||
from SpiffWorkflow.exceptions import WorkflowException
|
||||
from SpiffWorkflow.operators import Operator
|
||||
|
||||
from crc import session, db
|
||||
from crc import session
|
||||
from crc.api.common import ApiError
|
||||
from crc.models.file import FileDataModel, FileModel, FileType
|
||||
from crc.models.workflow import WorkflowStatus, WorkflowModel
|
||||
|
@ -45,14 +48,21 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
|
|||
klass = getattr(mod, class_name)
|
||||
study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
|
||||
if not isinstance(klass(), Script):
|
||||
raise ApiError("invalid_script",
|
||||
raise ApiError.from_task("invalid_script",
|
||||
"This is an internal error. The script '%s:%s' you called "
|
||||
"does not properly implement the CRC Script class." %
|
||||
(module_name, class_name))
|
||||
klass().do_task(task, study_id, *commands[1:])
|
||||
(module_name, class_name),
|
||||
task=task)
|
||||
if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
|
||||
"""If this is running a validation, and not a normal process, then we want to
|
||||
mimic running the script, but not make any external calls or database changes."""
|
||||
klass().do_task_validate_only(task, study_id, *commands[1:])
|
||||
else:
|
||||
klass().do_task(task, study_id, *commands[1:])
|
||||
except ModuleNotFoundError as mnfe:
|
||||
raise ApiError("invalid_script",
|
||||
"Unable to locate Script: '%s:%s'" % (module_name, class_name), 400)
|
||||
raise ApiError.from_task("invalid_script",
|
||||
"Unable to locate Script: '%s:%s'" % (module_name, class_name),
|
||||
task=task)
|
||||
|
||||
@staticmethod
|
||||
def camel_to_snake(camel):
|
||||
|
@ -71,11 +81,12 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
|
|||
|
||||
def _eval(self, task, expression, **kwargs):
|
||||
locals().update(kwargs)
|
||||
try :
|
||||
try:
|
||||
return eval(expression)
|
||||
except NameError as ne:
|
||||
raise ApiError('invalid_expression',
|
||||
'The expression you provided does not exist:' + expression)
|
||||
raise ApiError.from_task('invalid_expression',
|
||||
'The expression you provided does not exist:' + expression,
|
||||
task=task)
|
||||
|
||||
|
||||
class MyCustomParser(BpmnDmnParser):
|
||||
|
@ -91,6 +102,7 @@ class WorkflowProcessor(object):
|
|||
_serializer = BpmnSerializer()
|
||||
WORKFLOW_ID_KEY = "workflow_id"
|
||||
STUDY_ID_KEY = "study_id"
|
||||
VALIDATION_PROCESS_KEY = "validate_only"
|
||||
|
||||
def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False):
|
||||
"""Create a Workflow Processor based on the serialized information available in the workflow model.
|
||||
|
@ -211,10 +223,56 @@ class WorkflowProcessor(object):
|
|||
except ValidationException as ve:
|
||||
raise ApiError(code="workflow_validation_error",
|
||||
message="Failed to parse Workflow Specification '%s' %s." % (workflow_spec_id, version) +
|
||||
"Error is %s" % str(ve))
|
||||
"Error is %s" % str(ve),
|
||||
file_name=ve.filename,
|
||||
task_id=ve.id,
|
||||
tag=ve.tag)
|
||||
spec.description = version
|
||||
return spec
|
||||
|
||||
@classmethod
|
||||
def test_spec(cls, spec_id):
|
||||
|
||||
spec = WorkflowProcessor.get_spec(spec_id)
|
||||
bpmn_workflow = BpmnWorkflow(spec, script_engine=cls._script_engine)
|
||||
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = 1
|
||||
bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = spec_id
|
||||
bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = True
|
||||
|
||||
while not bpmn_workflow.is_completed():
|
||||
try:
|
||||
bpmn_workflow.do_engine_steps()
|
||||
tasks = bpmn_workflow.get_tasks(SpiffTask.READY)
|
||||
for task in tasks:
|
||||
WorkflowProcessor.populate_form_with_random_data(task)
|
||||
task.complete()
|
||||
except WorkflowException as we:
|
||||
raise ApiError.from_task_spec("workflow_execution_exception", str(we),
|
||||
we.sender)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def populate_form_with_random_data(task):
|
||||
"""populates a task with random data - useful for testing a spec."""
|
||||
|
||||
form_data = {}
|
||||
for field in task.task_spec.form.fields:
|
||||
if field.type == "enum":
|
||||
form_data[field.id] = random.choice(field.options)
|
||||
elif field.type == "long":
|
||||
form_data[field.id] = random.randint(1,1000)
|
||||
else:
|
||||
form_data[field.id] = WorkflowProcessor._randomString()
|
||||
if task.data is None:
|
||||
task.data = {}
|
||||
task.data.update(form_data)
|
||||
|
||||
@staticmethod
|
||||
def _randomString(stringLength=10):
|
||||
"""Generate a random string of fixed length """
|
||||
letters = string.ascii_lowercase
|
||||
return ''.join(random.choice(letters) for i in range(stringLength))
|
||||
|
||||
@staticmethod
|
||||
def status_of(bpmn_workflow):
|
||||
if bpmn_workflow.is_completed():
|
||||
|
@ -230,6 +288,7 @@ class WorkflowProcessor(object):
|
|||
spec = WorkflowProcessor.get_spec(workflow_spec_id)
|
||||
bpmn_workflow = BpmnWorkflow(spec, script_engine=cls._script_engine)
|
||||
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study_id
|
||||
bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
|
||||
bpmn_workflow.do_engine_steps()
|
||||
workflow_model = WorkflowModel(status=WorkflowProcessor.status_of(bpmn_workflow),
|
||||
study_id=study_id,
|
||||
|
|
Binary file not shown.
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" id="Definitions_1wv9t3c" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
|
||||
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" id="Definitions_1wv9t3c" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
|
||||
<bpmn:process id="Process_19ej1y2" name="Data Securty Plan" isExecutable="true">
|
||||
<bpmn:startEvent id="StartEvent_1co48s3">
|
||||
<bpmn:outgoing>SequenceFlow_100w7co</bpmn:outgoing>
|
||||
|
@ -634,7 +634,7 @@ Submit the step only when you are ready. After you "Submit" the step, the inform
|
|||
<bpmn:scriptTask id="Task_0q2zf9x" name="Generate DSP">
|
||||
<bpmn:incoming>SequenceFlow_07rwety</bpmn:incoming>
|
||||
<bpmn:outgoing>SequenceFlow_0v51xcx</bpmn:outgoing>
|
||||
<bpmn:script>scripts.CompleteTemplate NEW_DSP_template.docx</bpmn:script>
|
||||
<bpmn:script>CompleteTemplate NEW_DSP_template.docx Study.DataSecurityPlan</bpmn:script>
|
||||
</bpmn:scriptTask>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_1huif01" name="No" sourceRef="ExclusiveGateway_1ef1ype" targetRef="Activity_0kxjyy1">
|
||||
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">FormField_isCreateDSP == False</bpmn:conditionExpression>
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
<bpmn:outgoing>SequenceFlow_1lmkn99</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_1lmkn99" sourceRef="Task_Has_Bananas" targetRef="ExclusiveGateway_003amsm" />
|
||||
<bpmn:exclusiveGateway id="ExclusiveGateway_003amsm">
|
||||
<bpmn:exclusiveGateway id="ExclusiveGateway_003amsm" name="Has Bananas Gateway">
|
||||
<bpmn:incoming>SequenceFlow_1lmkn99</bpmn:incoming>
|
||||
<bpmn:outgoing>SequenceFlow_Yes_Bananas</bpmn:outgoing>
|
||||
<bpmn:outgoing>SequenceFlow_No_Bananas</bpmn:outgoing>
|
||||
|
@ -24,7 +24,7 @@
|
|||
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">has_bananas == True</bpmn:conditionExpression>
|
||||
</bpmn:sequenceFlow>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_No_Bananas" name="no" sourceRef="ExclusiveGateway_003amsm" targetRef="Task_Why_No_Bananas">
|
||||
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">lower_case_true==true</bpmn:conditionExpression>
|
||||
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">this_value_does_not_exist==true</bpmn:conditionExpression>
|
||||
</bpmn:sequenceFlow>
|
||||
<bpmn:userTask id="Task_Num_Bananas" name="Number of Bananas" camunda:formKey="banana_count">
|
||||
<bpmn:extensionElements>
|
||||
|
@ -71,6 +71,9 @@
|
|||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNShape id="ExclusiveGateway_14wqqsi_di" bpmnElement="ExclusiveGateway_003amsm" isMarkerVisible="true">
|
||||
<dc:Bounds x="425" y="92" width="50" height="50" />
|
||||
<bpmndi:BPMNLabel>
|
||||
<dc:Bounds x="417" y="62" width="67" height="27" />
|
||||
</bpmndi:BPMNLabel>
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_0f3vx1l_di" bpmnElement="SequenceFlow_Yes_Bananas">
|
||||
<di:waypoint x="475" y="117" />
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
|
||||
<bpmn:process id="Process_18biih5" isExecutable="true">
|
||||
<bpmn:startEvent id="StartEvent_1">
|
||||
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
|
||||
</bpmn:startEvent>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_1pnq3kg" sourceRef="StartEvent_1" targetRef="Invalid_Script_Task" />
|
||||
<bpmn:endEvent id="EndEvent_063bpg6">
|
||||
<bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
|
||||
</bpmn:endEvent>
|
||||
<bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Script Reference">
|
||||
<bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
|
||||
<bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
|
||||
<bpmn:script>NoSuchScript withArg1</bpmn:script>
|
||||
</bpmn:scriptTask>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
|
||||
</bpmn:process>
|
||||
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
|
||||
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
|
||||
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
|
||||
<dc:Bounds x="179" y="99" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
|
||||
<di:waypoint x="215" y="117" />
|
||||
<di:waypoint x="290" y="117" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
|
||||
<dc:Bounds x="442" y="99" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
|
||||
<dc:Bounds x="290" y="77" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
|
||||
<di:waypoint x="390" y="117" />
|
||||
<di:waypoint x="442" y="117" />
|
||||
</bpmndi:BPMNEdge>
|
||||
</bpmndi:BPMNPlane>
|
||||
</bpmndi:BPMNDiagram>
|
||||
</bpmn:definitions>
|
|
@ -1,7 +1,9 @@
|
|||
import json
|
||||
from unittest.mock import patch
|
||||
|
||||
from crc import db
|
||||
from crc.models.file import FileDataModel, FileModel
|
||||
from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema
|
||||
from crc.scripts.required_docs import RequiredDocs
|
||||
from crc.services.file_service import FileService
|
||||
from tests.base_test import BaseTest
|
||||
|
@ -41,13 +43,15 @@ class TestRequiredDocsScript(BaseTest):
|
|||
dict = FileService.get_file_reference_dictionary()
|
||||
self.assertIsNotNone(dict)
|
||||
|
||||
@patch('crc.services.protocol_builder.requests.get')
|
||||
def test_get_required_docs(self, mock_get):
|
||||
mock_get.return_value.ok = True
|
||||
mock_get.return_value.text = self.protocol_builder_response('required_docs.json')
|
||||
def get_required_docs(self):
|
||||
string_data = self.protocol_builder_response('required_docs.json')
|
||||
return ProtocolBuilderRequiredDocumentSchema(many=True).loads(string_data)
|
||||
|
||||
def test_get_required_docs(self):
|
||||
pb_docs = self.get_required_docs()
|
||||
self.create_reference_document()
|
||||
script = RequiredDocs()
|
||||
required_docs = script.get_required_docs(12) # Mocked out, any random study id works.
|
||||
required_docs = script.get_required_docs(12, pb_docs) # Mocked out, any random study id works.
|
||||
self.assertIsNotNone(required_docs)
|
||||
self.assertTrue(6 in required_docs.keys())
|
||||
self.assertEquals("Cancer Center's PRC Approval Form", required_docs[6]['name'])
|
||||
|
@ -56,14 +60,9 @@ class TestRequiredDocsScript(BaseTest):
|
|||
self.assertEquals("CRC", required_docs[6]['Who Uploads?'])
|
||||
self.assertEquals(0, required_docs[6]['count'])
|
||||
|
||||
@patch('crc.services.protocol_builder.requests.get')
|
||||
def test_get_required_docs_has_correct_count_when_a_file_exists(self, mock_get):
|
||||
def test_get_required_docs_has_correct_count_when_a_file_exists(self):
|
||||
self.load_example_data()
|
||||
|
||||
# Mock out the protocol builder
|
||||
mock_get.return_value.ok = True
|
||||
mock_get.return_value.text = self.protocol_builder_response('required_docs.json')
|
||||
|
||||
pb_docs = self.get_required_docs()
|
||||
# Make sure the xslt reference document is in place.
|
||||
self.create_reference_document()
|
||||
script = RequiredDocs()
|
||||
|
@ -76,6 +75,6 @@ class TestRequiredDocsScript(BaseTest):
|
|||
name="anything.png", content_type="text",
|
||||
binary_data=b'1234', irb_doc_code=irb_code)
|
||||
|
||||
required_docs = script.get_required_docs(workflow.study_id)
|
||||
required_docs = script.get_required_docs(workflow.study_id, pb_docs)
|
||||
self.assertIsNotNone(required_docs)
|
||||
self.assertEquals(1, required_docs[6]['count'])
|
||||
|
|
|
@ -18,18 +18,10 @@ from crc.services.workflow_processor import WorkflowProcessor
|
|||
|
||||
class TestWorkflowProcessor(BaseTest):
|
||||
|
||||
def _randomString(self, stringLength=10):
|
||||
"""Generate a random string of fixed length """
|
||||
letters = string.ascii_lowercase
|
||||
return ''.join(random.choice(letters) for i in range(stringLength))
|
||||
|
||||
|
||||
def _populate_form_with_random_data(self, task):
|
||||
form_data = {}
|
||||
for field in task.task_spec.form.fields:
|
||||
form_data[field.id] = self._randomString()
|
||||
if task.data is None:
|
||||
task.data = {}
|
||||
task.data.update(form_data)
|
||||
WorkflowProcessor.populate_form_with_random_data(task)
|
||||
|
||||
def test_create_and_complete_workflow(self):
|
||||
self.load_example_data()
|
||||
|
@ -193,9 +185,6 @@ class TestWorkflowProcessor(BaseTest):
|
|||
processor3 = WorkflowProcessor(workflow_model, soft_reset=True)
|
||||
self.assertEqual("unexpected_workflow_structure", context.exception.code)
|
||||
|
||||
|
||||
|
||||
|
||||
def test_workflow_with_bad_expression_raises_sensible_error(self):
|
||||
self.load_example_data()
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import json
|
||||
|
||||
from crc import session
|
||||
from crc.api.common import ApiErrorSchema
|
||||
from crc.models.file import FileModel
|
||||
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
|
||||
from tests.base_test import BaseTest
|
||||
|
@ -94,11 +95,3 @@ class TestWorkflowSpec(BaseTest):
|
|||
num_workflows_after = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
|
||||
self.assertEqual(num_files_after + num_workflows_after, 0)
|
||||
|
||||
# def test_validate_workflow_specification(self):
|
||||
# self.load_example_data()
|
||||
# db_spec = session.query(WorkflowSpecModel).first()
|
||||
# rv = self.app.get('/v1.0/workflow-specification/%s/validate' % db_spec.id, headers=self.logged_in_headers())
|
||||
# self.assert_success(rv)
|
||||
# json_data = json.loads(rv.get_data(as_text=True))
|
||||
# api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
|
||||
# self.assertEqual(db_spec, api_spec)
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
import json
|
||||
import unittest
|
||||
|
||||
from crc import session
|
||||
from crc.api.common import ApiErrorSchema
|
||||
from crc.models.file import FileModel
|
||||
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
|
||||
from tests.base_test import BaseTest
|
||||
|
||||
|
||||
class TestWorkflowSpecValidation(BaseTest):
    """Exercises the /workflow-specification/{id}/validate endpoint.

    The endpoint executes a specification from beginning to end using random
    data and returns a (possibly empty) JSON list of ApiError records; an
    empty list means the spec validated cleanly.
    """

    def validate_workflow(self, workflow_name):
        """Load *workflow_name* as a test spec, call the validate endpoint,
        and return the deserialized list of ApiError dicts.

        :param workflow_name: name of a BPMN spec in the test data.
        :return: list of error dicts (empty when the spec is valid).
        """
        self.load_example_data()
        spec_model = self.load_test_spec(workflow_name)
        rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        return ApiErrorSchema(many=True).load(json_data)

    def test_successful_validation_of_test_workflows(self):
        """Every known-good test spec should validate without errors."""
        good_specs = [
            "parallel_tasks",
            "decision_table",
            "docx",
            "exclusive_gateway",
            "file_upload_form",
            "random_fact",
            "study_details",
            "top_level_workflow",
            "two_forms",
        ]
        for spec_name in good_specs:
            errors = self.validate_workflow(spec_name)
            # Include the spec name so a failure pinpoints the broken workflow.
            self.assertEqual(0, len(errors), "unexpected validation errors for spec '%s'" % spec_name)

    @unittest.skip("There is one workflow that is failing right now, and I want that visible after deployment.")
    def test_successful_validation_of_auto_loaded_workflows(self):
        """Every spec shipped with the example data should validate cleanly."""
        self.load_example_data()
        workflows = session.query(WorkflowSpecModel).all()
        errors = []
        for workflow in workflows:
            rv = self.app.get('/v1.0/workflow-specification/%s/validate' % workflow.id,
                              headers=self.logged_in_headers())
            self.assert_success(rv)
            json_data = json.loads(rv.get_data(as_text=True))
            errors.extend(ApiErrorSchema(many=True).load(json_data))
        # Dump the accumulated error list so the offending spec is visible.
        self.assertEqual(0, len(errors), json.dumps(errors))

    def test_invalid_expression(self):
        """A bad gateway expression is reported with full task context."""
        errors = self.validate_workflow("invalid_expression")
        self.assertEqual(1, len(errors))
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual("invalid_expression", errors[0]['code'])
        self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
        self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
        self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
        self.assertEqual("The expression you provided does not exist:this_value_does_not_exist==true", errors[0]["message"])

    def test_validation_error(self):
        """A structurally broken spec reports a workflow_validation_error."""
        errors = self.validate_workflow("invalid_spec")
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_validation_error", errors[0]['code'])
        self.assertEqual("StartEvent_1", errors[0]['task_id'])
        self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])

    def test_invalid_script(self):
        """A script task referencing a missing script reports task context."""
        errors = self.validate_workflow("invalid_script")
        self.assertEqual(1, len(errors))
        self.assertEqual("workflow_execution_exception", errors[0]['code'])
        self.assertIn("NoSuchScript", errors[0]['message'])
        self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
        self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
        self.assertEqual("invalid_script.bpmn", errors[0]['file_name'])
|
Loading…
Reference in New Issue