import json
import re
import xml.etree.ElementTree as ElementTree
from SpiffWorkflow import Task as SpiffTask, Workflow
from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine
from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.operators import Operator
from crc import session, db
from crc.api.common import ApiError
from crc.models.file import FileDataModel, FileModel, FileType
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc.scripts.script import Script


class CustomBpmnScriptEngine(BpmnScriptEngine):
    """This is a custom script processor that can be easily injected into Spiff Workflow.

    Rather than execute arbitrary code, it assumes the script references a fully qualified
    Python class, such as myapp.RandomFact."""
    def execute(self, task: SpiffTask, script, **kwargs):
        """
        Assume that the script read in from the BPMN file is a fully qualified Python class.
        Instantiate that class and pass in any data available to the current task so that it
        might act on it. Assume that the class implements the "do_task" method.

        This allows us to reference custom code from the BPMN diagram.
        """
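        # Illustrative example (hypothetical script name): a script task whose script
        # reads "RandomFact study" resolves to the module crc.scripts.random_fact and
        # the class RandomFact, and is then invoked as:
        #     RandomFact().do_task(task, study_id, "study")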
commands = script.split(" ")
path_and_command = commands[0].rsplit(".", 1)
if len(path_and_command) == 1:
module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0])
class_name = path_and_command[0]
else:
module_name = "crc.scripts." + path_and_command[0] + "." + self.camel_to_snake(path_and_command[1])
class_name = path_and_command[1]
try:
mod = __import__(module_name, fromlist=[class_name])
klass = getattr(mod, class_name)
study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
            instance = klass()
            if not isinstance(instance, Script):
raise ApiError("invalid_script",
"This is an internal error. The script '%s:%s' you called "
"does not properly implement the CRC Script class." %
(module_name, class_name))
            instance.do_task(task, study_id, *commands[1:])
        except ModuleNotFoundError:
raise ApiError("invalid_script",
"Unable to locate Script: '%s:%s'" % (module_name, class_name), 400)
@staticmethod
def camel_to_snake(camel):
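        # e.g. camel_to_snake("StudyInfo") returns "study_info", matching the
        # snake_case module naming convention used under crc.scripts.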
camel = camel.strip()
return re.sub(r'(?<!^)(?=[A-Z])', '_', camel).lower()
def evaluate(self, task, expression):
        """
        Evaluate the given expression within the context of the given task and
        return the result.
        """
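        # Illustrative example (assumed task data): with task.data == {"age": 40},
        # evaluate(task, "age > 30") returns True.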
if isinstance(expression, Operator):
return expression._matches(task)
else:
return self._eval(task, expression, **task.data)
def _eval(self, task, expression, **kwargs):
        # Pass the task data in as the local namespace for the expression, rather than
        # relying on the fragile, CPython-specific locals().update() trick.
        try:
            return eval(expression, {}, kwargs)
        except NameError:
            raise ApiError('invalid_expression',
                           'The expression you provided does not exist: ' + expression)


class MyCustomParser(BpmnDmnParser):
    """
    A BPMN and DMN parser that can also parse Camunda forms.
    """
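    # OVERRIDE_PARSER_CLASSES maps BPMN element tags to parser classes; merging in
    # the Camunda entries below is what adds support for Camunda form definitions.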
    # Copy the base map so we don't mutate BpmnDmnParser's class-level dict in place.
    OVERRIDE_PARSER_CLASSES = dict(BpmnDmnParser.OVERRIDE_PARSER_CLASSES)
OVERRIDE_PARSER_CLASSES.update(CamundaParser.OVERRIDE_PARSER_CLASSES)


class WorkflowProcessor(object):
_script_engine = CustomBpmnScriptEngine()
_serializer = BpmnSerializer()
WORKFLOW_ID_KEY = "workflow_id"
STUDY_ID_KEY = "study_id"
def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model.

        If soft_reset is set to true, it will try to use the latest version of the workflow specification.
        If hard_reset is set to true, it will create a new workflow, but embed the data from the last
        completed task in the previous workflow.
        If neither flag is set, it will use the same version of the specification that was used to
        originally create the workflow model."""
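        # Typical usage (illustrative):
        #   WorkflowProcessor(model)                   # resume with the stored spec version
        #   WorkflowProcessor(model, soft_reset=True)  # upgrade to the latest spec version
        #   WorkflowProcessor(model, hard_reset=True)  # rebuild the workflow, keeping task data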
if soft_reset:
spec = self.get_spec(workflow_model.workflow_spec_id)
workflow_model.spec_version = spec.description
else:
spec = self.get_spec(workflow_model.workflow_spec_id, workflow_model.spec_version)
self.workflow_spec_id = workflow_model.workflow_spec_id
self.bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json, workflow_spec=spec)
self.bpmn_workflow.script_engine = self._script_engine
if hard_reset:
# Now that the spec is loaded, get the data and rebuild the bpmn with the new details
workflow_model.spec_version = self.hard_reset()
@staticmethod
def get_parser():
parser = MyCustomParser()
return parser
@staticmethod
def __get_file_models_for_version(workflow_spec_id, version):
        """Version is in the format v[VERSION] ([FILE_ID_LIST]).
        For example, a single bpmn file with only one version would be
        v1 (12), where 12 is the id of the file data used to create the
        specification. If multiple files exist, they are added on in
        dot notation to both the version number and the file list. So
        a spec that includes a BPMN, a DMN, and a Word file, all on their
        first versions, would be v1.1.1 (12.45.21)."""
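        # Illustrative example: version "v1.1 (12.45)" yields file_ids == [12, 45].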
        file_id_strings = re.findall(r'\((.*)\)', version)[0].split(".")
file_ids = [int(i) for i in file_id_strings]
files = session.query(FileDataModel)\
.join(FileModel) \
.filter(FileModel.workflow_spec_id == workflow_spec_id)\
.filter(FileDataModel.id.in_(file_ids)).all()
if len(files) != len(file_ids):
raise ApiError("invalid_version",
"The version '%s' of workflow specification '%s' is invalid. Unable to locate the correct files to recreate it." %
(version, workflow_spec_id))
return files
@staticmethod
def __get_latest_file_models(workflow_spec_id):
"""Returns all the latest files related to a workflow specification"""
return session.query(FileDataModel) \
.join(FileModel) \
.filter(FileModel.workflow_spec_id == workflow_spec_id)\
.filter(FileDataModel.version == FileModel.latest_version)\
.order_by(FileModel.id)\
.all()
@staticmethod
def get_spec(workflow_spec_id, version=None):
        """Returns the requested version of the specification,
        or the latest version if none is specified."""
parser = WorkflowProcessor.get_parser()
major_version = 0 # The version of the primary file.
minor_version = [] # The versions of the minor files if any.
file_ids = []
process_id = None
if version is None:
file_data_models = WorkflowProcessor.__get_latest_file_models(workflow_spec_id)
else:
file_data_models = WorkflowProcessor.__get_file_models_for_version(workflow_spec_id, version)
for file_data in file_data_models:
file_ids.append(file_data.id)
if file_data.file_model.type == FileType.bpmn:
bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
if file_data.file_model.primary:
process_id = WorkflowProcessor.get_process_id(bpmn)
parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
elif file_data.file_model.type == FileType.dmn:
dmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
if file_data.file_model.primary:
major_version = file_data.version
else:
minor_version.append(file_data.version)
if process_id is None:
            raise Exception("There is no primary BPMN model defined for workflow %s" % workflow_spec_id)
minor_version.insert(0, major_version) # Add major version to beginning.
spec = parser.get_spec(process_id)
version = ".".join(str(x) for x in minor_version)
files = ".".join(str(x) for x in file_ids)
spec.description = "v%s (%s)" % (version, files)
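        # Illustrative example: a primary file at version 2 (file id 12) plus one
        # minor file at version 1 (file id 45) yields the description "v2.1 (12.45)".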
return spec
@staticmethod
def status_of(bpmn_workflow):
if bpmn_workflow.is_completed():
return WorkflowStatus.complete
user_tasks = bpmn_workflow.get_ready_user_tasks()
if len(user_tasks) > 0:
return WorkflowStatus.user_input_required
else:
return WorkflowStatus.waiting
@classmethod
def create(cls, study_id, workflow_spec_id):
spec = WorkflowProcessor.get_spec(workflow_spec_id)
bpmn_workflow = BpmnWorkflow(spec, script_engine=cls._script_engine)
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study_id
bpmn_workflow.do_engine_steps()
workflow_model = WorkflowModel(status=WorkflowProcessor.status_of(bpmn_workflow),
study_id=study_id,
workflow_spec_id=workflow_spec_id,
spec_version=spec.description)
session.add(workflow_model)
session.commit()
# Need to commit twice, first to get a unique id for the workflow model, and
        # a second time to store the serialization so we can maintain this link within
# the spiff-workflow process.
bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(bpmn_workflow)
session.add(workflow_model)
session.commit()
processor = cls(workflow_model)
return processor
def hard_reset(self):
        """Recreate this workflow, but keep the data from the last completed task and add it back into the first task.
        This may be useful when a workflow specification changes, and users need to review all the
        prior steps, but don't need to re-enter all the previous data.

        Returns the new version.
        """
spec = WorkflowProcessor.get_spec(self.workflow_spec_id)
bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
bpmn_workflow.data = self.bpmn_workflow.data
for task in bpmn_workflow.get_tasks(SpiffTask.READY):
task.data = self.bpmn_workflow.last_task.data
bpmn_workflow.do_engine_steps()
self.bpmn_workflow = bpmn_workflow
return spec.description
def get_status(self):
return self.status_of(self.bpmn_workflow)
def get_spec_version(self):
        """We use the spec's description field to store the version information."""
return self.bpmn_workflow.spec.description
def do_engine_steps(self):
self.bpmn_workflow.do_engine_steps()
def serialize(self):
return self._serializer.serialize_workflow(self.bpmn_workflow)
def next_user_tasks(self):
return self.bpmn_workflow.get_ready_user_tasks()
def next_task(self):
        """Returns the next task that should be completed, even if there are
        parallel tasks and multiple options are available.
        If the workflow is complete, it will return the final end task.
        """
# If the whole blessed mess is done, return the end_event task in the tree
if self.bpmn_workflow.is_completed():
for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.ANY_MASK):
if isinstance(task.task_spec, EndEvent):
return task
# If there are ready tasks to complete, return the next ready task, but return the one
# in the active parallel path if possible.
ready_tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)
if len(ready_tasks) > 0:
for task in ready_tasks:
if task.parent == self.bpmn_workflow.last_task:
return task
return ready_tasks[0]
        # If there are no ready tasks, but the workflow isn't complete yet, return the
        # last incomplete task reached in the iteration below.
next_task = None
for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.NOT_FINISHED_MASK):
next_task = task
return next_task
def complete_task(self, task):
self.bpmn_workflow.complete_task_from_id(task.id)
def get_data(self):
return self.bpmn_workflow.data
def get_workflow_id(self):
return self.bpmn_workflow.data[self.WORKFLOW_ID_KEY]
def get_study_id(self):
return self.bpmn_workflow.data[self.STUDY_ID_KEY]
def get_ready_user_tasks(self):
return self.bpmn_workflow.get_ready_user_tasks()
def get_all_user_tasks(self):
all_tasks = self.bpmn_workflow.get_tasks(SpiffTask.ANY_MASK)
return [t for t in all_tasks if not self.bpmn_workflow._is_engine_task(t.task_spec)]
@staticmethod
def get_process_id(et_root: ElementTree.Element):
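        # Illustrative example: for a root element containing
        # <bpmn:process id="my_process" isExecutable="true">, this returns "my_process".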
process_elements = []
for child in et_root:
            # XML attribute values are strings, so even 'false' would be truthy;
            # compare against the string 'true' explicitly.
            if child.tag.endswith('process') and child.attrib.get('isExecutable', 'false') == 'true':
process_elements.append(child)
if len(process_elements) == 0:
raise Exception('No executable process tag found')
# There are multiple root elements
if len(process_elements) > 1:
# Look for the element that has the startEvent in it
for e in process_elements:
this_element: ElementTree.Element = e
for child_element in list(this_element):
if child_element.tag.endswith('startEvent'):
return this_element.attrib['id']
raise Exception('No start event found in %s' % et_root.attrib['id'])
return process_elements[0].attrib['id']