import re
import shlex
from datetime import datetime
from typing import List

from lxml import etree

from SpiffWorkflow import Task as SpiffTask, WorkflowException, Task
from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
from SpiffWorkflow.bpmn.specs.UserTask import UserTask
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.exceptions import WorkflowTaskExecException
from SpiffWorkflow.serializer.exceptions import MissingSpecError
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.util.metrics import timeit, firsttime, sincetime

import crc
from crc import session, app
from crc.api.common import ApiError
from crc.models.file import FileDataModel, FileModel, FileType
from crc.models.task_event import TaskEventModel
from crc.models.user import UserModelSchema
from crc.models.workflow import WorkflowStatus, WorkflowModel, WorkflowSpecDependencyFile
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc.services.user_service import UserService


class CustomBpmnScriptEngine(PythonScriptEngine):
    """This is a custom script processor that can be easily injected into Spiff Workflow.
    It will execute python code read in from the bpmn.  It will also make any scripts in the
    scripts directory available for execution."""

    def evaluate(self, task, expression):
        """
        Evaluate the given expression, within the context of the given task, and
        return the result.
        """
        study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
        if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data:
            workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
        else:
            workflow_id = None

        try:
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                augment_methods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
            else:
                augment_methods = Script.generate_augmented_list(task, study_id, workflow_id)
            return self._evaluate(expression, external_methods=augment_methods, **task.data)
        except Exception as e:
            raise WorkflowTaskExecException(task,
                                            "Error evaluating expression "
                                            "'%s', %s" % (expression, str(e)))

    @timeit
    def execute(self, task: SpiffTask, script, data):
        study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
        if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data:
            workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
        else:
            workflow_id = None
        try:
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                augment_methods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
            else:
                augment_methods = Script.generate_augmented_list(task, study_id, workflow_id)
            super().execute(task, script, data, external_methods=augment_methods)
        except WorkflowException as e:
            raise e
        except Exception as e:
            raise WorkflowTaskExecException(task, f'{script}, {e}', e)

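
# A minimal sketch of how this engine gets wired in (illustrative, not executed
# here; run_master_spec and WorkflowProcessor.__get_bpmn_workflow below do the
# real wiring, and `spec` and `study` are assumed placeholders):
#
#     workflow = BpmnWorkflow(spec, script_engine=CustomBpmnScriptEngine())
#     workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
#     workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
#     workflow.do_engine_steps()
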
class MyCustomParser(BpmnDmnParser):
    """
    A BPMN and DMN parser that can also parse Camunda forms.
    """
    OVERRIDE_PARSER_CLASSES = BpmnDmnParser.OVERRIDE_PARSER_CLASSES
    OVERRIDE_PARSER_CLASSES.update(CamundaParser.OVERRIDE_PARSER_CLASSES)

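
# Typical use of the parser (see get_spec below for the real call sites; the
# file names here are assumed placeholders):
#
#     parser = MyCustomParser()
#     parser.add_bpmn_xml(bpmn_element, filename='workflow.bpmn')
#     parser.add_dmn_xml(dmn_element, filename='decision.dmn')
#     spec = parser.get_spec('my_process_id')
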

class WorkflowProcessor(object):
    _script_engine = CustomBpmnScriptEngine()
    _serializer = BpmnSerializer()

    WORKFLOW_ID_KEY = "workflow_id"
    STUDY_ID_KEY = "study_id"
    VALIDATION_PROCESS_KEY = "validate_only"

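    # These keys live in bpmn_workflow.data for the life of the workflow. An
    # illustrative shape (values assumed for the example):
    #
    #     {"study_id": 42, "workflow_id": 7, "validate_only": False}
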
    def __init__(self, workflow_model: WorkflowModel, validate_only=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model."""

        self.workflow_model = workflow_model

        if workflow_model.bpmn_workflow_json is None:  # The workflow was never started.
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id, include_libraries=True)
            spec = self.get_spec(self.spec_data_files, workflow_model.workflow_spec_id)
        else:
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                workflow_id=workflow_model.id)
            spec = None

        self.workflow_spec_id = workflow_model.workflow_spec_id

        try:
            self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec, validate_only)
            self.bpmn_workflow.script_engine = self._script_engine

            if UserService.has_user():
                current_user = UserService.current_user(allow_admin_impersonate=True)
                current_user_data = UserModelSchema().dump(current_user)
                tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)
                for task in tasks:
                    task.data['current_user'] = current_user_data

            if self.WORKFLOW_ID_KEY not in self.bpmn_workflow.data:
                # If the model is new and has no id, save it, write the id into the workflow's
                # data, and save it again. In this way, the workflow process is always aware of
                # the database model to which it is associated, and scripts running within the
                # workflow can then load data as needed.
                if not workflow_model.id:
                    session.add(workflow_model)
                    session.commit()
                self.bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
                workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(
                    self.bpmn_workflow, include_spec=True)
                self.save()

        except MissingSpecError as ke:
            raise ApiError(code="unexpected_workflow_structure",
                           message="Failed to deserialize workflow"
                                   " '%s' version %s, due to a misplaced or missing task '%s'" %
                                   (self.workflow_spec_id, self.get_version_string(), str(ke)))

        # Record whether this workflow is running against the latest version of the spec files.
        self.is_latest_spec = \
            self.spec_data_files == FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id)

    @staticmethod
    def reset(workflow_model, clear_data=False, delete_files=False):
        """Resets the workflow to an unstarted state: any serialized state is discarded,
        and optionally any collected form data and uploaded files are removed as well."""

        # Try to execute a cancel notify
        try:
            wp = WorkflowProcessor(workflow_model)
            wp.cancel_notify()  # This sends a cancel notification to all listening endpoints.
        except Exception as e:
            app.logger.error("Unable to send a cancel notify for workflow %s during a reset."
                             " Continuing with the reset anyway so we don't get in an unresolvable"
                             " state. A %s error occurred with the following information: %s" %
                             (workflow_model.id, e.__class__.__name__, str(e)))
        workflow_model.bpmn_workflow_json = None

        if clear_data:
            # Clear form_data from task_events
            task_events = session.query(TaskEventModel). \
                filter(TaskEventModel.workflow_id == workflow_model.id).all()
            for task_event in task_events:
                task_event.form_data = {}
                session.add(task_event)
        if delete_files:
            files = FileModel.query.filter(FileModel.workflow_id == workflow_model.id).all()
            for file in files:
                FileService.delete_file(file.id)
        session.commit()
        return WorkflowProcessor(workflow_model)

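    # Illustrative call (not executed here): fully reset a workflow, wiping
    # both saved form data and uploaded files:
    #
    #     processor = WorkflowProcessor.reset(workflow_model, clear_data=True, delete_files=True)
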
    def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec, validate_only=False):
        """Deserializes the workflow from the model if it has been started;
        otherwise builds a fresh BpmnWorkflow from the given spec."""
        if workflow_model.bpmn_workflow_json:
            bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json,
                                                                  workflow_spec=spec)
        else:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only
        return bpmn_workflow

    def save(self):
        """Saves the current state of this processor to the database."""
        self.workflow_model.bpmn_workflow_json = self.serialize()
        complete_states = [SpiffTask.CANCELLED, SpiffTask.COMPLETED]
        tasks = list(self.get_all_user_tasks())
        self.workflow_model.status = self.get_status()
        self.workflow_model.total_tasks = len(tasks)
        self.workflow_model.completed_tasks = sum(1 for t in tasks if t.state in complete_states)
        self.workflow_model.last_updated = datetime.utcnow()
        self.update_dependencies(self.spec_data_files)
        session.add(self.workflow_model)
        session.commit()

    def get_version_string(self):
        # This could potentially become expensive if we load all the data in the data models,
        # in which case we might consider using a deferred loader for the actual data, but
        # we are trying not to pre-optimize.
        file_data_models = FileService.get_spec_data_files(self.workflow_model.workflow_spec_id,
                                                           self.workflow_model.id)
        return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)

    @staticmethod
    def get_latest_version_string_for_spec(spec_id):
        file_data_models = FileService.get_spec_data_files(spec_id)
        return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)

    @staticmethod
    def __get_version_string_for_data_models(file_data_models):
        """Version is in the format v[VERSION] (FILE_ID_LIST)
        For example, a single bpmn file with only one version would be
        v1 (12), where 12 is the id of the file data model that is used to create the
        specification. If multiple files exist, they are appended in
        dot notation to both the version number and the file list. So
        a spec that includes a BPMN, a DMN, and a Word file, all on their first
        versions, would be v1.1.1 (12.45.21)."""

        major_version = 0  # The version of the primary file.
        minor_version = []  # The versions of the minor files, if any.
        file_ids = []
        for file_data in file_data_models:
            file_ids.append(file_data.id)
            if file_data.file_model.primary:
                major_version = file_data.version
            else:
                minor_version.append(file_data.version)
        minor_version.insert(0, major_version)  # Add the major version to the beginning.
        version = ".".join(str(x) for x in minor_version)
        files = ".".join(str(x) for x in file_ids)
        full_version = "v%s (%s)" % (version, files)
        return full_version

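    # Worked example (ids and versions assumed for illustration): a primary
    # BPMN at version 2 (file data id 12) plus one DMN at version 1 (file data
    # id 45) yields minor_version == [2, 1] and file_ids == [12, 45], so the
    # method returns "v2.1 (12.45)".
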
    def update_dependencies(self, spec_data_files):
        existing_dependencies = FileService.get_spec_data_files(
            workflow_spec_id=self.workflow_model.workflow_spec_id,
            workflow_id=self.workflow_model.id)

        # Don't save the dependencies if they haven't changed.
        if existing_dependencies == spec_data_files:
            return

        # Remove all existing dependencies, and replace them.
        self.workflow_model.dependencies = []
        for file_data in spec_data_files:
            self.workflow_model.dependencies.append(WorkflowSpecDependencyFile(file_data_id=file_data.id))

    @staticmethod
    @timeit
    def run_master_spec(spec_model, study):
        """Executes a BPMN specification for the given study, without recording any information
        to the database. Useful for running the master specification, which should not persist."""
        lasttime = firsttime()
        spec_data_files = FileService.get_spec_data_files(spec_model.id)
        lasttime = sincetime('load Files', lasttime)
        spec = WorkflowProcessor.get_spec(spec_data_files, spec_model.id)
        lasttime = sincetime('get spec', lasttime)
        try:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=WorkflowProcessor._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            lasttime = sincetime('get_workflow', lasttime)
            bpmn_workflow.do_engine_steps()
            lasttime = sincetime('run steps', lasttime)
        except WorkflowException as we:
            raise ApiError.from_task_spec("error_running_master_spec", str(we), we.sender)

        if not bpmn_workflow.is_completed():
            raise ApiError("master_spec_not_automatic",
                           "The master spec should only contain fully automated tasks; it failed to complete.")

        return bpmn_workflow.last_task.data

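    # Illustrative call (models assumed to exist): run the master spec for a
    # study and inspect the resulting data without persisting anything:
    #
    #     data = WorkflowProcessor.run_master_spec(master_spec_model, study)
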
    @staticmethod
    def get_parser():
        parser = MyCustomParser()
        return parser

    @staticmethod
    def get_spec(file_data_models: List[FileDataModel], workflow_spec_id):
        """Returns a SpiffWorkflow specification for the given workflow spec,
        using the files provided. The workflow_spec_id is only used to generate
        better error messages."""
        parser = WorkflowProcessor.get_parser()
        process_id = None

        for file_data in file_data_models:
            if file_data.file_model.type == FileType.bpmn:
                bpmn: etree.Element = etree.fromstring(file_data.data)
                if file_data.file_model.primary and file_data.file_model.workflow_spec_id == workflow_spec_id:
                    process_id = FileService.get_process_id(bpmn)
                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
            elif file_data.file_model.type == FileType.dmn:
                dmn: etree.Element = etree.fromstring(file_data.data)
                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
        if process_id is None:
            raise ApiError(code="no_primary_bpmn_error",
                           message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id)
        try:
            spec = parser.get_spec(process_id)
        except ValidationException as ve:
            raise ApiError(code="workflow_validation_error",
                           message="Failed to parse the Workflow Specification. "
                                   "Error is '%s.'" % str(ve),
                           file_name=ve.filename,
                           task_id=ve.id,
                           tag=ve.tag)
        return spec

    @staticmethod
    def status_of(bpmn_workflow):
        if bpmn_workflow.is_completed():
            return WorkflowStatus.complete
        user_tasks = bpmn_workflow.get_ready_user_tasks()
        waiting_tasks = bpmn_workflow.get_tasks(Task.WAITING)
        if len(waiting_tasks) > 0:
            return WorkflowStatus.waiting
        if len(user_tasks) > 0:
            return WorkflowStatus.user_input_required
        else:
            return WorkflowStatus.waiting

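    # Status precedence implemented above: a completed workflow is 'complete';
    # any WAITING task makes the whole workflow 'waiting', even if user tasks
    # are also ready; otherwise ready user tasks mean 'user_input_required';
    # with nothing ready or waiting, the workflow is still reported 'waiting'.
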
    def get_status(self):
        return self.status_of(self.bpmn_workflow)

    def do_engine_steps(self, exit_at=None):
        try:
            self.bpmn_workflow.refresh_waiting_tasks()
            self.bpmn_workflow.do_engine_steps(exit_at=exit_at)
        except WorkflowTaskExecException as we:
            raise ApiError.from_task("task_error", str(we), we.task)

    def cancel_notify(self):
        try:
            self.bpmn_workflow.cancel_notify()
        except WorkflowTaskExecException as we:
            raise ApiError.from_task("task_error", str(we), we.task)

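    # A typical request cycle, sketched (assumes `processor` is an instance of
    # this class; form handling is elided):
    #
    #     processor.do_engine_steps()
    #     for task in processor.next_user_tasks():
    #         task.data.update(submitted_form_data)  # hypothetical form data
    #         processor.complete_task(task)
    #     processor.save()
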
    def serialize(self):
        return self._serializer.serialize_workflow(self.bpmn_workflow, include_spec=True)

    def next_user_tasks(self):
        return self.bpmn_workflow.get_ready_user_tasks()

    def next_task(self):
        """Returns the next task that should be completed, even if there are
        parallel tasks and multiple options are available.
        If the workflow is complete, it will return the final end task.
        """

        # If the whole blessed mess is done, return the end event task in the tree.
        # This was failing in the case of a call activity where we have an intermediate
        # EndEvent; what we really want is the LAST EndEvent.
        endtasks = []
        if self.bpmn_workflow.is_completed():
            for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.ANY_MASK):
                if isinstance(task.task_spec, EndEvent):
                    endtasks.append(task)
            return endtasks[-1]

        # If there are ready tasks to complete, return the next ready task, but prefer the one
        # in the active parallel path. In some cases the active parallel path may itself be
        # a parallel gateway with multiple tasks, so prefer ones that share a parent.

        # Get a list of all ready tasks
        ready_tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)

        # Get a list of all completed user tasks (non-engine tasks)
        completed_user_tasks = self.completed_user_tasks()

        if len(ready_tasks) > 0:
            # If there are no completed user tasks, return the first ready task.
            if len(completed_user_tasks) == 0:
                return ready_tasks[0]

            # Otherwise take the most recently completed user task, and prefer a ready
            # task that descends from it, then one that shares its parent.
            last_user_task = completed_user_tasks[0]
            for task in ready_tasks:
                if task._is_descendant_of(last_user_task):
                    return task
            for task in ready_tasks:
                if self.bpmn_workflow.last_task and task.parent == last_user_task.parent:
                    return task

            return ready_tasks[0]

        # If there are no ready tasks, but the thing isn't complete yet, walk the
        # task tree and return the last task that is not yet finished.
        next_task = None
        for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.NOT_FINISHED_MASK):
            next_task = task
        return next_task

    def completed_user_tasks(self):
        """Returns all completed user (non-engine) tasks, most recent first."""
        completed_user_tasks = self.bpmn_workflow.get_tasks(SpiffTask.COMPLETED)
        completed_user_tasks.reverse()
        completed_user_tasks = list(
            filter(lambda task: not self.bpmn_workflow._is_engine_task(task.task_spec), completed_user_tasks))
        return completed_user_tasks

    def previous_task(self):
        return None

    def complete_task(self, task):
        self.bpmn_workflow.complete_task_from_id(task.id)

    def get_data(self):
        return self.bpmn_workflow.data

    def get_workflow_id(self):
        return self.workflow_model.id

    def get_study_id(self):
        return self.bpmn_workflow.data[self.STUDY_ID_KEY]

    def get_ready_user_tasks(self):
        return self.bpmn_workflow.get_ready_user_tasks()

    def get_current_user_tasks(self):
        """Return a list of all user tasks that are READY or
        COMPLETE and are parallel to the READY Task."""
        ready_tasks = self.bpmn_workflow.get_ready_user_tasks()
        additional_tasks = []
        if len(ready_tasks) > 0:
            for child in ready_tasks[0].parent.children:
                if child.state == SpiffTask.COMPLETED:
                    additional_tasks.append(child)
        return ready_tasks + additional_tasks

    def get_all_user_tasks(self):
        all_tasks = self.bpmn_workflow.get_tasks(SpiffTask.ANY_MASK)
        return [t for t in all_tasks if not self.bpmn_workflow._is_engine_task(t.task_spec)]

    def get_all_completed_tasks(self):
        all_tasks = self.bpmn_workflow.get_tasks(SpiffTask.ANY_MASK)
        return [t for t in all_tasks
                if not self.bpmn_workflow._is_engine_task(t.task_spec) and t.state in [t.COMPLETED, t.CANCELLED]]

    def get_nav_item(self, task):
        for nav_item in self.bpmn_workflow.get_nav_list():
            if nav_item['task_id'] == task.id:
                return nav_item

    def find_spec_and_field(self, spec_name, field_id):
        """Tracks down a form field by name in the workflow spec(s).
        Returns a tuple of the task spec and the form field."""
        workflows = [self.bpmn_workflow]
        for task in self.bpmn_workflow.get_ready_user_tasks():
            if task.workflow not in workflows:
                workflows.append(task.workflow)
        for workflow in workflows:
            for spec in workflow.spec.task_specs.values():
                if spec.name == spec_name:
                    if not hasattr(spec, "form"):
                        raise ApiError("invalid_spec",
                                       "The spec name you provided does not contain a form.")

                    for field in spec.form.fields:
                        if field.id == field_id:
                            return spec, field

                    raise ApiError("invalid_field",
                                   f"The task '{spec_name}' has no field named '{field_id}'")

        raise ApiError("invalid_spec",
                       f"Unable to find a task in the workflow called '{spec_name}'")
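
    # Illustrative call (spec and field names are assumed placeholders):
    #
    #     spec, field = processor.find_spec_and_field('get_contact_info', 'email')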