import random
import re
import string
import xml.etree.ElementTree as ElementTree

from SpiffWorkflow import Task as SpiffTask
from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.exceptions import WorkflowException
from SpiffWorkflow.operators import Operator
Created a "StudyService" and moved all complex logic around study manipulation out of the study api, and this service, as things were getting complicated. The Workflow Processor no longer creates the WorkflowModel, the study object handles that, and only passes the model into the workflow processor when it is ready to start the workflow.
Created a Study object (seperate from the StudyModel) that can cronstructed on request, and contains a different data structure than we store in the DB. This allows us to return underlying Categories and Workflows in a clean way.
Added a new status to workflows called "not_started", meaning we have not yet instantiated a processor or created a BPMN, they have no version yet and no stored data, just the possiblity of being started.
The Top Level Workflow or "Master" workflow is now a part of the sample data, and loaded at all times.
Removed the ability to "add a workflow to a study" and "remove a workflow from a study", a study contains all possible workflows by definition.
Example data no longer creates users or studies, it just creates the specs.
from SpiffWorkflow.specs import WorkflowSpec

from crc import session
from crc.api.common import ApiError
from crc.models.file import FileDataModel, FileModel, FileType
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc.scripts.script import Script


class CustomBpmnScriptEngine(BpmnScriptEngine):
    """This is a custom script processor that can be easily injected into SpiffWorkflow.

    Rather than execute arbitrary code, this assumes the script references a fully
    qualified Python class, such as myapp.RandomFact.
    """

    def execute(self, task: SpiffTask, script, **kwargs):
        """
        Assume that the script read in from the BPMN file is a fully qualified Python class.
        Instantiate that class, and pass in any data available to the current task so that
        it might act on it. Assume that the class implements the "do_task" method.

        This allows us to reference custom code from the BPMN diagram.
        """
        commands = script.split(" ")
        path_and_command = commands[0].rsplit(".", 1)
        if len(path_and_command) == 1:
            module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0])
            class_name = path_and_command[0]
        else:
            module_name = "crc.scripts." + path_and_command[0] + "." + self.camel_to_snake(path_and_command[1])
            class_name = path_and_command[1]
        try:
            mod = __import__(module_name, fromlist=[class_name])
            klass = getattr(mod, class_name)
            study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
            if not isinstance(klass(), Script):
                raise ApiError.from_task("invalid_script",
                                         "This is an internal error. The script '%s:%s' you called " %
                                         (module_name, class_name) +
                                         "does not properly implement the CRC Script class.",
                                         task=task)
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                # If this is running a validation, and not a normal process, then we want to
                # mimic running the script, but not make any external calls or database changes.
                klass().do_task_validate_only(task, study_id, *commands[1:])
            else:
                klass().do_task(task, study_id, *commands[1:])
        except ModuleNotFoundError:
            raise ApiError.from_task("invalid_script",
                                     "Unable to locate Script: '%s:%s'" % (module_name, class_name),
                                     task=task)

    @staticmethod
    def camel_to_snake(camel):
        camel = camel.strip()
        return re.sub(r'(?<!^)(?=[A-Z])', '_', camel).lower()
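
    # Illustrative sketch of the resolution rule used by execute() above (the script
    # name is hypothetical, not a script that ships with the project): a BPMN script
    # task whose script text reads "MyScript some_arg" resolves as
    #   camel_to_snake("MyScript")  ->  "my_script"
    #   module_name                 ->  "crc.scripts.my_script"
    #   class_name                  ->  "MyScript"
    # after which execute() calls MyScript().do_task(task, study_id, "some_arg").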

    def evaluate(self, task, expression):
        """
        Evaluate the given expression within the context of the given task,
        and return the result.
        """
        if isinstance(expression, Operator):
            return expression._matches(task)
        else:
            return self._eval(task, expression, **task.data)

    def _eval(self, task, expression, **kwargs):
        # Injecting the task data this way relies on a CPython implementation detail:
        # extra keys added to the mapping returned by locals() remain visible to a
        # bare eval() in the same frame.
        locals().update(kwargs)
        try:
            return eval(expression)
        except NameError as ne:
            raise ApiError.from_task('invalid_expression',
                                     "The expression '%s' you provided has a missing value. %s" %
                                     (expression, str(ne)),
                                     task=task)
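
    # Illustrative example (the task data is assumed): with task.data == {"age": 40},
    # a BPMN condition expression such as "age > 18" evaluates as
    #   self._eval(task, "age > 18", age=40)  ->  True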


class MyCustomParser(BpmnDmnParser):
    """
    A BPMN and DMN parser that can also parse Camunda forms.
    """
    OVERRIDE_PARSER_CLASSES = BpmnDmnParser.OVERRIDE_PARSER_CLASSES
    OVERRIDE_PARSER_CLASSES.update(CamundaParser.OVERRIDE_PARSER_CLASSES)


class WorkflowProcessor(object):
    _script_engine = CustomBpmnScriptEngine()
    _serializer = BpmnSerializer()

    WORKFLOW_ID_KEY = "workflow_id"
    STUDY_ID_KEY = "study_id"
    VALIDATION_PROCESS_KEY = "validate_only"

    def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model.

        If soft_reset is set to true, it will try to use the latest version of the workflow specification.
        If hard_reset is set to true, it will create a new workflow, but embed the data from the last
        completed task in the previous workflow.
        If neither flag is set, it will use the same version of the specification that was used to
        originally create the workflow model.
        """
        self.workflow_model = workflow_model
        orig_version = workflow_model.spec_version
        if soft_reset or workflow_model.spec_version is None:
            self.workflow_model.spec_version = WorkflowProcessor.get_latest_version_string(
                workflow_model.workflow_spec_id)

        spec = self.get_spec(workflow_model.workflow_spec_id, workflow_model.spec_version)
        self.workflow_spec_id = workflow_model.workflow_spec_id
        try:
            self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec)
            self.bpmn_workflow.script_engine = self._script_engine

            workflow_model.total_tasks = len(self.get_all_user_tasks())
            workflow_model.completed_tasks = len(self.get_all_completed_tasks())
            workflow_model.status = self.get_status()
            session.add(workflow_model)
            session.commit()

            # Need to commit twice: first to get a unique id for the workflow model, and
            # a second time to store the serialization so we can maintain this link within
            # the spiff-workflow process.
            self.bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
            workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(self.bpmn_workflow)
            session.add(workflow_model)

        except KeyError as ke:
            if soft_reset:
                # Undo the soft reset.
                workflow_model.spec_version = orig_version
            raise ApiError(code="unexpected_workflow_structure",
                           message="Failed to deserialize workflow '%s' version %s, "
                                   "due to a misplaced or missing task '%s'. " %
                                   (self.workflow_spec_id, workflow_model.spec_version, str(ke)) +
                                   "This is very likely due to a soft reset where there was a structural change.")

        if hard_reset:
            # Now that the spec is loaded, get the data and rebuild the BPMN with the new details.
            workflow_model.spec_version = self.hard_reset()
            workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(self.bpmn_workflow)
            session.add(workflow_model)
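
    # Minimal usage sketch (assumes an existing, persisted WorkflowModel named
    # workflow_model; committing the session is left to the calling service code):
    #   processor = WorkflowProcessor(workflow_model)
    #   processor.do_engine_steps()
    #   task = processor.next_task()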

    def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec):
        if workflow_model.bpmn_workflow_json:
            bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json,
                                                                  workflow_spec=spec)
        else:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            bpmn_workflow.do_engine_steps()
        return bpmn_workflow

    @staticmethod
    def run_master_spec(spec_model, study):
        """Executes a BPMN specification for the given study, without recording any information
        to the database. Useful for running the master specification, which should not persist.
        """
        version = WorkflowProcessor.get_latest_version_string(spec_model.id)
        spec = WorkflowProcessor.get_spec(spec_model.id, version)
        bpmn_workflow = BpmnWorkflow(spec, script_engine=WorkflowProcessor._script_engine)
        bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
        bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
        bpmn_workflow.do_engine_steps()
        if not bpmn_workflow.is_completed():
            raise ApiError("master_spec_not_automatic",
                           "The master spec should only contain fully automated tasks; it failed to complete.")

        return bpmn_workflow.last_task.data
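
    # Usage sketch (master_spec_model and study are hypothetical, pre-loaded model
    # objects): the returned dict is the data of the last completed task, and
    # nothing is written to the database.
    #   data = WorkflowProcessor.run_master_spec(master_spec_model, study)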

    @staticmethod
    def get_parser():
        parser = MyCustomParser()
        return parser

    @staticmethod
    def get_latest_version_string(workflow_spec_id):
        """Version is in the format v[VERSION] (FILE_ID_LIST).
        For example, a single BPMN file with only one version would be
        v1 (12), where 12 is the id of the file data model used to create the
        specification. If multiple files exist, they are added on in
        dot notation to both the version number and the file list. So
        a spec that includes a BPMN, a DMN, and a Word file, all on their first
        version, would be v1.1.1 (12.45.21)."""

        # This could potentially become expensive, as it loads all the data in the data
        # models; in that case we might consider using a deferred loader for the actual
        # data, but trying not to pre-optimize.
        file_data_models = WorkflowProcessor.__get_latest_file_models(workflow_spec_id)
        major_version = 0  # The version of the primary file.
        minor_version = []  # The versions of the minor files, if any.
        file_ids = []
        for file_data in file_data_models:
            file_ids.append(file_data.id)
            if file_data.file_model.primary:
                major_version = file_data.version
            else:
                minor_version.append(file_data.version)
        minor_version.insert(0, major_version)  # Add the major version to the beginning.
        version = ".".join(str(x) for x in minor_version)
        files = ".".join(str(x) for x in file_ids)
        full_version = "v%s (%s)" % (version, files)
        return full_version
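
    # Worked example (hypothetical ids and versions, matching the docstring): a
    # primary BPMN file whose latest FileDataModel has id=12 and version=1, plus a
    # DMN (id=45, version=1) and a Word file (id=21, version=1), could yield
    #   minor_version -> [1, 1, 1],  file_ids -> [12, 45, 21]
    #   full_version  -> "v1.1.1 (12.45.21)"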

    @staticmethod
    def __get_file_models_for_version(workflow_spec_id, version):
        file_id_strings = re.findall(r'\((.*)\)', version)[0].split(".")
        file_ids = [int(i) for i in file_id_strings]
        files = session.query(FileDataModel)\
            .join(FileModel) \
            .filter(FileModel.workflow_spec_id == workflow_spec_id)\
            .filter(FileDataModel.id.in_(file_ids)).all()
        if len(files) != len(file_ids):
            raise ApiError("invalid_version",
                           "The version '%s' of workflow specification '%s' is invalid. " %
                           (version, workflow_spec_id) +
                           "Unable to locate the correct files to recreate it.")
        return files
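
    # Continuing the worked example above, the file ids are recovered from the
    # version string itself:
    #   re.findall(r'\((.*)\)', "v1.1.1 (12.45.21)")[0]  ->  "12.45.21"
    #   file_ids                                         ->  [12, 45, 21]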

    @staticmethod
    def __get_latest_file_models(workflow_spec_id):
        """Returns all the latest files related to a workflow specification."""
        return session.query(FileDataModel) \
            .join(FileModel) \
            .filter(FileModel.workflow_spec_id == workflow_spec_id)\
            .filter(FileDataModel.version == FileModel.latest_version)\
            .order_by(FileModel.id)\
            .all()

    @staticmethod
    def get_spec(workflow_spec_id, version):
        """Returns the requested version of the specification,
        or the latest version if none is specified."""
        parser = WorkflowProcessor.get_parser()
        process_id = None
        file_data_models = WorkflowProcessor.__get_file_models_for_version(workflow_spec_id, version)
        for file_data in file_data_models:
            if file_data.file_model.type == FileType.bpmn:
                bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
                if file_data.file_model.primary:
                    process_id = WorkflowProcessor.get_process_id(bpmn)
                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
            elif file_data.file_model.type == FileType.dmn:
                dmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
        if process_id is None:
            raise ApiError(code="no_primary_bpmn_error",
                           message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id)
        try:
            spec = parser.get_spec(process_id)
        except ValidationException as ve:
            raise ApiError(code="workflow_validation_error",
                           message="Failed to parse Workflow Specification '%s' %s. " %
                                   (workflow_spec_id, version) +
                                   "Error is %s" % str(ve),
                           file_name=ve.filename,
                           task_id=ve.id,
                           tag=ve.tag)
        return spec

    @staticmethod
    def populate_form_with_random_data(task):
        """Populates a task with random data - useful for testing a spec."""

        if not hasattr(task.task_spec, 'form'):
            return

        form_data = {}
        for field in task.task_spec.form.fields:
            if field.type == "enum":
                if len(field.options) > 0:
                    form_data[field.id] = random.choice(field.options)
                else:
                    raise ApiError.from_task("invalid_enum",
                                             "You specified an enumeration field (%s)"
                                             " with no options" % field.id,
                                             task)
            elif field.type == "long":
                form_data[field.id] = random.randint(1, 1000)
            elif field.type == 'boolean':
                form_data[field.id] = random.choice([True, False])
            else:
                form_data[field.id] = WorkflowProcessor._random_string()
        if task.data is None:
            task.data = {}
        task.data.update(form_data)

    @staticmethod
    def _random_string(string_length=10):
        """Generate a random string of fixed length."""
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for _ in range(string_length))

    @staticmethod
    def status_of(bpmn_workflow):
        if bpmn_workflow.is_completed():
            return WorkflowStatus.complete
        user_tasks = bpmn_workflow.get_ready_user_tasks()
        if len(user_tasks) > 0:
            return WorkflowStatus.user_input_required
        else:
            return WorkflowStatus.waiting

    def hard_reset(self):
        """Recreate this workflow, but keep the data from the last completed task and add it
        back into the first task. This may be useful when a workflow specification changes,
        and users need to review all the prior steps, but don't need to re-enter all the
        previous data.

        Returns the new version.
        """
        version = WorkflowProcessor.get_latest_version_string(self.workflow_spec_id)
        spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
        bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
        bpmn_workflow.data = self.bpmn_workflow.data
        for task in bpmn_workflow.get_tasks(SpiffTask.READY):
            task.data = self.bpmn_workflow.last_task.data
        bpmn_workflow.do_engine_steps()
        self.bpmn_workflow = bpmn_workflow
        return version

    def get_status(self):
        return self.status_of(self.bpmn_workflow)

    def get_spec_version(self):
        return self.workflow_model.spec_version

    def do_engine_steps(self):
        self.bpmn_workflow.do_engine_steps()

    def serialize(self):
        return self._serializer.serialize_workflow(self.bpmn_workflow)

    def next_user_tasks(self):
        return self.bpmn_workflow.get_ready_user_tasks()

    def next_task(self):
        """Returns the next task that should be completed, even if there are parallel
        tasks and multiple options are available.
        If the workflow is complete, it will return the final end task.
        """

        # If the whole blessed mess is done, return the end_event task in the tree.
        if self.bpmn_workflow.is_completed():
            for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.ANY_MASK):
                if isinstance(task.task_spec, EndEvent):
                    return task

        # If there are ready tasks to complete, return the next ready task, but return the one
        # in the active parallel path if possible.
        ready_tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)
        if len(ready_tasks) > 0:
            for task in ready_tasks:
                if task.parent == self.bpmn_workflow.last_task:
                    return task
            return ready_tasks[0]

        # If there are no ready tasks, but the workflow isn't complete yet, return the
        # last unfinished task found in the tree.
        next_task = None
        for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.NOT_FINISHED_MASK):
            next_task = task
        return next_task

    def complete_task(self, task):
        self.bpmn_workflow.complete_task_from_id(task.id)
        self.workflow_model.total_tasks = len(self.get_all_user_tasks())
        self.workflow_model.completed_tasks = len(self.get_all_completed_tasks())
        self.workflow_model.status = self.get_status()
        session.add(self.workflow_model)

    def get_data(self):
        return self.bpmn_workflow.data

    def get_workflow_id(self):
        return self.bpmn_workflow.data[self.WORKFLOW_ID_KEY]

    def get_study_id(self):
        return self.bpmn_workflow.data[self.STUDY_ID_KEY]

    def get_ready_user_tasks(self):
        return self.bpmn_workflow.get_ready_user_tasks()

    def get_all_user_tasks(self):
        all_tasks = self.bpmn_workflow.get_tasks(SpiffTask.ANY_MASK)
        return [t for t in all_tasks if not self.bpmn_workflow._is_engine_task(t.task_spec)]

    def get_all_completed_tasks(self):
        all_tasks = self.bpmn_workflow.get_tasks(SpiffTask.ANY_MASK)
        return [t for t in all_tasks
                if not self.bpmn_workflow._is_engine_task(t.task_spec) and t.state in [t.COMPLETED, t.CANCELLED]]

    @staticmethod
    def get_process_id(et_root: ElementTree.Element):
        process_elements = []
        for child in et_root:
            # The attribute value is a string, so compare against "true" explicitly;
            # a bare truthiness check would also accept isExecutable="false".
            if child.tag.endswith('process') and child.attrib.get('isExecutable', 'false') == 'true':
                process_elements.append(child)

        if len(process_elements) == 0:
            raise ValidationException('No executable process tag found')

        # There are multiple root elements.
        if len(process_elements) > 1:

            # Look for the element that has the startEvent in it.
            for e in process_elements:
                this_element: ElementTree.Element = e
                for child_element in list(this_element):
                    if child_element.tag.endswith('startEvent'):
                        return this_element.attrib['id']

            raise ValidationException('No start event found in %s' % et_root.attrib['id'])

        return process_elements[0].attrib['id']
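
    # Illustrative sketch (hypothetical BPMN fragment): for a document whose root
    # contains
    #   <bpmn:process id="my_process" isExecutable="true"> ... </bpmn:process>
    # get_process_id() returns "my_process".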