Squashed 'SpiffWorkflow/' changes from 580939cc..cd4da465

cd4da465 Merge pull request #264 from sartography/bugfix/dmn-equality-with-boolean
414a59eb disambiguate DMN expressions
eea53c91 Merge pull request #263 from sartography/feature/cleanup-task-completion
d248d5b1 execute postscript before other complete hook tasks
c09f1a90 streamline predict & remove some duplicated calls to it
64c21791 remove duplicate calls to update
4ca1076d move task update to _on_complete to ensure data is copied consistently after task related activities are done
d037a7eb small changes for readability
025bc30f Quick patch -- is_executable needs to be accurate immediately.
14d3d8c3 Merge pull request #262 from sartography/feature/parser_info_features
849c223e We are jumping through a lot of complex xml parsing in SpiffWorkflow-Backend because we need to know some basic information about a BPMN process at the moment it is saved.  Rather than do that work in the backend, it seems better to have SpiffWorkflow handle parsing the xml and providing a bit of metadata, including: the messages and correlations defined in the files, whether each process has lanes, whether it is executable, and which messages can start it.

git-subtree-dir: SpiffWorkflow
git-subtree-split: cd4da465e125ca1ae1b57d227bfa324d9d4c507c
burnettk 2022-11-18 10:03:32 -05:00
parent 9bb7af4f43
commit 37b7f3d7cd
25 changed files with 215 additions and 173 deletions
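A rough sketch of how the new metadata can be read off the parser (the attribute and method names come from the diffs below; the file and process names here are hypothetical):

from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser

parser = BpmnParser()
parser.add_bpmn_files_by_glob('my_process.bpmn')  # hypothetical file name

print(parser.messages)      # message id -> name, from bpmn:message elements
print(parser.correlations)  # correlation id -> {'name': ..., 'retrieval_expressions': [...]}

process_parser = parser.get_process_parser('my_process')  # hypothetical process id
print(process_parser.has_lanes())         # True if any lane has a name
print(process_parser.start_messages())    # messageRefs that can start the process
print(process_parser.process_executable)  # isExecutable, now read at parse time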

View File

@@ -120,6 +120,8 @@ class BpmnParser(object):
self.process_parsers_by_name = {}
self.collaborations = {}
self.process_dependencies = set()
self.messages = {}
self.correlations = {}
def _get_parser_class(self, tag):
if tag in self.OVERRIDE_PARSER_CLASSES:
@@ -179,6 +181,8 @@ class BpmnParser(object):
self._add_processes(bpmn, filename)
self._add_collaborations(bpmn)
self._add_messages(bpmn)
self._add_correlations(bpmn)
def _add_processes(self, bpmn, filename=None):
for process in bpmn.xpath('.//bpmn:process', namespaces=self.namespaces):
@@ -192,6 +196,43 @@
name = collaboration.get('id')
self.collaborations[name] = [ participant.get('processRef') for participant in collaboration_xpath('.//bpmn:participant') ]
def _add_messages(self, bpmn):
for message in bpmn.xpath('.//bpmn:message', namespaces=self.namespaces):
if message.attrib.get("id") is None:
raise ValidationException(
"Message identifier is missing from bpmn xml"
)
self.messages[message.attrib.get("id")] = message.attrib.get("name")
def _add_correlations(self, bpmn):
for correlation in bpmn.xpath('.//bpmn:correlationProperty', namespaces=self.namespaces):
correlation_identifier = correlation.attrib.get("id")
if correlation_identifier is None:
raise ValidationException(
"Correlation identifier is missing from bpmn xml"
)
correlation_property_retrieval_expressions = correlation.xpath(
"//bpmn:correlationPropertyRetrievalExpression", namespaces = self.namespaces)
if not correlation_property_retrieval_expressions:
raise ValidationException(
f"Correlation is missing correlation property retrieval expressions: {correlation_identifier}"
)
retrieval_expressions = []
for cpre in correlation_property_retrieval_expressions:
message_model_identifier = cpre.attrib.get("messageRef")
if message_model_identifier is None:
raise ValidationException(
f"Message identifier is missing from correlation property: {correlation_identifier}"
)
children = cpre.getchildren()
expression = children[0].text if len(children) > 0 else None
retrieval_expressions.append({"messageRef": message_model_identifier,
"expression": expression})
self.correlations[correlation_identifier] = {
"name": correlation.attrib.get("name"),
"retrieval_expressions": retrieval_expressions
}
def _find_dependencies(self, process):
"""Locate all calls to external BPMN, and store their ids in our list of dependencies"""
for call_activity in process.xpath('.//bpmn:callActivity', namespaces=self.namespaces):
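A sketch of what the dependency scan does with each match (the hunk above shows only the loop header; reading calledElement is an assumption consistent with BPMN call activities, not a verbatim quote of the method body):

for call_activity in process.xpath('.//bpmn:callActivity', namespaces=self.namespaces):
    called = call_activity.get('calledElement')  # id of the external process
    if called is not None:
        self.process_dependencies.add(called)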

View File

@@ -44,7 +44,7 @@ class ProcessParser(NodeParser):
self.parsed_nodes = {}
self.lane = lane
self.spec = None
self.process_executable = True
self.process_executable = self.is_executable()
def get_name(self):
"""
@@ -52,6 +52,35 @@
"""
return self.node.get('name', default=self.get_id())
def has_lanes(self) -> bool:
"""Returns true if this process has one or more named lanes """
elements = self.xpath("//bpmn:lane")
for el in elements:
if el.get("name"):
return True
return False
def is_executable(self) -> bool:
return self.node.get('isExecutable', 'true') == 'true'
def start_messages(self):
""" This returns a list of messages that would cause this
process to start. """
messages = []
message_event_definitions = self.xpath(
"//bpmn:startEvent/bpmn:messageEventDefinition")
for message_event_definition in message_event_definitions:
message_model_identifier = message_event_definition.attrib.get(
"messageRef"
)
if message_model_identifier is None:
raise ValidationException(
"Could not find messageRef from message event definition: {message_event_definition}"
)
messages.append(message_model_identifier)
return messages
def parse_node(self, node):
"""
Parses the specified child task node, and returns the task spec. This
@@ -72,7 +101,6 @@
def _parse(self):
# Here we only look at the top level; there will be another
# bpmn:startEvent if we have a subworkflow task
self.process_executable = self.node.get('isExecutable', 'true') == 'true'
start_node_list = self.xpath('./bpmn:startEvent')
if not start_node_list and self.process_executable:
raise ValidationException("No start event found", node=self.node, filename=self.filename)

View File

@@ -81,12 +81,6 @@ class MultiInstanceTask(TaskSpec):
TaskSpec.__init__(self, wf_spec, name, **kwargs)
# DO NOT OVERRIDE THE SPEC TYPE.
# @property
# def spec_type(self):
# return 'MultiInstance Task'
def _find_my_task(self, task):
for thetask in task.workflow.task_tree:
if thetask.thread_id != task.thread_id:
@@ -113,17 +107,6 @@
new_task.triggered = True
output._predict(new_task)
def _check_inputs(self, my_task):
if self.collection is None:
return
# look for variable in context, if we don't find it, default to 1
variable = valueof(my_task, self.times, 1)
if self.times.name == self.collection.name and type(variable) == type([]):
raise WorkflowTaskExecException(my_task,
'If we are updating a collection,'
' then the collection must be a '
'dictionary.')
def _get_loop_completion(self,my_task):
if not self.completioncondition == None:
terminate = my_task.workflow.script_engine.evaluate(my_task,self.completioncondition)
@@ -154,17 +137,6 @@
return len(variable.keys())
return 1 # we shouldn't ever get here, but just in case return a sane value.
def _get_current_var(self, my_task, pos):
variable = valueof(my_task, self.times, 1)
if is_number(variable):
return pos
if isinstance(variable,list) and len(variable) >= pos:
return variable[pos - 1]
elif isinstance(variable,dict) and len(list(variable.keys())) >= pos:
return variable[list(variable.keys())[pos - 1]]
else:
return pos
def _get_predicted_outputs(self, my_task):
split_n = self._get_count(my_task)
@@ -418,52 +390,60 @@ class MultiInstanceTask(TaskSpec):
if my_task.task_spec.prevtaskclass in classes.keys() and not terminate:
super()._on_complete_hook(my_task)
def _merge_element_variable(self,my_task,collect,runtimes,colvarname):
# if we are updating the same collection as was our loopcardinality
# then all the keys should be there and we can use the sorted keylist
# if not, we use an integer - we should be guaranteed that the
# collection is a dictionary
def _check_inputs(self, my_task):
if self.collection is None:
return
# look for variable in context, if we don't find it, default to 1
variable = valueof(my_task, self.times, 1)
if self.times.name == self.collection.name and type(variable) == type([]):
raise WorkflowTaskExecException(my_task,
'If we are updating a collection, then the collection must be a dictionary.')
def _get_current_var(self, my_task, pos):
variable = valueof(my_task, self.times, 1)
if is_number(variable):
return pos
if isinstance(variable,list) and len(variable) >= pos:
return variable[pos - 1]
elif isinstance(variable,dict) and len(list(variable.keys())) >= pos:
return variable[list(variable.keys())[pos - 1]]
else:
return pos
def _merge_element_variable(self, my_task, collect, runtimes):
if self.collection is not None and self.times.name == self.collection.name:
# Update an existing collection (we used the collection as the cardinality)
keys = list(collect.keys())
if len(keys) < runtimes:
msg = f"There is a mismatch between runtimes and the number " \
f"items in the collection, please check for empty " \
f"collection {self.collection.name}."
raise WorkflowTaskExecException(my_task, msg)
runtimesvar = keys[runtimes - 1]
else:
# Use an integer (for arrays)
runtimesvar = runtimes
if self.elementVar in my_task.data and isinstance(my_task.data[self.elementVar], dict):
collect[str(runtimesvar)] = DeepMerge.merge(collect.get(runtimesvar, {}),
copy.copy(my_task.data[self.elementVar]))
collect[str(runtimesvar)] = DeepMerge.merge(
collect.get(runtimesvar, {}),
copy.copy(my_task.data[self.elementVar])
)
my_task.data = DeepMerge.merge(my_task.data,
gendict(colvarname.split('/'), collect))
def _update_sibling_data(self, my_task, runtimes, runcount, colvarname, collect):
def _update_sibling_data(self,my_task,runtimes,runcount,colvarname,collect):
if (runtimes < runcount) and not my_task.terminate_current_loop and self.loopTask:
my_task._set_state(TaskState.READY)
my_task._set_internal_data(runtimes=runtimes + 1)
my_task.data[self.elementVar] = self._get_current_var(my_task, runtimes + 1)
element_var_data = None
else:
# The element var data should not be passed on to children
# but will add this back onto this task later.
element_var_data = my_task.data.pop(self.elementVar, None)
my_task.data.pop(self.elementVar, None)
# if this is a parallel mi - then update all siblings with the
# current data
if not self.isSequential:
for task in my_task.parent.children:
task.data = DeepMerge.merge(
task.data,
gendict(colvarname.split('/'),
collect)
)
return element_var_data
for task in my_task.parent.children:
task.data = DeepMerge.merge(
task.data,
gendict(colvarname.split('/'), collect)
)
def _on_complete_hook(self, my_task):
# do special stuff for non-user tasks
@@ -486,9 +466,9 @@ class MultiInstanceTask(TaskSpec):
collect = valueof(my_task, self.collection, {})
self._merge_element_variable(my_task,collect,runtimes,colvarname)
self._merge_element_variable(my_task, collect, runtimes)
element_var_data = self._update_sibling_data(my_task,runtimes,runcount,colvarname,collect)
self._update_sibling_data(my_task, runtimes, runcount, colvarname, collect)
# please see MultiInstance code for previous version
outputs = []
@@ -497,14 +477,6 @@
if not isinstance(my_task.task_spec,SubWorkflowTask):
my_task._sync_children(outputs, TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
# If removed, add the element_var_data back onto this task, after
# updating the children.
if(element_var_data):
my_task.data[self.elementVar] = element_var_data
def serialize(self, serializer):
return serializer.serialize_multi_instance(self)
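A toy illustration of the two keying modes the removed comment above describes, now folded into _merge_element_variable (plain Python; the variable names are mine):

collect = {'a': {}, 'b': {}}
runtimes = 2

# Cardinality came from this same collection: key results by its own keys.
key_for_dict = list(collect.keys())[runtimes - 1]   # -> 'b'

# Otherwise (e.g. an array or a count): key by the 1-based iteration number.
key_for_list = runtimes                             # -> 2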

View File

@@ -66,8 +66,6 @@ class SubWorkflowTask(BpmnSpecMixin):
def _on_complete_hook(self, my_task):
BpmnSpecMixin._on_complete_hook(self, my_task)
for child in my_task.children:
child.task_spec._update(child)
def _on_cancel(self, my_task):
subworkflow = my_task.workflow.get_subprocess(my_task)

View File

@@ -146,8 +146,6 @@ class UnstructuredJoin(Join, BpmnSpecMixin):
def _update_hook(self, my_task):
if my_task._is_predicted():
self._predict(my_task)
if not my_task.parent._is_finished():
return

View File

@@ -95,7 +95,8 @@ class DMNEngine:
# If we get here, we need to check whether the match expression includes
# an operator or if can use '=='
needs_eq = self.needs_eq(script_engine, match_expr)
expr = input_expr + ' == ' + match_expr if needs_eq else input_expr + match_expr
# Disambiguate cases like a == 0 == True when we add '=='
expr = f'({input_expr}) == ({match_expr})' if needs_eq else input_expr + match_expr
return script_engine.evaluate(task, expr)
@staticmethod
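The parentheses matter because Python chains comparisons. A quick sketch of the failure mode, assuming an input expression like a == 0 matched against True:

a = 0
print(a == 0 == True)      # False: parsed as (a == 0) and (0 == True)
print((a == 0) == (True))  # True: what the decision table intended

The new f'({input_expr}) == ({match_expr})' form produces the second shape.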

View File

@@ -88,8 +88,6 @@ class ExclusiveChoice(MultiChoice):
f'No conditions satisfied for {my_task.task_spec.name}')
my_task._sync_children([output], TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
def serialize(self, serializer):
return serializer.serialize_exclusive_choice(self)

View File

@@ -134,8 +134,6 @@ class MultiChoice(TaskSpec):
outputs.append(self._wf_spec.get_task_spec_from_name(output))
my_task._sync_children(outputs, TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
def serialize(self, serializer):
return serializer.serialize_multi_choice(self)

View File

@@ -102,8 +102,6 @@ class MultiInstance(TaskSpec):
def _on_complete_hook(self, my_task):
outputs = self._get_predicted_outputs(my_task)
my_task._sync_children(outputs, TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
def serialize(self, serializer):
return serializer.serialize_multi_instance(self)

View File

@@ -76,12 +76,14 @@ class SubWorkflow(TaskSpec):
self, 'File does not exist: %s' % self.file)
def _predict_hook(self, my_task):
# Modifying the task spec is a TERRIBLE idea, but if we don't do it, sync_children won't work
outputs = [task.task_spec for task in my_task.children]
for output in self.outputs:
if output not in outputs:
outputs.insert(0, output)
if my_task._is_definite():
my_task._sync_children(outputs, TaskState.FUTURE)
# This prevents errors with sync children
my_task._sync_children(outputs, TaskState.LIKELY)
else:
my_task._sync_children(outputs, my_task.state)
@@ -107,10 +109,7 @@
def _integrate_subworkflow_tree(self, my_task, subworkflow):
# Integrate the tree of the subworkflow into the tree of this workflow.
my_task._sync_children(self.outputs, TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
child._inherit_data()
my_task._sync_children(self.outputs, TaskState.LIKELY)
for child in subworkflow.task_tree.children:
my_task.children.insert(0, child)
child.parent = my_task
@@ -121,10 +120,18 @@
for child in subworkflow.task_tree.children:
for assignment in self.in_assign:
assignment.assign(my_task, child)
self._predict(my_task)
for child in subworkflow.task_tree.children:
child.task_spec._update(child)
# Instead of completing immediately, we'll wait for the subworkflow to complete
my_task._set_state(TaskState.WAITING)
def _update_hook(self, my_task):
subworkflow = my_task._get_internal_data('subworkflow')
if subworkflow is None:
# On the first update, we have to create the subworkflow
super()._update_hook(my_task)
elif subworkflow.is_completed():
# Then wait until it finishes to complete
my_task.complete()
def _on_subworkflow_completed(self, subworkflow, my_task):
# Assign variables, if so requested.
@@ -138,11 +145,6 @@
# Alright, abusing that hook is just evil but it works.
child.task_spec._update_hook(child)
def _on_complete_hook(self, my_task):
for child in my_task.children:
if isinstance(child.task_spec, StartTask):
child.task_spec._update(child)
def serialize(self, serializer):
return serializer.serialize_sub_workflow(self)
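The change above reduces to a create-then-wait pattern: the first update creates the subworkflow and leaves the task WAITING; a later update completes it once the subworkflow finishes. A self-contained sketch of that pattern (plain Python, not the SpiffWorkflow API):

class FakeSubprocess:
    def __init__(self):
        self.done = False
    def is_completed(self):
        return self.done

class SubworkflowTask:
    def __init__(self):
        self.subworkflow = None
        self.state = 'FUTURE'
    def update(self):
        if self.subworkflow is None:
            # First update: create and start the subworkflow, then wait.
            self.subworkflow = FakeSubprocess()
            self.state = 'WAITING'
        elif self.subworkflow.is_completed():
            # A later update finds the subworkflow finished and completes.
            self.state = 'COMPLETED'

task = SubworkflowTask()
task.update()                # -> WAITING
task.subworkflow.done = True
task.update()                # -> COMPLETED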

View File

@@ -133,8 +133,6 @@ class ThreadSplit(TaskSpec):
for i in range(split_n):
outputs.append(self.thread_starter)
my_task._sync_children(outputs, TaskState.FUTURE)
for child in my_task.children:
child.task_spec._update(child)
def serialize(self, serializer):
return serializer.serialize_thread_split(self)

View File

@@ -242,24 +242,19 @@ class TaskSpec(object):
:type looked_ahead: integer
:param looked_ahead: The depth of the predicted path so far.
"""
if my_task._is_finished():
return
if seen is None:
seen = []
elif self in seen:
return
if not my_task._is_finished():
self._predict_hook(my_task)
self._predict_hook(my_task)
if not my_task._is_definite():
if looked_ahead + 1 >= self.lookahead:
return
seen.append(self)
look_ahead = my_task._is_definite() or looked_ahead + 1 < self.lookahead
for child in my_task.children:
child.task_spec._predict(child, seen[:], looked_ahead + 1)
if not child._is_finished() and child not in seen and look_ahead:
child.task_spec._predict(child, seen[:], looked_ahead + 1)
def _predict_hook(self, my_task):
# If the task's status is not predicted, we default to FUTURE
# for all it's outputs.
# If the task's status is not predicted, we default to FUTURE for all its outputs.
# Otherwise, copy my own state to the children.
if my_task._is_definite():
best_state = TaskState.FUTURE
@@ -278,6 +273,12 @@ class TaskSpec(object):
completes it makes sure to call this method so we can react.
"""
my_task._inherit_data()
# We were doing this in _update_hook, but to me that seems inconsistent with the spirit
# of the hook functions. Moving it here allows removal of some repeated calls (overridden
# hook methods still need to do these things)
if my_task._is_predicted():
self._predict(my_task)
self.entered_event.emit(my_task.workflow, my_task)
self._update_hook(my_task)
def _update_hook(self, my_task):
@@ -290,11 +291,8 @@ class TaskSpec(object):
Returning non-False will cause the task to go into READY.
Returning any other value will cause no action.
"""
if my_task._is_predicted():
self._predict(my_task)
if not my_task.parent._is_finished():
return
self.entered_event.emit(my_task.workflow, my_task)
# If this actually did what the documentation said (returned a value indicating
# that the task was ready), then a lot of things might be easier.
my_task._ready()
def _on_ready(self, my_task):
@@ -387,21 +385,14 @@ class TaskSpec(object):
"""
assert my_task is not None
if my_task.workflow.debug:
print("Executing %s: %s (%s)" % (
my_task.task_spec.__class__.__name__,
my_task.get_name(), my_task.get_description()))
# We have to set the last task here, because the on_complete_hook
# of a loopback task may overwrite what the last_task will be.
my_task.workflow.last_task = my_task
self._on_complete_hook(my_task)
for child in my_task.children:
child.task_spec._update(child)
my_task.workflow._task_completed_notify(my_task)
if my_task.workflow.debug:
if hasattr(my_task.workflow, "outer_workflow"):
my_task.workflow.outer_workflow.task_tree.dump()
self.completed_event.emit(my_task.workflow, my_task)
return True
@@ -414,9 +405,7 @@ class TaskSpec(object):
:rtype: bool
:returns: True on success, False otherwise.
"""
# If we have more than one output, implicitly split.
for child in my_task.children:
child.task_spec._update(child)
pass
@abstractmethod
def serialize(self, serializer, **kwargs):
@@ -478,8 +467,6 @@ class TaskSpec(object):
:rtype: TaskSpec
:returns: The task specification instance.
"""
print(s_state)
print(wf_spec)
out = cls(wf_spec,s_state.get('name'))
out.id = s_state.get('id')
out.name = s_state.get('name')
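Pulling the scattered TaskSpec hunks together, the new division of labour looks roughly like this (a condensed sketch of the methods after this commit, not verbatim code):

# methods of TaskSpec, condensed
def _update(self, my_task):
    my_task._inherit_data()
    if my_task._is_predicted():
        self._predict(my_task)          # moved here from _update_hook
    self.entered_event.emit(my_task.workflow, my_task)
    self._update_hook(my_task)

def _update_hook(self, my_task):
    my_task._ready()                    # subclasses override to gate readiness

def _on_complete(self, my_task):
    my_task.workflow.last_task = my_task
    self._on_complete_hook(my_task)
    for child in my_task.children:
        child.task_spec._update(child)  # children now updated in one place
    my_task.workflow._task_completed_notify(my_task)
    self.completed_event.emit(my_task.workflow, my_task)
    return True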

View File

@@ -39,6 +39,6 @@ class SpiffBpmnTask(BpmnSpecMixin):
self.execute_script(my_task, self.prescript)
def _on_complete_hook(self, my_task):
super()._on_complete_hook(my_task)
if self.postscript is not None:
self.execute_script(my_task, self.postscript)
super()._on_complete_hook(my_task)

View File

@@ -522,46 +522,42 @@ class Task(object, metaclass=DeprecatedMetaTask):
"""
if task_specs is None:
raise ValueError('"task_specs" argument is None')
add = task_specs[:]
new_children = task_specs[:]
# If a child task_spec is also an ancestor, we are looping back,
# replace those specs with a loopReset task.
root_task = self._get_root()
for index, task_spec in enumerate(add):
for index, task_spec in enumerate(new_children):
ancestor_task = self._find_ancestor(task_spec)
if ancestor_task and ancestor_task != root_task:
destination = ancestor_task
new_spec = self.workflow.get_reset_task_spec(destination)
new_spec.outputs = []
new_spec.inputs = task_spec.inputs
add[index] = new_spec
new_children[index] = new_spec
# Create a list of all children that are no longer needed.
remove = []
unneeded_children = []
for child in self.children:
# Triggered tasks are never removed.
if child.triggered:
continue
# Check whether the task needs to be removed.
if child.task_spec in add:
add.remove(child.task_spec)
# If the task already exists, remove it from to-be-added
if child.task_spec in new_children:
new_children.remove(child.task_spec)
# We should set the state here but that breaks everything
continue
# Non-predicted tasks must not be removed, so they HAVE to be in
# the given task spec list.
# Definite tasks must not be removed, so they HAVE to be in the given task spec list.
if child._is_definite():
raise WorkflowException(self.task_spec,
'removal of non-predicted child %s' %
repr(child))
remove.append(child)
raise WorkflowException(self.task_spec, f'removal of non-predicted child {child}')
unneeded_children.append(child)
# Remove and add the children accordingly.
for child in remove:
for child in unneeded_children:
self.children.remove(child)
for task_spec in add:
for task_spec in new_children:
self._add_child(task_spec, state)
def _set_likely_task(self, task_specs):
@@ -574,7 +570,6 @@ class Task(object, metaclass=DeprecatedMetaTask):
if child._is_definite():
continue
child._set_state(TaskState.LIKELY)
return
def _is_descendant_of(self, parent):
"""

View File

@@ -19,11 +19,15 @@ class BpmnWorkflowTestCase(unittest.TestCase):
serializer = BpmnWorkflowSerializer(wf_spec_converter)
def load_workflow_spec(self, filename, process_name, validate=True):
def get_parser(self, filename, validate=True):
f = os.path.join(os.path.dirname(__file__), 'data', filename)
validator = BpmnValidator() if validate else None
parser = TestBpmnParser(validator=validator)
parser.add_bpmn_files_by_glob(f)
return parser
def load_workflow_spec(self, filename, process_name, validate=True):
parser = self.get_parser(filename, validate)
top_level_spec = parser.get_spec(process_name)
subprocesses = parser.get_subprocess_specs(process_name)
return top_level_spec, subprocesses

View File

@@ -6,10 +6,22 @@ from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
class CollaborationTest(BpmnWorkflowTestCase):
def testParserProvidesInfoOnMessagesAndCorrelations(self):
parser = self.get_parser('collaboration.bpmn')
self.assertEqual(list(parser.messages.keys()), ['love_letter', 'love_letter_response'])
self.assertEqual(parser.correlations,
{'lover_name': {'name': "Lover's Name",
'retrieval_expressions': [
{'expression': 'lover_name',
'messageRef': 'love_letter'},
{'expression': 'from_name',
'messageRef': 'love_letter_response'}]}}
)
def testCollaboration(self):
spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration')
# Only executable processes should be started
self.assertIn('process_buddy', subprocesses)
self.assertNotIn('random_person_process', subprocesses)
@@ -122,4 +134,4 @@ class CollaborationTest(BpmnWorkflowTestCase):
start = self.workflow.get_tasks_from_spec_name('Start')[0]
start.data['lover_name'] = 'Peggy'
self.workflow.do_engine_steps()
self.save_restore()
self.save_restore()

View File

@@ -19,6 +19,12 @@ class SwimLaneTest(BpmnWorkflowTestCase):
spec, subprocesses = self.load_workflow_spec('lanes.bpmn','lanes')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testBpmnParserKnowsLanesExist(self):
parser = self.get_parser('lanes.bpmn')
self.assertTrue(parser.get_process_parser('lanes').has_lanes())
parser = self.get_parser('random_fact.bpmn')
self.assertFalse(parser.get_process_parser('random_fact').has_lanes())
def testRunThroughHappy(self):
self.workflow.do_engine_steps()

View File

@@ -23,13 +23,17 @@ class BaseTestCase(BpmnWorkflowTestCase):
serializer = BpmnWorkflowSerializer(wf_spec_converter)
def load_workflow_spec(self, filename, process_name, dmn_filename=None):
bpmn = os.path.join(os.path.dirname(__file__), 'data', filename)
def get_parser(self, filename, dmn_filename=None):
f = os.path.join(os.path.dirname(__file__), 'data', filename)
parser = CamundaParser()
parser.add_bpmn_files_by_glob(bpmn)
parser.add_bpmn_files_by_glob(f)
if dmn_filename is not None:
dmn = os.path.join(os.path.dirname(__file__), 'data', 'dmn', dmn_filename)
parser.add_dmn_files_by_glob(dmn)
return parser
def load_workflow_spec(self, filename, process_name, dmn_filename=None):
parser = self.get_parser(filename, dmn_filename)
top_level_spec = parser.get_spec(process_name)
subprocesses = parser.get_subprocess_specs(process_name)
return top_level_spec, subprocesses

View File

@@ -70,11 +70,9 @@ class MultiInstanceParallelArrayTest(BaseTestCase):
{"CurrentFamilyMember": {"Birthdate": "10/05/1985" + str(x)}})
self.workflow.do_engine_steps()
self.workflow.complete_task_from_id(task.id)
# The data should still be available on the current task.
self.assertEqual({'FirstName': "The Funk #%i" % x,
'Birthdate': '10/05/1985' + str(x)},
self.workflow.get_task(task.id)
.data['CurrentFamilyMember'])
# We used to check that the current data variable was available in the task,
# but there's no reason to preserve it after the task completes. We removed it
# in some cases and left it in others, which just adds to the confusion.
self.workflow.do_engine_steps()
if save_restore:
self.reload_save_restore()

View File

@@ -44,8 +44,7 @@ class ResetTokenTestMIParallel(BaseTestCase):
self.workflow.do_engine_steps()
if save_restore: self.save_restore()
self.assertEqual({'current': {'A': 'y'},
'do_step': 'Yes',
self.assertEqual({'do_step': 'Yes',
'output': {'1': {'A': 'x'}, '2': {'A': 'y'}, '3': {'A': 'z'}}},
self.workflow.last_task.data)
@@ -66,8 +65,7 @@ class ResetTokenTestMIParallel(BaseTestCase):
self.assertTrue(self.workflow.is_completed())
self.assertEqual({'current': {'A': 'x'},
'do_step': 'Yes',
self.assertEqual({'do_step': 'Yes',
'C': 'c',
'output': {'1': {'A': 'a1'},
'2': {'A': 'y'},
@@ -75,11 +73,6 @@ class ResetTokenTestMIParallel(BaseTestCase):
self.workflow.last_task.data)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestMIParallel)

View File

@@ -14,6 +14,16 @@ class StartMessageTest(BaseTestCase):
self.spec, self.subprocesses = self.load_workflow_spec('message_test.bpmn', 'ThrowCatch')
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
def testParserCanReturnStartMessages(self):
parser = self.get_parser('message_test.bpmn')
self.assertEqual(
parser.process_parsers['ThrowCatch'].start_messages(), ['Message_1rkbi27'])
parser = self.get_parser('random_fact.bpmn')
self.assertEqual(
parser.process_parsers['random_fact'].start_messages(), [])
def testRunThroughHappy(self):
self.actual_test(save_restore=False)

View File

@@ -1,11 +1,11 @@
Start
first
excl_choice_1
sub_workflow_1
Start
first
excl_choice_1
last
End
sub_workflow_1
last
End

View File

@@ -1,10 +1,10 @@
Start
first
sub_workflow_1
Start
first
last
End
sub_workflow_1
second
join
last

View File

@@ -38,6 +38,8 @@ class TaskSpecTest(unittest.TestCase):
def do_next_unique_task(self, name):
# This method asserts that there is only one ready task (the specified
# one) and then completes it
for task in self.workflow.get_tasks(TaskState.WAITING):
task.task_spec._update(task)
ready_tasks = self.workflow.get_tasks(TaskState.READY)
self.assertEqual(1, len(ready_tasks))
task = ready_tasks[0]
@@ -58,12 +60,13 @@ class TaskSpecTest(unittest.TestCase):
self.load_workflow_spec('data', 'block_to_subworkflow.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('sub_workflow_1')
# Inner:
# Inner. The subworkflow task will complete automatically after the subworkflow completes
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('last')
self.do_next_unique_task('End')
# Back to outer:
self.do_next_unique_task('last')
self.do_next_unique_task('End')
@@ -72,7 +75,7 @@ class TaskSpecTest(unittest.TestCase):
self.load_workflow_spec('data', 'subworkflow_to_block.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('sub_workflow_1')
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
@@ -86,8 +89,9 @@ class TaskSpecTest(unittest.TestCase):
self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_named_step('second', ['sub_workflow_1'])
self.do_next_unique_task('sub_workflow_1')
# The subworkflow task now sets its child tasks to READY and waits
self.do_next_named_step('second', ['Start'])
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
@@ -102,8 +106,8 @@ class TaskSpecTest(unittest.TestCase):
self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_named_step('second', ['sub_workflow_1'])
self.do_next_unique_task('sub_workflow_1')
self.do_next_named_step('second', ['Start'])
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
from builtins import str
from builtins import range
import time
from SpiffWorkflow.task import Task, TaskState
from SpiffWorkflow.workflow import Workflow
@@ -33,7 +31,6 @@ def on_reached_cb(workflow, task, taken_path):
props = []
for key, value in list(task.task_spec.data.items()):
props.append('='.join((key, str(value))))
# print "REACHED:", task.get_name(), atts, props
# Store the list of data in the workflow.
atts = ';'.join(atts)