Bug/process locking (#375)

* re-enable the move canvas and zoom modules, and try hard to get the display to zoom-to-fit consistently, since from_xml does not trigger the "import.done" event where the resize happens.

* Clicking on a timestamp now reloads the page so that we observe the diagram at the correct point in time.

* Use a grid rather than a stack for the filter buttons, make them a consistent width, and force the close "x" button to the far right.

* Better handling of the large number of metadata fields, both in the process instance list table and in the metadata section on the process instance view page.

* Using DL rather than Carbon's grid, because my mind was starting to break.
* Process instance List Tables are set to optimize their max width for readability.

* Ensure the diagram editor is top aligned — otherwise, when the properties panel gets very long (as with recent additions to multi-instance), we end up with odd empty space above a center-aligned canvas.

* wip

* updated interstitial to exit at the correct time if already locked

---------

Co-authored-by: danfunk <daniel.h.funk@gmail.com>
Co-authored-by: jasquat <jasquat@users.noreply.github.com>
This commit is contained in:
jasquat 2023-07-06 16:21:02 -04:00 committed by GitHub
parent 09ae7ffe00
commit 35c13212ce
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed files with 36 additions and 10 deletions

View File

@ -52,6 +52,7 @@ from spiffworkflow_backend.services.authorization_service import HumanTaskNotFou
from spiffworkflow_backend.services.authorization_service import UserDoesNotHaveAccessToTaskError from spiffworkflow_backend.services.authorization_service import UserDoesNotHaveAccessToTaskError
from spiffworkflow_backend.services.file_system_service import FileSystemService from spiffworkflow_backend.services.file_system_service import FileSystemService
from spiffworkflow_backend.services.process_instance_processor import ProcessInstanceProcessor from spiffworkflow_backend.services.process_instance_processor import ProcessInstanceProcessor
from spiffworkflow_backend.services.process_instance_queue_service import ProcessInstanceIsAlreadyLockedError
from spiffworkflow_backend.services.process_instance_queue_service import ProcessInstanceQueueService from spiffworkflow_backend.services.process_instance_queue_service import ProcessInstanceQueueService
from spiffworkflow_backend.services.process_instance_service import ProcessInstanceService from spiffworkflow_backend.services.process_instance_service import ProcessInstanceService
from spiffworkflow_backend.services.process_instance_tmp_service import ProcessInstanceTmpService from spiffworkflow_backend.services.process_instance_tmp_service import ProcessInstanceTmpService
@ -369,7 +370,7 @@ def _render_instructions_for_end_user(task_model: TaskModel, extensions: dict |
def _interstitial_stream( def _interstitial_stream(
process_instance: ProcessInstanceModel, execute_tasks: bool = True process_instance: ProcessInstanceModel, execute_tasks: bool = True, is_locked: bool = False
) -> Generator[str, str | None, None]: ) -> Generator[str, str | None, None]:
def get_reportable_tasks() -> Any: def get_reportable_tasks() -> Any:
return processor.bpmn_process_instance.get_tasks( return processor.bpmn_process_instance.get_tasks(
@ -413,19 +414,42 @@ def _interstitial_stream(
processor.do_engine_steps(execution_strategy_name="one_at_a_time") processor.do_engine_steps(execution_strategy_name="one_at_a_time")
processor.do_engine_steps(execution_strategy_name="run_until_user_message") processor.do_engine_steps(execution_strategy_name="run_until_user_message")
processor.save() # Fixme - maybe find a way not to do this on every loop? processor.save() # Fixme - maybe find a way not to do this on every loop?
processor.refresh_waiting_tasks()
except WorkflowTaskException as wfe: except WorkflowTaskException as wfe:
api_error = ApiError.from_workflow_exception( api_error = ApiError.from_workflow_exception(
"engine_steps_error", "Failed to complete an automated task.", exp=wfe "engine_steps_error", "Failed to complete an automated task.", exp=wfe
) )
yield _render_data("error", api_error) yield _render_data("error", api_error)
return return
if execute_tasks is False:
break # path used by the interstitial page while executing tasks - ie the background processor is not executing them
processor.refresh_waiting_tasks()
ready_engine_task_count = get_ready_engine_step_count(processor.bpmn_process_instance) ready_engine_task_count = get_ready_engine_step_count(processor.bpmn_process_instance)
if execute_tasks and ready_engine_task_count == 0:
break
if not execute_tasks:
# path used by the process instance show page to display most recent instructions
if not is_locked:
break
# HACK: db.session.refresh doesn't seem to refresh without rollback or commit so use rollback.
# we are not executing tasks so there shouldn't be anything to write anyway, so no harm in rollback.
# https://stackoverflow.com/a/20361132/6090676
# note that the thing changing the data in this case is probably the background worker,
# and it is definitely committing its changes, but since we have already queried the data,
# our session has stale results without the rollback.
db.session.rollback()
db.session.refresh(process_instance)
# if process instance is done or blocked by a human task, then break out
if is_locked and process_instance.status not in ["not_started", "waiting"]:
break
# only get a new processor if we are not executing tasks otherwise we are the ones updating it
processor = ProcessInstanceProcessor(process_instance)
tasks = get_reportable_tasks() tasks = get_reportable_tasks()
if ready_engine_task_count == 0:
break # No more tasks to report
spiff_task = processor.next_task() spiff_task = processor.next_task()
if spiff_task is not None: if spiff_task is not None:
@ -454,14 +478,16 @@ def _dequeued_interstitial_stream(
) -> Generator[str | None, str | None, None]: ) -> Generator[str | None, str | None, None]:
try: try:
process_instance = _find_process_instance_by_id_or_raise(process_instance_id) process_instance = _find_process_instance_by_id_or_raise(process_instance_id)
ProcessInstanceProcessor(process_instance)
# TODO: currently this just redirects back to home if the process has not been started # TODO: currently this just redirects back to home if the process has not been started
# need something better to show? # need something better to show?
if execute_tasks: if execute_tasks:
if not ProcessInstanceQueueService.is_enqueued_to_run_in_the_future(process_instance): try:
with ProcessInstanceQueueService.dequeued(process_instance): if not ProcessInstanceQueueService.is_enqueued_to_run_in_the_future(process_instance):
yield from _interstitial_stream(process_instance, execute_tasks=execute_tasks) with ProcessInstanceQueueService.dequeued(process_instance):
yield from _interstitial_stream(process_instance, execute_tasks=execute_tasks)
except ProcessInstanceIsAlreadyLockedError:
yield from _interstitial_stream(process_instance, execute_tasks=False, is_locked=True)
else: else:
# no reason to get a lock if we are reading only # no reason to get a lock if we are reading only
yield from _interstitial_stream(process_instance, execute_tasks=execute_tasks) yield from _interstitial_stream(process_instance, execute_tasks=execute_tasks)