queue process instances from the apscheduler instead of running them when using celery (#1855)

Co-authored-by: jasquat <jasquat@users.noreply.github.com>
jasquat 2024-07-02 11:20:29 -04:00 committed by GitHub
parent d29683bf9e
commit 7dc15fbebb
GPG Key ID: B5690EEEBB952194
1 changed file with 7 additions and 3 deletions


@@ -19,6 +19,9 @@ from SpiffWorkflow.task import Task as SpiffTask # type: ignore
 from SpiffWorkflow.util.deep_merge import DeepMerge # type: ignore
 from SpiffWorkflow.util.task import TaskState # type: ignore
+from spiffworkflow_backend.background_processing.celery_tasks.process_instance_task_producer import (
+    queue_process_instance_if_appropriate,
+)
 from spiffworkflow_backend.background_processing.celery_tasks.process_instance_task_producer import should_queue_process_instance
 from spiffworkflow_backend.data_migrations.process_instance_migrator import ProcessInstanceMigrator
 from spiffworkflow_backend.exceptions.api_error import ApiError
@@ -259,9 +262,10 @@ class ProcessInstanceService:
         for process_instance in records:
             current_app.logger.info(f"Processor {status_value}: Processing process_instance {process_instance.id}")
             try:
-                cls.run_process_instance_with_processor(
-                    process_instance, status_value=status_value, execution_strategy_name=execution_strategy_name
-                )
+                if not queue_process_instance_if_appropriate(process_instance):
+                    cls.run_process_instance_with_processor(
+                        process_instance, status_value=status_value, execution_strategy_name=execution_strategy_name
+                    )
             except ProcessInstanceIsAlreadyLockedError:
                 # we will try again later
                 continue
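
For context, the change above means the background processor first asks the celery task producer whether an instance should be handed off to a worker, and only runs it in-process when the answer is no. Below is a minimal, self-contained sketch of that queue-or-run decision; the config flag, broker URL, and placeholder celery task used here are assumptions for illustration and are not taken from this commit.

# Sketch only: approximates the queue-or-run decision introduced by this commit.
from celery import Celery
from flask import current_app

celery_app = Celery("sketch", broker="memory://")  # assumed in-memory broker, example only


@celery_app.task
def run_process_instance_task(process_instance_id: int) -> None:
    # Placeholder for the real background task that picks up and runs the instance.
    pass


def should_queue_process_instance(process_instance) -> bool:
    # Assumption: queueing only applies when the backend is configured to use celery.
    return bool(current_app.config.get("SPIFFWORKFLOW_BACKEND_CELERY_ENABLED", False))


def queue_process_instance_if_appropriate(process_instance) -> bool:
    # Returns True when the instance was handed to celery; False tells the caller to run it in-process.
    if should_queue_process_instance(process_instance):
        run_process_instance_task.delay(process_instance.id)
        return True
    return False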