From 6a99762f7003b0d98335363e3761fd2d91f0b6e9 Mon Sep 17 00:00:00 2001 From: jasquat <2487833+jasquat@users.noreply.github.com> Date: Thu, 13 Jun 2024 13:34:32 -0400 Subject: [PATCH] better-db-migration-waiting (#1732) * explicitly wait for db migrations to be completed instead of sleeping w/ burnettk * code rabbit suggestion w/ burnettk * get rid of the set x w/ burnettk --------- Co-authored-by: jasquat --- .../bin/boot_server_in_docker | 14 +++++++++----- .../bin/local_development_environment_setup | 6 +++--- .../bin/run_local_python_script | 9 ++++++--- spiffworkflow-backend/bin/run_server_locally | 6 +++--- .../bin/start_blocking_appscheduler.py | 15 +++------------ .../bin/start_blocking_apscheduler | 15 +++++++++++++++ spiffworkflow-backend/bin/start_celery_worker | 7 ++++++- .../bin/wait_for_db_schema_migrations | 18 ++++++++++++++++++ 8 files changed, 63 insertions(+), 27 deletions(-) create mode 100755 spiffworkflow-backend/bin/start_blocking_apscheduler create mode 100644 spiffworkflow-backend/bin/wait_for_db_schema_migrations diff --git a/spiffworkflow-backend/bin/boot_server_in_docker b/spiffworkflow-backend/bin/boot_server_in_docker index ad13f4ea..5c0bf615 100755 --- a/spiffworkflow-backend/bin/boot_server_in_docker +++ b/spiffworkflow-backend/bin/boot_server_in_docker @@ -14,7 +14,6 @@ function log_info() { # example command: # SPIFFWORKFLOW_BACKEND_PERMISSIONS_FILE_NAME=example.yml SPIFFWORKFLOW_BACKEND_DATABASE_TYPE=sqlite SPIFFWORKFLOW_BACKEND_ENV=local_docker SPIFFWORKFLOW_BACKEND_RUN_BACKGROUND_SCHEDULER_IN_CREATE_APP=true FLASK_DEBUG=0 FLASK_SESSION_SECRET_KEY=HEY SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR="${HOME}/projects/github/sartography/sample-process-models/" ./bin/boot_server_in_docker -# run migrations export FLASK_APP=/app/src/spiffworkflow_backend if [[ -z "${FLASK_DEBUG:-}" ]]; then @@ -109,10 +108,15 @@ if [[ -z "${SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER:-}" ]]; then export SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER fi -# DELETE 
after this runs on all necessary environments -# TODO: make a system somewhat like schema migrations (storing versions in a db table) to handle data migrations -log_info "Running data migrations" -poetry run python ./bin/data_migrations/run_all.py +# # VersionOneThree and the process_instance_file_data db => filesystem migration have run on all necessary environments, +# so commenting out run_all.py. +# one day we might have another blocking-style data migration that we want to complete before the app boots. +# in that case, probably update run_all.py to not run VersionOneThree and filesystem migration, but instead the new function. +# we also might want to build a db-backed system to let other non-api containers know when this long-running migration is +# happening (somewhat like schema migrations (storing versions in a db table) to handle data migrations), +# so it could poll some table and not start its work until the database is fully migrated. +# log_info "Running data migrations" +# poetry run python ./bin/data_migrations/run_all.py ##### DO THIS right before starting the server if [[ "${SPIFFWORKFLOW_BACKEND_RUN_DATA_SETUP:-}" != "false" ]]; then diff --git a/spiffworkflow-backend/bin/local_development_environment_setup b/spiffworkflow-backend/bin/local_development_environment_setup index eb1f8dfc..41e7ced6 100755 --- a/spiffworkflow-backend/bin/local_development_environment_setup +++ b/spiffworkflow-backend/bin/local_development_environment_setup @@ -12,7 +12,7 @@ if [ "${BASH_SOURCE[0]}" -ef "$0" ]; then exit 1 fi -port="${SPIFFWORKFLOW_BACKEND_PORT:-7000}" +PORT="${SPIFFWORKFLOW_BACKEND_PORT:-7000}" use_local_open_id="false" acceptance_test_mode="false" @@ -47,7 +47,7 @@ if [[ "$acceptance_test_mode" == "true" ]]; then elif [[ "$use_local_open_id" == "true" ]]; then backend_base_url="${SPIFFWORKFLOW_BACKEND_URL:-}" if [[ -z "$backend_base_url" ]]; then - backend_base_url="http://localhost:$port" + backend_base_url="http://localhost:$PORT" fi export 
SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__0__identifier="default" export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__0__label="internal openid" @@ -65,7 +65,7 @@ elif [[ "$use_local_open_id" == "true" ]]; then # # export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__identifier="openid" # export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__label="I am a vendor" -# export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__uri="http://localhost:$port/openid" +# export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__uri="http://localhost:$PORT/openid" # export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__client_id="spiffworkflow-backend" # export SPIFFWORKFLOW_BACKEND_AUTH_CONFIGS__1__client_secret="JXeQExm0JhQPLumgHtIIqf52bDalHz0q" diff --git a/spiffworkflow-backend/bin/run_local_python_script b/spiffworkflow-backend/bin/run_local_python_script index e003d29c..83f3ddd1 100755 --- a/spiffworkflow-backend/bin/run_local_python_script +++ b/spiffworkflow-backend/bin/run_local_python_script @@ -1,7 +1,7 @@ #!/usr/bin/env bash function error_handler() { - >&2 echo "Exited with BAD EXIT CODE '${2}' in ${0} script at line: ${1}." + echo >&2 "Exited with BAD EXIT CODE '${2}' in ${0} script at line: ${1}." exit "$2" } trap 'error_handler ${LINENO} $?' ERR @@ -9,9 +9,12 @@ set -o errtrace -o errexit -o nounset -o pipefail script="$1" shift -script_dir="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +script_dir="$( + cd -- "$(dirname "$0")" >/dev/null 2>&1 + pwd -P +)" . 
"${script_dir}/local_development_environment_setup" export SPIFFWORKFLOW_BACKEND_RUN_BACKGROUND_SCHEDULER_IN_CREATE_APP=false -poet run python "$script" "$@" +exec poet run python "$script" "$@" diff --git a/spiffworkflow-backend/bin/run_server_locally b/spiffworkflow-backend/bin/run_server_locally index 2e52f277..2486cda2 100755 --- a/spiffworkflow-backend/bin/run_server_locally +++ b/spiffworkflow-backend/bin/run_server_locally @@ -16,11 +16,11 @@ script_dir="$( server_type="${1:-api}" if [[ "$server_type" == "celery_worker" ]]; then - "${script_dir}/start_celery_worker" + exec "${script_dir}/start_celery_worker" else if [[ -n "${SPIFFWORKFLOW_BACKEND_LOAD_FIXTURE_DATA:-}" || -n "${SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX:-}" ]]; then echo "using ./bin/boot_server_in_docker because we actually load fixture data in wsgi.py, which will not be run with the typical local dev flask server" - ./bin/boot_server_in_docker + exec "${script_dir}/boot_server_in_docker" else if [[ "${SPIFFWORKFLOW_BACKEND_RUN_DATA_SETUP:-}" != "false" ]]; then @@ -28,6 +28,6 @@ else fi # this line blocks - poetry run flask run -p "$port" --host=0.0.0.0 + exec poetry run flask run -p "$PORT" --host=0.0.0.0 fi fi diff --git a/spiffworkflow-backend/bin/start_blocking_appscheduler.py b/spiffworkflow-backend/bin/start_blocking_appscheduler.py index b2f358c5..414b2aab 100755 --- a/spiffworkflow-backend/bin/start_blocking_appscheduler.py +++ b/spiffworkflow-backend/bin/start_blocking_appscheduler.py @@ -5,26 +5,17 @@ import time from apscheduler.schedulers.background import BlockingScheduler # type: ignore from spiffworkflow_backend import create_app from spiffworkflow_backend.background_processing.apscheduler import start_apscheduler -from spiffworkflow_backend.data_migrations.version_1_3 import VersionOneThree -from spiffworkflow_backend.helpers.db_helper import try_to_connect def main() -> None: - seconds_to_wait = 300 + # TODO: in 30 days remove this sleep when the new bash wrapper script is on 
prod envs + seconds_to_wait = 100 print(f"sleeping for {seconds_to_wait} seconds to give the api container time to run the migration") time.sleep(seconds_to_wait) print("done sleeping") + #### - print("running data migration from background processor") app = create_app() - start_time = time.time() - - with app.app_context(): - try_to_connect(start_time) - VersionOneThree().run() - - end_time = time.time() - print(f"done running data migration from background processor. took {end_time - start_time} seconds. starting scheduler") start_apscheduler(app, BlockingScheduler) diff --git a/spiffworkflow-backend/bin/start_blocking_apscheduler b/spiffworkflow-backend/bin/start_blocking_apscheduler new file mode 100755 index 00000000..2fefb206 --- /dev/null +++ b/spiffworkflow-backend/bin/start_blocking_apscheduler @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +function error_handler() { + echo >&2 "Exited with BAD EXIT CODE '${2}' in ${0} script at line: ${1}." + exit "$2" +} +trap 'error_handler ${LINENO} $?' 
ERR
+set -o errtrace -o errexit -o nounset -o pipefail
+
+script_dir="$(
+  cd -- "$(dirname "$0")" >/dev/null 2>&1
+  pwd -P
+)"
+"${script_dir}/wait_for_db_schema_migrations"
+exec poetry run ./bin/start_blocking_appscheduler.py
diff --git a/spiffworkflow-backend/bin/start_celery_worker b/spiffworkflow-backend/bin/start_celery_worker
index 1eed7caf..3a18635f 100755
--- a/spiffworkflow-backend/bin/start_celery_worker
+++ b/spiffworkflow-backend/bin/start_celery_worker
@@ -13,4 +13,9 @@ export SPIFFWORKFLOW_BACKEND_RUN_BACKGROUND_SCHEDULER_IN_CREATE_APP=false
 # so we can raise if calling unsafe code in celery
 export SPIFFWORKFLOW_BACKEND_RUNNING_IN_CELERY_WORKER=true
 
-poetry run celery -A src.spiffworkflow_backend.background_processing.celery_worker worker --loglevel=info
+script_dir="$(
+  cd -- "$(dirname "$0")" >/dev/null 2>&1
+  pwd -P
+)"
+"${script_dir}/wait_for_db_schema_migrations"
+exec poetry run celery -A src.spiffworkflow_backend.background_processing.celery_worker worker --loglevel=info
diff --git a/spiffworkflow-backend/bin/wait_for_db_schema_migrations b/spiffworkflow-backend/bin/wait_for_db_schema_migrations
new file mode 100755
index 00000000..c066c0e0
--- /dev/null
+++ b/spiffworkflow-backend/bin/wait_for_db_schema_migrations
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+function error_handler() {
+  echo >&2 "Exited with BAD EXIT CODE '${2}' in ${0} script at line: ${1}."
+  exit "$2"
+}
+trap 'error_handler ${LINENO} $?' ERR
+set -o errtrace -o errexit -o nounset -o pipefail
+
+current_db_migration_revision=$(poetry run flask db current | awk '{print $1}')
+current_db_migration_head=$(poetry run flask db heads | awk '{print $1}')
+while [[ "$current_db_migration_revision" != "$current_db_migration_head" ]]; do
+  echo "Waiting for db migrations to finish"
+  echo "current revision: ${current_db_migration_revision}"
+  echo "head revision: ${current_db_migration_head}"
+  sleep 2
+  current_db_migration_revision=$(poetry run flask db current | awk '{print $1}')
+done