#!/usr/bin/env bash

function error_handler() {
  echo >&2 "Exited with BAD EXIT CODE '${2}' in ${0} script at line: ${1}."
  exit "$2"
}
trap 'error_handler ${LINENO} $?' ERR
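# abort on any error (errexit) or unset variable (nounset), fail a pipeline if any stage fails (pipefail),
# and let the ERR trap above fire inside functions and subshells as well (errtrace).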
set -o errtrace -o errexit -o nounset -o pipefail
function log_info() {
  echo "BOOT SERVER IN DOCKER ($(date +"%Y-%m-%d %H:%M:%S")): $1"
}

# example command:
# SPIFFWORKFLOW_BACKEND_PERMISSIONS_FILE_NAME=example.yml SPIFFWORKFLOW_BACKEND_DATABASE_TYPE=sqlite SPIFFWORKFLOW_BACKEND_ENV=local_docker SPIFFWORKFLOW_BACKEND_RUN_BACKGROUND_SCHEDULER_IN_CREATE_APP=true FLASK_DEBUG=0 FLASK_SESSION_SECRET_KEY=HEY SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR="${HOME}/projects/github/sartography/sample-process-models/" ./bin/boot_server_in_docker

export FLASK_APP=/app/src/spiffworkflow_backend

if [[ -z "${FLASK_DEBUG:-}" ]]; then
  export FLASK_DEBUG=0
fi

if [[ "${SPIFFWORKFLOW_BACKEND_WAIT_FOR_DB_TO_BE_READY:-}" == "true" ]]; then
  echo 'Waiting for db to be ready...'
  poetry run python ./bin/wait_for_db_to_be_ready.py
fi
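# the next two blocks optionally run database schema migrations (the flask db commands wrap alembic)
# before the server boots. illustrative example: setting SPIFFWORKFLOW_BACKEND_UPGRADE_DB=true
# runs "poetry run flask db upgrade" on startup.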
if [[ "${SPIFFWORKFLOW_BACKEND_DOWNGRADE_DB:-}" == "true" ]]; then
  echo 'Downgrading database...'
  poetry run flask db downgrade
fi

if [[ "${SPIFFWORKFLOW_BACKEND_UPGRADE_DB:-}" == "true" ]]; then
  echo 'Upgrading database...'
  poetry run flask db upgrade
fi

if [[ -z "${GUNICORN_LOG_LEVEL:-}" ]]; then
  GUNICORN_LOG_LEVEL=debug
fi

if [[ -z "${GUNICORN_TIMEOUT_SECONDS:-}" ]]; then
  GUNICORN_TIMEOUT_SECONDS=90
fi

port="${SPIFFWORKFLOW_BACKEND_PORT:-}"
if [[ -z "$port" ]]; then
  port=7000
fi

additional_args=""

app_root="${SPIFFWORKFLOW_BACKEND_APPLICATION_ROOT:-}"
if [[ -n "$app_root" ]] && [[ "${app_root}" != "/" ]]; then
  echo >&2 "ERROR: SPIFFWORKFLOW_BACKEND_APPLICATION_ROOT has been set. This variable has been renamed to SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX. Please use that instead."
  exit 1
fi
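# if the app is served under a sub-path, the prefix is handed to the WSGI app through the SCRIPT_NAME
# environment variable on the gunicorn command line below. illustrative (not required) value:
#   SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX=/api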
path_prefix="${SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX:-}"
if [[ -n "$path_prefix" ]] && [[ "${path_prefix}" != "/" ]]; then
  if ! grep -qE '^/' <<<"${path_prefix}"; then
    echo >&2 "ERROR: SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX must start with a '/'. '${SPIFFWORKFLOW_BACKEND_WSGI_PATH_PREFIX}' was provided."
    exit 1
  fi
  additional_args="${additional_args} -e SCRIPT_NAME=${path_prefix}"
fi

# HACK: when loading fixtures for acceptance tests we do not need multiple workers;
# running more than one worker in that case causes issues by attempting to add duplicate data to the db.
worker_count=4
if [[ "${SPIFFWORKFLOW_BACKEND_LOAD_FIXTURE_DATA:-}" == "true" ]]; then
  worker_count=1
fi
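# if no key file path was provided but an inline key was, write the key contents
# (SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY) to a locked-down temp file so the git commands below can use it.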
if [[ -z "${SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY_PATH:-}" ]]; then
  if [[ -n "${SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY:-}" ]]; then
    SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY_PATH=$(mktemp /tmp/ssh_private_key.XXXXXX)
    export SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY_PATH
    chmod 600 "${SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY_PATH}"
    echo "${SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY}" >"${SPIFFWORKFLOW_BACKEND_GIT_SSH_PRIVATE_KEY_PATH}"
  fi
fi

if [[ -n "${SPIFFWORKFLOW_BACKEND_GIT_CLONE_PROCESS_MODELS:-}" ]]; then
  ./bin/clone_process_models
fi

# ensure that the Process Models Directory is initialized as a git repo
log_info "Running git init"
git init "${SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR}"
# mark the directory as safe for git even if it is owned by a different user than the one running this script
git config --global --add safe.directory "${SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR}"

if [[ -z "${SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER:-}" ]]; then
  # default to 3 threads per available core (e.g. 3 * 2 cores = 6 threads per worker).
  # you may want to configure threads_to_use_per_core based on whether your workload is more cpu intensive or more I/O intensive:
  #   cpu heavy, make it smaller
  #   I/O heavy, make it larger
  threads_to_use_per_core=3

  # https://stackoverflow.com/a/55423170/6090676
  # if we had access to python (and we are not sure we want to run another python script here),
  # we could do something like this (on linux) to get the number of cores available to this process
  # and a better estimate of a reasonable num_cores_multiple_for_threads:
  #   if hasattr(os, "sched_getaffinity"):
  #       number_of_available_cores = len(os.sched_getaffinity(0))
  # BUT the python solution isn't even as portable as this one, which is mostly posix compliant and works on linux/mac/freebsd.
  num_cores_multiple_for_threads=$(getconf _NPROCESSORS_ONLN 2>/dev/null || getconf NPROCESSORS_ONLN 2>/dev/null || echo 1)

  SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER=$((threads_to_use_per_core * num_cores_multiple_for_threads))
  export SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER
fi
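# illustrative example of the default above: with 3 threads per core and 4 cores visible to this
# process, gunicorn gets 3 * 4 = 12 threads per worker.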
# VersionOneThree and the process_instance_file_data db => filesystem migration have run on all necessary environments,
# so run_all.py is commented out.
# one day we might have another blocking-style data migration that we want to complete before the app boots.
# in that case, probably update run_all.py to not run VersionOneThree and the filesystem migration, but instead the new function.
# we also might want to build a db-backed system to let other non-api containers know when this long-running migration is
# happening (somewhat like schema migrations, which store versions in a db table, but for data migrations),
# so they could poll some table and not start their work until the database is fully migrated.
# log_info "Running data migrations"
# poetry run python ./bin/data_migrations/run_all.py

##### DO THIS right before starting the server
if [[ "${SPIFFWORKFLOW_BACKEND_RUN_DATA_SETUP:-}" != "false" ]]; then
  if [[ -z "${SPIFFWORKFLOW_BACKEND_FAIL_ON_INVALID_PROCESS_MODELS:-}" ]]; then
    export SPIFFWORKFLOW_BACKEND_FAIL_ON_INVALID_PROCESS_MODELS=false
  fi
  poetry run python bin/save_all_bpmn.py
fi

# --worker-class is not strictly necessary, since setting threads will automatically set the worker class to gthread, but meh
log_info "Starting gunicorn server"
export IS_GUNICORN="true"
# THIS MUST BE THE LAST COMMAND!
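# exec replaces this shell so signals from the container runtime (e.g. SIGTERM on stop)
# go to the server command rather than being stranded on a wrapper bash process.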
exec poetry run gunicorn ${additional_args} \
  --bind "0.0.0.0:$port" \
  --preload \
  --worker-class "gthread" \
  --workers="$worker_count" \
  --threads "$SPIFFWORKFLOW_BACKEND_THREADS_PER_WORKER" \
  --limit-request-line 8192 \
  --timeout "$GUNICORN_TIMEOUT_SECONDS" \
  --capture-output \
  --access-logfile '-' \
  --log-level "$GUNICORN_LOG_LEVEL" wsgi:app