From db2a07c18e45624ef3b46a29c30e0c4e3e6779b4 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Mon, 17 Mar 2025 02:01:08 +0100 Subject: [PATCH] waku-simulator prepared for full-featured hacking - added a full service node, an edge node (connected to the service node), fixed IPs for the bootstrap/service/edge nodes, and store support with a built-in Postgres DB. --- docker-compose.yml | 164 +++- .../configuration/pg-exporter-queries.yml | 284 ++++++ .../configuration/postgres-exporter.yml | 9 + monitoring/prometheus-config.yml | 4 +- postgres_cfg/db.sql | 8 + postgres_cfg/postgresql.conf | 813 ++++++++++++++++++ run_bootstrap.sh | 4 + run_edgenode.sh | 49 ++ run_nwaku.sh | 6 +- run_servicenode.sh | 170 ++++ wakusim.env | 5 +- 11 files changed, 1486 insertions(+), 30 deletions(-) create mode 100644 monitoring/configuration/pg-exporter-queries.yml create mode 100644 monitoring/configuration/postgres-exporter.yml create mode 100644 postgres_cfg/db.sql create mode 100644 postgres_cfg/postgresql.conf create mode 100755 run_edgenode.sh create mode 100755 run_servicenode.sh diff --git a/docker-compose.yml b/docker-compose.yml index 6592794..638bff4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,12 +1,51 @@ version: "3.7" +x-bootstrap-environment: &bootstrap_env + BOOTSTRAP_IP: 10.2.0.100 + BOOTSTRAP_MULTIADDRESS: "/ip4/10.2.0.100/tcp/60000/p2p/16Uiu2HAmGNtM2rQ8abySFNhqPDFY4cmfAEpfo9Z9fD3NekoFR2ip" + BOOTSTRAP_ENR: "enr:-LG4QK3uc1orOi79J5eAprzwyfj4QcYaR_oamz1YY0U3PmaRY807UrJTkQJiSDT8PNmIPwxIw9POrel-sf1OOTlcK9UCgmlkgnY0gmlwhAoCAGSKbXVsdGlhZGRyc4oACAQKAgBkBupggnJzhQBCAQAAiXNlY3AyNTZrMaEDN06qX-XhZ-Cc7ZuIAdGNCVUicscmbFvEEfkFOQ2W_j2DdGNwgupgg3VkcIIjKIV3YWt1MgA" + +x-servicenode-environment: &servicenode_env + SERVICENODE_IP: 10.2.0.101 + SERVICENODE_MULTIADDRESS: "/ip4/10.2.0.101/tcp/60001/p2p/16Uiu2HAkyte8uj451tGkbww4Mjcg6DRnmAHxNeWyF4zp23RbpG3n" + SERVICENODE_WS_MULTIADDRESS: "/ip4/10.2.0.101/tcp/8000/ws/p2p/16Uiu2HAkyte8uj451tGkbww4Mjcg6DRnmAHxNeWyF4zp23RbpG3n" + SERVICENODE_ENR: "enr:-LO4QDhRxZ-YJBeiriq07BkSiA-qSJCcy3Kz7bAWXeop48dIPpsQK2QNuDX7umonw3Wu0zFXXoMxGrIFmpQiJ1mBd_sBgmlkgnY0gmlwhAoCAGWKbXVsdGlhZGRyc4wACgQKAgBlBh9A3QOCcnOFAEIBAACJc2VjcDI1NmsxoQJCV1iKpD3kj-6EDB8QIiRtUZE3-g0OK1QbmXL2OoziNYN0Y3CC6mCDdWRwgiMohXdha3UyDw" + +x-edgenode-environment: &edgenode_env + EDGENODE_IP: 10.2.0.102 + EDGENODE_MULTIADDRESS: "/ip4/10.2.0.102/tcp/60002/p2p/16Uiu2HAm5tojCrfxXrum5VxAVtCQk6h1jkA2Ecy447rQkKwwgf51" + EDGENODE_ENR: "enr:-KC4QAsSQM0tP9Zs8UxbHl3pe7HKE_0xLNA2P5LLVCbzCArsATKeH6EK43hhQJznAKjaMcpzqbMcd3UEjYJSkahMyg4BgmlkgnY0gmlwhAoCAGaKbXVsdGlhZGRyc4CCcnOFAEIBAACJc2VjcDI1NmsxoQKbiE_1i7pL24P02qgEFs0jHaso1XPo8HmcXAfqJPjGeIN0Y3CC6mKFd2FrdTIA" + +x-rln-environment: &rln_env + RPC_URL: ${RPC_URL:-http://foundry:8545} + RLN_CONTRACT_ADDRESS: 0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9 + RLN_CREDENTIAL_PATH: /keystore.json + RLN_CREDENTIAL_PASSWORD: passw123 + RLN_RELAY_MSG_LIMIT: ${RLN_RELAY_MSG_LIMIT:-10} + RLN_RELAY_EPOCH_SEC: ${RLN_RELAY_EPOCH_SEC:-60} + +x-pg-pass: &pg_pass test123 +x-pg-user: &pg_user postgres + +x-pg-environment: &pg_env + POSTGRES_USER: *pg_user + POSTGRES_PASSWORD: *pg_pass + +x-pg-exporter-env: &pg_exp_env + environment: + POSTGRES_PASSWORD: *pg_pass + DATA_SOURCE_URI: postgres?sslmode=disable + DATA_SOURCE_USER: *pg_user + DATA_SOURCE_PASS: *pg_pass + PG_EXPORTER_EXTEND_QUERY_PATH: /etc/pgexporter/queries.yml + networks: simulation: - driver: bridge - ipam: - driver: default - config: - - subnet: "10.2.0.0/24" + driver: bridge + 
ipam: + driver: default + config: + - subnet: "10.2.0.0/24" services: @@ -63,14 +102,47 @@ services: - 127.0.0.1:60000:60000 - 127.0.0.1:8008:8008 - 127.0.0.1:9000:9000 - - 127.0.0.1:8645:8645 + - 127.0.0.1:8646:8645 entrypoint: sh command: - '/opt/run_bootstrap.sh' volumes: - ./run_bootstrap.sh:/opt/run_bootstrap.sh:Z networks: - - simulation + simulation: + ipv4_address: 10.2.0.100 + + servicenode: + image: ${NWAKU_IMAGE:-wakuorg/nwaku:latest} + restart: on-failure + labels: + com.centurylinklabs.watchtower.enable: '${WATCHTOWER_ENABLED:-false}' + ports: + - 127.0.0.1:60001:60001/tcp + - 127.0.0.1:60001:60001/udp + - 127.0.0.1:8000:8000/tcp # WebSocket + - 127.0.0.1:8645:8645 # Service node REST API + environment: + NODEKEY: ${NODEKEY} + STORAGE_SIZE: ${STORAGE_SIZE} + <<: + - *pg_env + - *rln_env + - *bootstrap_env + volumes: + - ./run_servicenode.sh:/opt/run_servicenode.sh:Z + - privatekeys-volume:/shared + entrypoint: sh + command: + - /opt/run_servicenode.sh + depends_on: + contract-repo-deployer: + condition: service_completed_successfully + postgres: + condition: service_started + networks: + simulation: + ipv4_address: 10.2.0.101 nwaku: image: ${NWAKU_IMAGE:-wakuorg/nwaku:latest} @@ -81,12 +153,9 @@ services: replicas: ${NUM_NWAKU_NODES:-5} entrypoint: sh environment: - - RPC_URL=${RPC_URL:-http://foundry:8545} - - RLN_CONTRACT_ADDRESS=0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9 - - RLN_CREDENTIAL_PATH=/keystore.json - - RLN_CREDENTIAL_PASSWORD=passw123 - - RLN_RELAY_MSG_LIMIT=${RLN_RELAY_MSG_LIMIT:-10} - - RLN_RELAY_EPOCH_SEC=${RLN_RELAY_EPOCH_SEC:-60} + <<: + - *rln_env + - *bootstrap_env command: - '/opt/run_nwaku.sh' volumes: @@ -97,17 +166,28 @@ services: condition: service_completed_successfully networks: - simulation - - rest-traffic: - image: alrevuelta/rest-traffic:28430f8 + + edgenode: + image: ${NWAKU_IMAGE:-wakuorg/nwaku:latest} + restart: on-failure + ports: + - 127.0.0.1:60002:60002/tcp + - 127.0.0.1:60002:60002/udp + - 127.0.0.1:8644:8645 # Edge node REST API + entrypoint: sh + environment: + <<: + - *servicenode_env command: - --multiple-nodes=http://waku-simulator-nwaku-[1..${NUM_NWAKU_NODES:-5}]:8645 - --msg-size-kbytes=${MSG_SIZE_KBYTES:-10} - --delay-seconds=${TRAFFIC_DELAY_SECONDS:-15} - networks: - - simulation + - '/opt/run_edgenode.sh' + volumes: + - ./run_edgenode.sh:/opt/run_edgenode.sh:Z depends_on: - - nwaku + servicenode: + condition: service_started + networks: + simulation: + ipv4_address: 10.2.0.102 prometheus: image: prom/prometheus:latest @@ -234,5 +314,45 @@ services: networks: - simulation + postgres: + # This service is used when the Waku node has the 'store' protocol enabled + # and the store-message-db-url is set to use Postgres + image: postgres:15.4-alpine3.18 + restart: on-failure:5 + shm_size: "${POSTGRES_SHM:-1g}" # Set default shared memory size to 1 GB + environment: + <<: *pg_env + volumes: + - ./postgres_cfg/postgresql.conf:/etc/postgresql/postgresql.conf:Z + - ./postgres_cfg/db.sql:/docker-entrypoint-initdb.d/db.sql:Z + - ${PG_DATA_DIR:-./postgresql}:/var/lib/postgresql/data:Z + command: postgres -c config_file=/etc/postgresql/postgresql.conf + ports: + - 127.0.0.1:5432:5432 + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d postgres"] + interval: 30s + timeout: 60s + retries: 5 + start_period: 80s + networks: + - simulation + + postgres-exporter: + # Service that scrapes metrics from Postgres and exposes them to Prometheus + image: quay.io/prometheuscommunity/postgres-exporter:v0.12.0 + restart: on-failure:5 + <<: 
*pg_exp_env + volumes: + - ./monitoring/configuration/postgres-exporter.yml:/etc/pgexporter/postgres-exporter.yml:Z + - ./monitoring/configuration/pg-exporter-queries.yml:/etc/pgexporter/queries.yml:Z + command: + # Both the config file and 'DATA_SOURCE_NAME' should contain valid connection info + - --config.file=/etc/pgexporter/postgres-exporter.yml + depends_on: + - postgres + networks: + - simulation + volumes: privatekeys-volume: \ No newline at end of file diff --git a/monitoring/configuration/pg-exporter-queries.yml b/monitoring/configuration/pg-exporter-queries.yml new file mode 100644 index 0000000..bb1d732 --- /dev/null +++ b/monitoring/configuration/pg-exporter-queries.yml @@ -0,0 +1,284 @@ +pg_replication: + query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag" + master: true + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind master in seconds" + +pg_postmaster: + query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" + master: true + metrics: + - start_time_seconds: + usage: "GAUGE" + description: "Time at which postmaster started" + +pg_stat_user_tables: + query: | + SELECT + current_database() datname, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, + COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, + COALESCE(last_analyze, '1970-01-01Z') as last_analyze, + COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count + FROM + pg_stat_user_tables + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - schemaname: + usage: "LABEL" + description: "Name of the schema that this table is in" + - relname: + usage: "LABEL" + description: "Name of this table" + - seq_scan: + usage: "COUNTER" + description: "Number of sequential scans initiated on this table" + - seq_tup_read: + usage: "COUNTER" + description: "Number of live rows fetched by sequential scans" + - idx_scan: + usage: "COUNTER" + description: "Number of index scans initiated on this table" + - idx_tup_fetch: + usage: "COUNTER" + description: "Number of live rows fetched by index scans" + - n_tup_ins: + usage: "COUNTER" + description: "Number of rows inserted" + - n_tup_upd: + usage: "COUNTER" + description: "Number of rows updated" + - n_tup_del: + usage: "COUNTER" + description: "Number of rows deleted" + - n_tup_hot_upd: + usage: "COUNTER" + description: "Number of rows HOT updated (i.e., with no separate index update required)" + - n_live_tup: + usage: "GAUGE" + description: "Estimated number of live rows" + - n_dead_tup: + usage: "GAUGE" + description: "Estimated number of dead rows" + - n_mod_since_analyze: + usage: "GAUGE" + description: "Estimated number of rows changed since last analyze" + - last_vacuum: + usage: "GAUGE" + description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)" + - last_autovacuum: + usage: "GAUGE" + description: "Last time at which this table was vacuumed by the autovacuum daemon" + - last_analyze: + usage: "GAUGE" + description: "Last time at which this table was manually analyzed" + - last_autoanalyze: + usage: "GAUGE" + description: "Last time at which this table was analyzed by the 
autovacuum daemon" + - vacuum_count: + usage: "COUNTER" + description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)" + - autovacuum_count: + usage: "COUNTER" + description: "Number of times this table has been vacuumed by the autovacuum daemon" + - analyze_count: + usage: "COUNTER" + description: "Number of times this table has been manually analyzed" + - autoanalyze_count: + usage: "COUNTER" + description: "Number of times this table has been analyzed by the autovacuum daemon" + +pg_statio_user_tables: + query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - schemaname: + usage: "LABEL" + description: "Name of the schema that this table is in" + - relname: + usage: "LABEL" + description: "Name of this table" + - heap_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table" + - heap_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table" + - idx_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from all indexes on this table" + - idx_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in all indexes on this table" + - toast_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table's TOAST table (if any)" + - toast_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table's TOAST table (if any)" + - tidx_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table's TOAST table indexes (if any)" + - tidx_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table's TOAST table indexes (if any)" + +# WARNING: This set of metrics can be very expensive on a busy server as every unique query executed will create an additional time series +pg_stat_statements: + query: "SELECT t2.rolname, t3.datname, queryid, calls, ( total_plan_time + total_exec_time ) / 1000 as total_time_seconds, ( min_plan_time + min_exec_time ) / 1000 as min_time_seconds, ( max_plan_time + max_exec_time ) / 1000 as max_time_seconds, ( mean_plan_time + mean_exec_time ) / 1000 as mean_time_seconds, ( stddev_plan_time + stddev_exec_time ) / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin' AND queryid IS NOT NULL" + master: true + metrics: + - rolname: + usage: "LABEL" + description: "Name of user" + - datname: + usage: "LABEL" + description: "Name of database" + - queryid: + usage: "LABEL" + description: "Query ID" + - calls: + usage: "COUNTER" + description: "Number of times executed" + - total_time_seconds: + usage: "COUNTER" + description: "Total time spent in the statement, in milliseconds" + - min_time_seconds: + usage: "GAUGE" + description: "Minimum time spent in the statement, in milliseconds" + - max_time_seconds: + usage: "GAUGE" + description: "Maximum time spent in the statement, in milliseconds" + - mean_time_seconds: + usage: "GAUGE" + 
description: "Mean time spent in the statement, in milliseconds" + - stddev_time_seconds: + usage: "GAUGE" + description: "Population standard deviation of time spent in the statement, in milliseconds" + - rows: + usage: "COUNTER" + description: "Total number of rows retrieved or affected by the statement" + - shared_blks_hit: + usage: "COUNTER" + description: "Total number of shared block cache hits by the statement" + - shared_blks_read: + usage: "COUNTER" + description: "Total number of shared blocks read by the statement" + - shared_blks_dirtied: + usage: "COUNTER" + description: "Total number of shared blocks dirtied by the statement" + - shared_blks_written: + usage: "COUNTER" + description: "Total number of shared blocks written by the statement" + - local_blks_hit: + usage: "COUNTER" + description: "Total number of local block cache hits by the statement" + - local_blks_read: + usage: "COUNTER" + description: "Total number of local blocks read by the statement" + - local_blks_dirtied: + usage: "COUNTER" + description: "Total number of local blocks dirtied by the statement" + - local_blks_written: + usage: "COUNTER" + description: "Total number of local blocks written by the statement" + - temp_blks_read: + usage: "COUNTER" + description: "Total number of temp blocks read by the statement" + - temp_blks_written: + usage: "COUNTER" + description: "Total number of temp blocks written by the statement" + - blk_read_time_seconds: + usage: "COUNTER" + description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" + - blk_write_time_seconds: + usage: "COUNTER" + description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" + +pg_process_idle: + query: | + WITH + metrics AS ( + SELECT + application_name, + SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, + COUNT(*) AS process_idle_seconds_count + FROM pg_stat_activity + WHERE state = 'idle' + GROUP BY application_name + ), + buckets AS ( + SELECT + application_name, + le, + SUM( + CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le + THEN 1 + ELSE 0 + END + )::bigint AS bucket + FROM + pg_stat_activity, + UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le + GROUP BY application_name, le + ORDER BY application_name, le + ) + SELECT + application_name, + process_idle_seconds_sum as seconds_sum, + process_idle_seconds_count as seconds_count, + ARRAY_AGG(le) AS seconds, + ARRAY_AGG(bucket) AS seconds_bucket + FROM metrics JOIN buckets USING (application_name) + GROUP BY 1, 2, 3 + metrics: + - application_name: + usage: "LABEL" + description: "Application Name" + - seconds: + usage: "HISTOGRAM" + description: "Idle time of server processes" + +pg_tb_stats: + query: | + select pubsubtopic, count(*) AS messages FROM (SELECT id, array_agg(pubsubtopic ORDER BY pubsubtopic) AS pubsubtopic FROM messages GROUP BY id) sub GROUP BY pubsubtopic ORDER BY pubsubtopic; + metrics: + - pubsubtopic: + usage: "LABEL" + description: "pubsubtopic" + - messages: + usage: "GAUGE" + description: "Number of messages for the given pubsub topic" + +pg_tb_messages: + query: | + SELECT + COUNT(ID) + FROM messages + metrics: + - count: + usage: "GAUGE" + description: "Row count in `messages` table" diff --git a/monitoring/configuration/postgres-exporter.yml b/monitoring/configuration/postgres-exporter.yml new file mode 100644 index 0000000..a8380dd --- /dev/null +++ 
b/monitoring/configuration/postgres-exporter.yml @@ -0,0 +1,9 @@ +auth_modules: + mypostgres: + type: userpass + userpass: + username: postgres + password: ${POSTGRES_PASSWORD} + options: + # options become key=value parameters of the DSN + sslmode: disable diff --git a/monitoring/prometheus-config.yml b/monitoring/prometheus-config.yml index 033f8d0..6785bdc 100644 --- a/monitoring/prometheus-config.yml +++ b/monitoring/prometheus-config.yml @@ -14,11 +14,13 @@ scrape_configs: static_configs: - targets: - bootstrap:8008 + - servicenode:8008 + - edgenode:8008 - waku-simulator-nwaku-1:8008 - waku-simulator-nwaku-2:8008 - waku-simulator-nwaku-3:8008 - waku-simulator-nwaku-4:8008 - - waku-simulator-nwaku-5:8008 - waku-simulator-nwaku-6:8008 - waku-simulator-nwaku-7:8008 - waku-simulator-nwaku-8:8008 diff --git a/postgres_cfg/db.sql b/postgres_cfg/db.sql new file mode 100644 index 0000000..48b8c24 --- /dev/null +++ b/postgres_cfg/db.sql @@ -0,0 +1,8 @@ + +/* +The next two lines allow 'pg_stat_statements_calls' information to be queried +by Prometheus/Grafana. This setting is intended for performance +analysis. Comment these lines out for production environments. +*/ +CREATE EXTENSION pg_stat_statements; +alter system set shared_preload_libraries='pg_stat_statements'; diff --git a/postgres_cfg/postgresql.conf b/postgres_cfg/postgresql.conf new file mode 100644 index 0000000..59cb905 --- /dev/null +++ b/postgres_cfg/postgresql.conf @@ -0,0 +1,813 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. 
+ +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is usually the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enables compression of full-page writes; + # off, pglz, lz4, zstd, or on +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - 
Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 1GB +min_wal_size = 80MB + +# - Prefetching during recovery - + +#recovery_prefetch = try # prefetch pages referenced in the WAL? +#wal_decode_buffer_size = 512kB # lookahead window used for prefetching + # (change requires restart) + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_library = '' # library to use to archive a logfile segment + # (empty string indicates archive_command should + # be used) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # 
selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan +#recursive_worktable_factor = 10.0 # range 0.001-1000000 + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, jsonlog, syslog, and + # eventlog, depending on platform. + # csvlog and jsonlog require + # logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr, jsonlog, + # and csvlog into log files. Required + # to be on for csvlogs and jsonlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. 
+ +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +#log_startup_progress_interval = 10s # Time between progress updates for + # long-running startup operations. + # 0 disables the feature, > 0 indicates + # the interval in milliseconds. + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = 10min # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#log_checkpoints = on +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Cumulative Query and Index Statistics - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_fetch_consistency = cache + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.utf8' # locale for system error message + # strings +lc_monetary = 'en_US.utf8' # locale for monetary formatting +lc_numeric = 'en_US.utf8' # locale for number formatting +lc_time = 'en_US.utf8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'pg_stat_statements' # (change requires restart) +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/run_bootstrap.sh b/run_bootstrap.sh index cab3d90..658c427 100755 --- a/run_bootstrap.sh +++ b/run_bootstrap.sh @@ -7,7 +7,11 @@ echo "I am a bootstrap node" exec /usr/bin/wakunode\ --relay=false\ --rest=true\ + --rest-admin=true\ --rest-address=0.0.0.0\ + --rest-port=8645\ + --rest-allow-origin="waku-org.github.io"\ + --rest-allow-origin="localhost:*"\ --max-connections=300\ --dns-discovery=true\ --discv5-discovery=true\ diff --git a/run_edgenode.sh b/run_edgenode.sh new file mode 100755 index 0000000..e523937 --- /dev/null +++ b/run_edgenode.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +# Check Linux Distro Version - it can differ depending on the nwaku image used +OS=$(cat /etc/os-release) +if echo $OS | grep -q "Debian"; then + echo "The operating system is Debian." + apt update + apt install -y dnsutils + apt install -y jq +elif echo $OS | grep -q "Alpine"; then + echo "The operating system is Alpine." + apk add bind-tools + apk add jq +fi + +if test -f .env; then + echo "Using .env file" + . $(pwd)/.env +fi + +IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/') + +echo "I am a nwaku edge node" + +RETRIES=${RETRIES:=10} + +echo "My IP is: ${IP}" + +exec /usr/bin/wakunode\ + --relay=false\ + --lightpushnode=${SERVICENODE_MULTIADDRESS}\ + --filternode=${SERVICENODE_MULTIADDRESS}\ + --storenode=${SERVICENODE_MULTIADDRESS}\ + --max-connections=80\ + --rest=true\ + --rest-admin=true\ + --rest-address=0.0.0.0\ + --rest-port=8645\ + --rest-allow-origin="waku-org.github.io"\ + --rest-allow-origin="localhost:*"\ + --log-level=INFO\ + --metrics-server=True\ + --metrics-server-address=0.0.0.0\ + --nat=extip:${IP}\ + --tcp-port=60002\ + --pubsub-topic=/waku/2/rs/66/0\ + --cluster-id=66\ + --nodekey=5358f02c157accb30a6c1d5920e778604de12e23d6009512be44f72f1a64d828\ + --relay-service-ratio="0:100" \ No newline at end of file diff --git a/run_nwaku.sh b/run_nwaku.sh index f1563ca..1cf61f3 100755 --- a/run_nwaku.sh +++ b/run_nwaku.sh @@ -124,13 +124,13 @@ if [ -z "${BOOTSTRAP_ENR}" ]; then fi echo "Using bootstrap node: ${BOOTSTRAP_ENR}" +echo "My IP is: ${IP}" + exec /usr/bin/wakunode\ --relay=true\ --lightpush=true\ --max-connections=250\ - --rest=true\ - --rest-address=0.0.0.0\ - --rest-port=8645\ + --rest=false\ --rln-relay=true\ --rln-relay-dynamic=true\ --rln-relay-eth-client-address="$RPC_URL"\ diff --git a/run_servicenode.sh b/run_servicenode.sh new file mode 100755 index 0000000..c59f7dd --- /dev/null +++ b/run_servicenode.sh @@ -0,0 +1,170 @@ +#!/bin/sh + +# Check Linux Distro Version - it can differ depending on the nwaku image used +OS=$(cat /etc/os-release) +if echo $OS | grep -q "Debian"; then + echo "The operating system is Debian." + apt update + apt install -y dnsutils + apt install -y jq +elif echo $OS | grep -q "Alpine"; then + echo "The operating system is Alpine." + apk add bind-tools + apk add jq +fi + +if test -f .env; then + echo "Using .env file" + . 
$(pwd)/.env +fi + +IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/') + +# Function to extract IP address from URL, resolve the IP and replace it in the original URL +get_ip_address_and_replace() { + local url=$1 + local domain_name=$(echo $url | awk -F[/:] '{print $4}') + local ip_address=$(dig +short $domain_name) + valid_rpc_url="$(echo "$url" | sed "s/$domain_name/$ip_address/g")" + echo $valid_rpc_url +} + +# The RPC URL format is checked by the generateRlnKeystore command, and hostnames are not valid there +pattern="^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" +# Perform regex matching +if echo "$RPC_URL" | grep -q "$pattern"; then + echo "RPC URL is valid" +else + echo "RPC URL is invalid: $RPC_URL. Attempting to resolve hostname." + resolved_rpc_url="$(get_ip_address_and_replace $RPC_URL)" + if [ -z "$resolved_rpc_url" ]; then + echo -e "Failed to retrieve IP address for $RPC_URL\n" + else + echo -e "Resolved RPC URL for $RPC_URL: $resolved_rpc_url" + RPC_URL="$resolved_rpc_url" + fi +fi + +# Function to get the index of the container and use it to retrieve a private key to be used to generate the keystore, allowing for either dash or underscore container name format (for docker-compose backward compatibility) +get_private_key(){ + + # Read the JSON file + json_content=$(cat /shared/anvil-config.txt) + + # Check if json_content has a value + if [ -z "$json_content" ]; then + echo "Error: Failed to read the JSON file or the file is empty." >&2 + return 1 + fi + + # Extract private_keys json array using jq + private_keys=$(echo "$json_content" | jq -r '.private_keys[]') + + CNTR=`dig -x $IP +short | cut -d'.' -f1` + INDEX=`echo $CNTR | sed 's/.*[-_]\([0-9]*\)/\1/'` + + if [ $? -ne 0 ] || [ -z "$INDEX" ]; then + echo "Error: Failed to determine the replica index from IP." >&2 + return 1 + fi + + + # iterate through list of private keys and get the one corresponding to the container index + # we need to iterate because array objects cannot be used in /bin/ash (Alpine) and a separate script would need to be called to use bash + current_index=1 + for key in $private_keys + do + if [ $current_index -eq $INDEX ]; then + pk=$key + echo $key + break + fi + current_index=$((current_index+1)) + done + + if [ -z "$pk" ]; then + echo "Error: Failed to get private key for the container with index=$INDEX." >&2 + return 1 + fi +} + +if test -f .$RLN_CREDENTIAL_PATH; then + echo "$RLN_CREDENTIAL_PATH already exists. Using it instead of creating a new one." 
+else + private_key="$(get_private_key)" + echo "Private key: $private_key" + + echo "Generating RLN keystore" + /usr/bin/wakunode generateRlnKeystore \ + --rln-relay-eth-client-address="$RPC_URL" \ + --rln-relay-eth-private-key=$private_key \ + --rln-relay-eth-contract-address=$RLN_CONTRACT_ADDRESS \ + --rln-relay-cred-path=$RLN_CREDENTIAL_PATH \ + --rln-relay-cred-password=$RLN_CREDENTIAL_PASSWORD \ + --rln-relay-user-message-limit=$RLN_RELAY_MSG_LIMIT \ + --rln-relay-epoch-sec=$RLN_RELAY_EPOCH_SEC \ + --log-level=DEBUG \ + --execute +fi + +echo "I am a nwaku service node" + +RETRIES=${RETRIES:=10} + +while [ -z "${BOOTSTRAP_ENR}" ] && [ ${RETRIES} -ge 0 ]; do + BOOTSTRAP_ENR=$(wget -qO- http://bootstrap:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null | sed 's/.*"enrUri":"\([^"]*\)".*/\1/'); + echo "Bootstrap node not ready, retrying (retries left: ${RETRIES})" + sleep 1 + RETRIES=$(( $RETRIES - 1 )) +done + +if [ -z "${BOOTSTRAP_ENR}" ]; then + echo "Could not get BOOTSTRAP_ENR and none provided. Failing" + exit 1 +fi + +STORE_RETENTION_POLICY=--store-message-retention-policy=size:1GB + +if [ -n "${STORAGE_SIZE}" ]; then + STORE_RETENTION_POLICY=--store-message-retention-policy=size:"${STORAGE_SIZE}" +fi + +echo "Using bootstrap node: ${BOOTSTRAP_ENR}" +echo "My IP is: ${IP}" + +exec /usr/bin/wakunode\ + --relay=true\ + --lightpush=true\ + --filter=true\ + --store=true\ + --max-connections=250\ + --rest=true\ + --rest-admin=true\ + --rest-address=0.0.0.0\ + --rest-port=8645\ + --rest-allow-origin="waku-org.github.io"\ + --rest-allow-origin="localhost:*"\ + --websocket-support=true\ + --rln-relay=true\ + --rln-relay-dynamic=true\ + --rln-relay-eth-client-address="$RPC_URL"\ + --rln-relay-eth-contract-address=$RLN_CONTRACT_ADDRESS\ + --rln-relay-cred-path=$RLN_CREDENTIAL_PATH\ + --rln-relay-cred-password=$RLN_CREDENTIAL_PASSWORD\ + --rln-relay-tree-path="rlnv2_tree1"\ + --rln-relay-epoch-sec=$RLN_RELAY_EPOCH_SEC\ + --rln-relay-user-message-limit=$RLN_RELAY_MSG_LIMIT\ + --dns-discovery=true\ + --discv5-discovery=true\ + --discv5-enr-auto-update=True\ + --discv5-bootstrap-node=${BOOTSTRAP_ENR}\ + --log-level=INFO\ + --metrics-server=True\ + --metrics-server-address=0.0.0.0\ + --nat=extip:${IP}\ + --tcp-port=60001\ + --nodekey=e3416f0b00005aa3ebc9cd42797b3847bfbf4fe810edaa6a1fc65e755638b7fb\ + --pubsub-topic=/waku/2/rs/66/0\ + --cluster-id=66\ + --store-message-db-url="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/postgres"\ + ${STORE_RETENTION_POLICY} diff --git a/wakusim.env b/wakusim.env index 36c1782..19dd835 100644 --- a/wakusim.env +++ b/wakusim.env @@ -1,10 +1,7 @@ # Env variables for metal-01.he-eu-hel1.misc.wakusim host. NWAKU_IMAGE=harbor.status.im/wakuorg/nwaku:latest # Network scaling. -NUM_NWAKU_NODES=50 -# Simulation traffic. -MSG_SIZE_KBYTES=10 -TRAFFIC_DELAY_SECONDS=30 +NUM_NWAKU_NODES=5 # Enable automatic Docker image updates. WATCHTOWER_ENABLED=true # Anvil RPC Node external IP and port
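
---
Usage sketch (reviewer note, not part of the patch). A minimal smoke test under the ports and credentials defined above: the service node REST API is published on host port 8645 and the edge node's on 8644 (docker-compose.yml), the /debug/v1/info endpoint is the same one the run scripts poll on the bootstrap node, and the 'messages' table is the one scraped by pg-exporter-queries.yml. Having curl, jq and a psql client on the host is an assumption.

  # bring up the simulation with the built-in Postgres-backed store
  docker compose up -d

  # service node: confirm it is up and inspect its ENR / listen addresses
  curl -s http://127.0.0.1:8645/debug/v1/info | jq .

  # edge node: a relay-less light client that uses the service node for
  # lightpush, filter and store
  curl -s http://127.0.0.1:8644/debug/v1/info | jq .

  # count stored messages directly in Postgres
  # (postgres/test123 come from the x-pg-* anchors in docker-compose.yml)
  PGPASSWORD=test123 psql -h 127.0.0.1 -U postgres -d postgres \
    -c "SELECT COUNT(*) FROM messages;"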