diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml
index ce17254..2aff6cb 100644
--- a/.github/workflows/build-binaries.yml
+++ b/.github/workflows/build-binaries.yml
@@ -15,7 +15,7 @@ jobs:
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runs-on }}
    env:
-      NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
+      LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
      CARGO_INCREMENTAL: 0
      CARGO_PROFILE_DEV_DEBUG: 0
      RUSTFLAGS: -C debuginfo=0 --cfg feature="pol-dev-mode"
@@ -26,7 +26,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -35,8 +35,8 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install system dependencies (Linux)
        if: runner.os == 'Linux'
        run: |
@@ -45,12 +45,12 @@ jobs:
          sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$NOMOS_CIRCUITS"
-          echo "NOMOS_CIRCUITS=$NOMOS_CIRCUITS" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$LOGOS_BLOCKCHAIN_CIRCUITS"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$LOGOS_BLOCKCHAIN_CIRCUITS" >> "$GITHUB_ENV"
      - name: Add top-level KZG params file
        run: |
-          curl -fsSL "https://raw.githubusercontent.com/logos-co/nomos-node/${NOMOS_NODE_REV}/tests/kzgrs/kzgrs_test_params" \
-            -o "${NOMOS_CIRCUITS}/kzgrs_test_params"
+          curl -fsSL "https://raw.githubusercontent.com/logos-co/nomos-node/${LOGOS_BLOCKCHAIN_NODE_REV}/tests/kzgrs/kzgrs_test_params" \
+            -o "${LOGOS_BLOCKCHAIN_CIRCUITS}/kzgrs_test_params"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -66,13 +66,13 @@ jobs:
          chmod +x scripts/build/build-bundle.sh
          DEST=".tmp/nomos-binaries-linux-${VERSION}.tar.gz"
          scripts/build/build-bundle.sh --platform linux --output "$DEST"
-          echo "NOMOS_BINARIES_TAR=$DEST" >> "$GITHUB_ENV"
+          echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=$DEST" >> "$GITHUB_ENV"
      - name: Save nomos binaries cache
        uses: actions/cache@v4
        with:
          path: ${{ github.workspace }}/.tmp/nomos-binaries-linux-${{ env.VERSION }}.tar.gz
-          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}
+          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}
      - uses: actions/upload-artifact@v4
        with:
-          name: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}
+          name: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}
          path: .tmp/nomos-binaries-linux-${{ env.VERSION }}.tar.gz
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index e4e9911..828921d 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -24,7 +24,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -33,12 +33,12 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
-          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -65,7 +65,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -74,12 +74,12 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
-          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -106,7 +106,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -115,12 +115,12 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
-          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -141,7 +141,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -150,12 +150,12 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
-          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -178,7 +178,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -187,8 +187,8 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -207,7 +207,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -216,12 +216,12 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Install nomos circuits
        run: |
-          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
-          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
+          echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
@@ -242,8 +242,7 @@ jobs:
      POL_PROOF_DEV_MODE: true
      LOCAL_DEMO_RUN_SECS: 120
      LOCAL_DEMO_VALIDATORS: 1
-      NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
-      NOMOS_KZGRS_PARAMS_PATH: ${{ github.workspace }}/.tmp/kzgrs_test_params
+      LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
      CARGO_INCREMENTAL: 0
      CARGO_PROFILE_DEV_DEBUG: 0
      RUSTFLAGS: -C debuginfo=0
@@ -254,7 +253,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -263,16 +262,20 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Set temp dir
        run: |
          echo "TMPDIR=${{ runner.temp }}" >> "$GITHUB_ENV"
          echo "CARGO_TARGET_DIR=${{ runner.temp }}/target-local" >> "$GITHUB_ENV"
-          echo "NOMOS_LOG_DIR=${{ runner.temp }}/local-logs" >> "$GITHUB_ENV"
-          echo "NOMOS_STATE_DIR=${{ runner.temp }}/nomos-state" >> "$GITHUB_ENV"
+          echo "LOGOS_BLOCKCHAIN_LOG_DIR=${{ runner.temp }}/local-logs" >> "$GITHUB_ENV"
+          echo "LOGOS_BLOCKCHAIN_STATE_DIR=${{ runner.temp }}/nomos-state" >> "$GITHUB_ENV"
          rm -rf "${{ runner.temp }}/local-logs" "${{ runner.temp }}/nomos-state"
          mkdir -p "${{ runner.temp }}/local-logs" "${{ runner.temp }}/nomos-state"
+      - name: Install circuits
+        run: |
+          mkdir -p "${LOGOS_BLOCKCHAIN_CIRCUITS}"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "${LOGOS_BLOCKCHAIN_CIRCUITS}"
      - name: Clean workspace caches
        run: |
          rm -rf .tmp/nomos-* testing-framework/assets/stack/kzgrs_test_params
@@ -303,13 +306,13 @@ jobs:
        uses: actions/cache@v4
        with:
          path: ${{ github.workspace }}/.tmp/nomos-binaries.tar.gz
-          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
+          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
      - name: Download nomos binaries artifact (fallback)
        if: steps.restore-nomos-bins-host.outputs.cache-hit != 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
+          ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
        run: |
          set -euo pipefail
          mkdir -p "${TMPDIR}"
@@ -338,27 +341,27 @@ jobs:
          DEST="${GITHUB_WORKSPACE}/.tmp/nomos-binaries-host-${VERSION}.tar.gz"
          if [ -f "${SRC}" ]; then
            mv "${SRC}" "${DEST}"
-            echo "NOMOS_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
+            echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
          else
            echo "Expected ${SRC} not found" >&2
            exit 1
          fi
      - name: Run host demo (scripted)
        env:
-          NOMOS_TESTS_KEEP_LOGS: "true"
+          LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS: "true"
          RUST_LOG: "info"
-          NOMOS_LOG_DIR: "${{ runner.temp }}/local-logs"
+          LOGOS_BLOCKCHAIN_LOG_DIR: "${{ runner.temp }}/local-logs"
        run: |
-          scripts/run/run-examples.sh -t 120 -v 1 -e 1 host
+          scripts/run/run-examples.sh -t 120 -n 1 host
      - name: Collect host demo logs (on failure)
        if: failure()
        run: |
-          if [ -d "${NOMOS_LOG_DIR}" ]; then
-            tar -czf "${RUNNER_TEMP}/local-logs.tgz" -C "$(dirname "${NOMOS_LOG_DIR}")" "$(basename "${NOMOS_LOG_DIR}")"
+          if [ -d "${LOGOS_BLOCKCHAIN_LOG_DIR}" ]; then
+            tar -czf "${RUNNER_TEMP}/local-logs.tgz" -C "$(dirname "${LOGOS_BLOCKCHAIN_LOG_DIR}")" "$(basename "${LOGOS_BLOCKCHAIN_LOG_DIR}")"
            echo "Local logs tar: $(realpath ${RUNNER_TEMP}/local-logs.tgz)"
-            find "${NOMOS_LOG_DIR}" -type f -print
+            find "${LOGOS_BLOCKCHAIN_LOG_DIR}" -type f -print
          else
-            echo "No local logs directory at ${NOMOS_LOG_DIR}"
+            echo "No local logs directory at ${LOGOS_BLOCKCHAIN_LOG_DIR}"
          fi
      - name: Upload host smoke logs
        if: failure()
@@ -380,8 +383,8 @@ jobs:
    runs-on: ubuntu-latest
    env:
      TMPDIR: ${{ github.workspace }}/.tmp
-      NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
-      NOMOS_TESTNET_IMAGE: nomos-testnet:${{ github.run_id }}
+      LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
+      LOGOS_BLOCKCHAIN_TESTNET_IMAGE: nomos-testnet:${{ github.run_id }}
      DOCKER_BUILDKIT: 1
      CARGO_INCREMENTAL: 0
      CARGO_PROFILE_DEV_DEBUG: 0
@@ -393,7 +396,7 @@ jobs:
        run: |
          set -euo pipefail
          if [ ! -f versions.env ]; then
-            echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
+            echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
            exit 1
          fi
          set -a
@@ -402,24 +405,28 @@ jobs:
          # $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
          grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
          : "${VERSION:?Missing VERSION}"
-          : "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
-          : "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
+          : "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
+          : "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
      - name: Prepare workspace tmpdir
        run: mkdir -p "$TMPDIR"
+      - name: Install circuits
+        run: |
+          mkdir -p "${LOGOS_BLOCKCHAIN_CIRCUITS}"
+          ./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "${LOGOS_BLOCKCHAIN_CIRCUITS}"
      - name: Restore cached nomos binaries
        id: restore-nomos-bins
        uses: actions/cache@v4
        with:
          path: ${{ github.workspace }}/.tmp/nomos-binaries.tar.gz
-          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
+          key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
      - name: Download nomos binaries artifact (fallback)
        if: steps.restore-nomos-bins.outputs.cache-hit != 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
+          ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
        run: |
          set -euo pipefail
          download_dir="${TMPDIR}/nomos-binaries-download"
@@ -450,7 +457,7 @@ jobs:
          DEST="${GITHUB_WORKSPACE}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz"
          if [ -f "${SRC}" ]; then
            mv "${SRC}" "${DEST}"
-            echo "NOMOS_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
+            echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
          else
            echo "Expected ${SRC} not found" >&2
            exit 1
@@ -495,18 +502,17 @@ jobs:
        env:
          POL_PROOF_DEV_MODE: "true"
          COMPOSE_NODE_PAIRS: "1x1"
-          NOMOS_TESTNET_IMAGE: ${{ env.NOMOS_TESTNET_IMAGE }}
+          LOGOS_BLOCKCHAIN_TESTNET_IMAGE: ${{ env.LOGOS_BLOCKCHAIN_TESTNET_IMAGE }}
          COMPOSE_RUNNER_HOST: "127.0.0.1"
-          NOMOS_TIME_BACKEND: "monotonic"
-          NOMOS_KZGRS_PARAMS_PATH: "/kzgrs_test_params/kzgrs_test_params"
+          LOGOS_BLOCKCHAIN_TIME_BACKEND: "monotonic"
          RUST_BACKTRACE: "1"
-          NOMOS_TESTS_TRACING: "true"
+          LOGOS_BLOCKCHAIN_TESTS_TRACING: "true"
          RUST_LOG: "info"
-          NOMOS_LOG_LEVEL: "info"
-          NOMOS_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs"
+          LOGOS_BLOCKCHAIN_LOG_LEVEL: "info"
+          LOGOS_BLOCKCHAIN_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs"
        run: |
          mkdir -p "$TMPDIR"
-          scripts/run/run-examples.sh -t 120 -v 1 -e 1 compose
+          scripts/run/run-examples.sh -t 120 -n 1 compose
      - name: Show compose runner log
        env:
diff --git a/Cargo.lock b/Cargo.lock
index a8b2469..c239e76 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -54,56 +54,12 @@ dependencies = [
 "libc",
]

-[[package]]
-name = "anstream"
-version = "0.6.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a"
-dependencies = [
- "anstyle",
- "anstyle-parse",
- "anstyle-query",
- "anstyle-wincon",
- "colorchoice",
- "is_terminal_polyfill",
- "utf8parse",
-]
-
[[package]]
name = "anstyle"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"

-[[package]]
-name = "anstyle-parse"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
-dependencies = [
- "utf8parse",
-]
-
-[[package]]
-name = "anstyle-query"
-version = "1.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
-dependencies = [
- "windows-sys 0.60.2",
-]
-
-[[package]]
-name = "anstyle-wincon"
-version = "3.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
-dependencies = [
- "anstyle",
- "once_cell_polyfill",
- "windows-sys 0.60.2",
-]
-
[[package]]
name = "anyhow"
version = "1.0.100"
@@ -119,12 +75,6 @@ dependencies = [
 "derive_arbitrary",
]

-[[package]]
-name = "arc-swap"
-version = "1.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
-
[[package]]
name = "archery"
version = "1.2.2"
dependencies = [
 "triomphe",
]

-[[package]]
-name = "ark-bls12-381"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488"
-dependencies = [
- "ark-ec 0.4.2",
- "ark-ff 0.4.2",
- "ark-serialize 0.4.2",
- "ark-std 0.4.0",
-]
-
[[package]]
name = "ark-bn254"
version = "0.4.0"
@@ -281,7 +219,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60"
dependencies = [
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -307,7 +245,7 @@ dependencies = [
 "num-traits",
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -353,23 +291,6 @@ dependencies = [
 "hashbrown 0.15.5",
]

-[[package]]
-name = "ark-poly-commit"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a741492629ffcd228337676dc223a28551aa6792eedb8a2a22c767f00df6c89"
-dependencies = [
- "ark-crypto-primitives",
- "ark-ec 0.4.2",
- "ark-ff 0.4.2",
- "ark-poly 0.4.2",
- "ark-relations",
- "ark-serialize 0.4.2",
- "ark-std 0.4.0",
- "derivative",
- "digest",
-]
-
[[package]]
name = "ark-relations"
version = "0.4.0"
@@ -426,7 +347,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -485,7 +406,7 @@ dependencies = [
 "nom 7.1.3",
 "num-traits",
 "rusticata-macros",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "time",
]

@@ -497,7 +418,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
 "synstructure",
]

@@ -509,7 +430,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -565,7 +486,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -587,7 +508,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -598,7 +519,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -677,7 +598,7 @@ dependencies = [
 "serde_urlencoded",
 "sync_wrapper",
 "tokio",
- "tower 0.5.2",
+ "tower 0.5.3",
 "tower-layer",
 "tower-service",
 "tracing",
@@ -704,19 +625,13 @@ dependencies = [
 "tracing",
]

-[[package]]
-name = "az"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973"
-
[[package]]
name = "backoff"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1"
dependencies = [
- "getrandom 0.2.16",
+ "getrandom 0.2.17",
 "instant",
 "rand 0.8.5",
]
@@ -783,9 +698,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"

[[package]]
name = "base64ct"
-version = "1.8.1"
+version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a"
+checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06"

[[package]]
name = "bincode"
@@ -811,7 +726,7 @@ dependencies = [
 "regex",
 "rustc-hash",
 "shlex",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"

-[[package]]
-name = "bitvec"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
-dependencies = [
- "funty",
- "radium",
- "tap",
- "wyz",
-]
-
[[package]]
name = "blake2"
version = "0.10.6"
@@ -865,34 +768,6 @@ dependencies = [
 "objc2",
]

-[[package]]
-name = "blst"
-version = "0.3.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45"
-dependencies = [
- "cc",
- "glob",
- "threadpool",
- "zeroize",
-]
-
-[[package]]
-name = "broadcast-service"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "derivative",
- "futures",
- "nomos-core",
- "overwatch",
- "serde",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
[[package]]
name = "bs58"
version = "0.5.1"
@@ -914,21 +789,9 @@ dependencies = [

[[package]]
name = "bumpalo"
-version = "3.19.0"
+version = "3.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
-
-[[package]]
-name = "bytecount"
-version = "0.6.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e"
-
-[[package]]
-name = "bytemuck"
-version = "1.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4"
+checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510"

[[package]]
name = "byteorder"
@@ -955,23 +818,11 @@ dependencies = [
 "pkg-config",
]

-[[package]]
-name = "cached"
-version = "0.55.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0839c297f8783316fcca9d90344424e968395413f0662a5481f79c6648bbc14"
-dependencies = [
- "hashbrown 0.14.5",
- "once_cell",
- "thiserror 2.0.17",
- "web-time",
-]
-
[[package]]
name = "cc"
-version = "1.2.49"
+version = "1.2.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215"
+checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583"
dependencies = [
 "find-msvc-tools",
 "jobserver",
@@ -1008,7 +859,7 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1033,146 +884,18 @@ dependencies = [
 "serde_path_to_error",
 "serde_with",
 "serde_yaml",
- "testing-framework-config 0.1.0",
- "testing-framework-core 0.1.0",
- "thiserror 2.0.17",
+ "testing-framework-config",
+ "testing-framework-core",
+ "thiserror 2.0.18",
 "tokio",
 "tracing",
]

-[[package]]
-name = "cfgsync_tf"
-version = "0.1.0"
-source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0"
-dependencies = [
- "anyhow",
- "axum",
- "clap",
- "groth16",
- "hex",
- "key-management-system-service",
- "nomos-core",
- "nomos-da-network-core",
- "nomos-executor",
- "nomos-libp2p",
- "nomos-node",
- "nomos-tracing-service",
- "nomos-utils",
- "rand 0.8.5",
- "reqwest",
- "serde",
- "serde_json",
- "serde_path_to_error",
- "serde_with",
- "serde_yaml",
- "subnetworks-assignations",
- "testing-framework-config 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "testing-framework-core 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "thiserror 2.0.17",
- "tokio",
- "tracing",
-]
-
-[[package]]
-name = "chain-common"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "nomos-core",
- "serde",
-]
-
-[[package]]
-name = "chain-leader"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "chain-common",
- "chain-service",
- "cryptarchia-engine",
- "futures",
- "key-management-system-keys",
- "nomos-blend-service",
- "nomos-core",
- "nomos-da-sampling",
- "nomos-ledger",
- "nomos-time",
- "nomos-wallet",
- "overwatch",
- "serde",
- "services-utils",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
- "tracing-futures",
- "tx-service",
-]
-
-[[package]]
-name = "chain-network"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "chain-common",
- "chain-service",
- "cryptarchia-engine",
- "cryptarchia-sync",
- "futures",
- "nomos-core",
- "nomos-da-sampling",
- "nomos-ledger",
- "nomos-network",
- "nomos-time",
- "overwatch",
- "rand 0.8.5",
- "serde",
- "serde_with",
- "services-utils",
- "thiserror 1.0.69",
- "tokio",
- "tokio-stream",
- "tracing",
- "tracing-futures",
- "tx-service",
-]
-
-[[package]]
-name = "chain-service"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "broadcast-service",
- "bytes",
- "cryptarchia-engine",
- "cryptarchia-sync",
- "futures",
- "groth16",
- "nomos-core",
- "nomos-ledger",
- "nomos-network",
- "nomos-storage",
- "nomos-time",
- "nomos-utils",
- "num-bigint",
- "overwatch",
- "serde",
- "serde_with",
- "services-utils",
- "strum",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
- "tracing-futures",
-]
-
[[package]]
name = "chrono"
-version = "0.4.42"
+version = "0.4.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
+checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118"
dependencies = [
 "iana-time-zone",
 "num-traits",
@@ -1212,23 +935,6 @@ dependencies = [
 "inout",
]

-[[package]]
-name = "circuits-prover"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "circuits-utils",
- "tempfile",
-]
-
-[[package]]
-name = "circuits-utils"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "dirs",
-]
-
[[package]]
name = "clang-sys"
version = "1.8.1"
@@ -1242,9 +948,9 @@ dependencies = [

[[package]]
name = "clap"
-version = "4.5.53"
+version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
+checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
dependencies = [
 "clap_builder",
 "clap_derive",
]

[[package]]
name = "clap_builder"
-version = "4.5.53"
+version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
+checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
dependencies = [
- "anstream",
 "anstyle",
 "clap_lex",
- "strsim",
- "terminal_size",
]

[[package]]
@@ -1272,14 +975,14 @@ dependencies = [
 "heck",
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
name = "clap_lex"
-version = "0.7.6"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
+checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32"

[[package]]
name = "color-eyre"
@@ -1294,30 +997,6 @@ dependencies = [
 "owo-colors",
]

-[[package]]
-name = "colorchoice"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
-
-[[package]]
-name = "common-http-client"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "broadcast-service",
- "chain-service",
- "futures",
- "nomos-core",
- "nomos-da-messages",
- "nomos-http-api-common",
- "reqwest",
- "serde",
- "serde_json",
- "thiserror 1.0.69",
- "url",
-]
-
[[package]]
name = "concurrent-queue"
version = "2.5.0"
dependencies = [
 "crossbeam-utils",
]

-[[package]]
-name = "console"
-version = "0.16.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03e45a4a8926227e4197636ba97a9fc9b00477e9f4bd711395687c5f0734bec4"
-dependencies = [
- "encode_unicode",
- "libc",
- "once_cell",
- "unicode-width",
- "windows-sys 0.61.2",
-]
-
[[package]]
name = "const-hex"
version = "1.17.0"
dependencies = [
 "unicode-segmentation",
]

-[[package]]
-name = "convert_case"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
-dependencies = [
- "unicode-segmentation",
-]
-
[[package]]
name = "core-foundation"
version = "0.9.4"
@@ -1407,15 +1064,6 @@ dependencies = [
 "memchr",
]

-[[package]]
-name = "counter"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f009fcafa949dc1fc46a762dae84d0c2687d3b550906b633c4979d58d2c6ae52"
-dependencies = [
- "num-traits",
-]
-
[[package]]
name = "cpufeatures"
version = "0.2.17"
@@ -1474,40 +1122,6 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"

-[[package]]
-name = "cryptarchia-engine"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "cfg_eval",
- "nomos-utils",
- "serde",
- "serde_with",
- "thiserror 1.0.69",
- "time",
- "tokio",
- "tracing",
-]
-
-[[package]]
-name = "cryptarchia-sync"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "bytes",
- "cryptarchia-engine",
- "futures",
- "libp2p",
- "libp2p-stream",
- "nomos-core",
- "rand 0.8.5",
- "serde",
- "serde_with",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
-]
-
[[package]]
name = "crypto-bigint"
version = "0.5.5"
@@ -1541,64 +1155,6 @@ dependencies = [
 "windows-sys 0.61.2",
]

-[[package]]
-name = "cucumber"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18c09939b8de21501b829a3839fa8a01ef6cc226e6bc1f5f163f7104bd5e847d"
-dependencies = [
- "anyhow",
- "clap",
- "console",
- "cucumber-codegen",
- "cucumber-expressions",
- "derive_more",
- "either",
- "futures",
- "gherkin",
- "globwalk",
- "humantime",
- "inventory",
- "itertools 0.14.0",
- "junit-report",
- "linked-hash-map",
- "pin-project",
- "ref-cast",
- "regex",
- "sealed",
- "smart-default",
-]
-
-[[package]]
-name = "cucumber-codegen"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f5afe541b5147a7b986816153ccfd502622bb37789420cfff412685f27c0a95"
-dependencies = [
- "cucumber-expressions",
- "inflections",
- "itertools 0.14.0",
- "proc-macro2",
- "quote",
- "regex",
- "syn 2.0.111",
- "synthez",
-]
-
-[[package]]
-name = "cucumber-expressions"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6401038de3af44fe74e6fccdb8a5b7db7ba418f480c8e9ad584c6f65c05a27a6"
-dependencies = [
- "derive_more",
- "either",
- "nom 8.0.0",
- "nom_locate",
- "regex",
- "regex-syntax",
-]
-
[[package]]
name = "curve25519-dalek"
version = "4.1.3"
@@ -1624,7 +1180,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1648,7 +1204,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "strsim",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1659,7 +1215,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
dependencies = [
 "darling_core",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1677,15 +1233,15 @@ dependencies = [

[[package]]
name = "data-encoding"
-version = "2.9.0"
+version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
+checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea"

[[package]]
name = "data-encoding-macro"
-version = "0.1.18"
+version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d"
+checksum = "8142a83c17aa9461d637e649271eae18bf2edd00e91f2e105df36c3c16355bdb"
dependencies = [
 "data-encoding",
 "data-encoding-macro-internal",
]

[[package]]
name = "data-encoding-macro-internal"
-version = "0.1.16"
+version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
+checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de"
dependencies = [
 "data-encoding",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1763,17 +1319,6 @@ dependencies = [
 "syn 1.0.109",
]

-[[package]]
-name = "derive-getters"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a2c35ab6e03642397cdda1dd58abbc05d418aef8e36297f336d5aba060fe8df"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "derive_arbitrary"
version = "1.4.2"
checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
-]
-
-[[package]]
-name = "derive_more"
-version = "2.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
-dependencies = [
- "derive_more-impl",
-]
-
-[[package]]
-name = "derive_more-impl"
-version = "2.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
-dependencies = [
- "convert_case 0.10.0",
- "proc-macro2",
- "quote",
- "rustc_version",
- "syn 2.0.111",
- "unicode-xid",
+ "syn 2.0.114",
]

[[package]]
@@ -1867,7 +1389,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -1910,18 +1432,18 @@ version = "0.1.0"
dependencies = [
 "anyhow",
 "async-trait",
- "testing-framework-core 0.1.0",
- "testing-framework-runner-compose 0.1.0",
+ "testing-framework-core",
+ "testing-framework-runner-compose",
 "testing-framework-runner-k8s",
- "testing-framework-runner-local 0.1.0",
- "testing-framework-workflows 0.1.0",
+ "testing-framework-runner-local",
+ "testing-framework-workflows",
]

[[package]]
name = "dtoa"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04"
+checksum = "4c3cf4824e2d5f025c7b531afcb2325364084a16806f6d47fbc1f5fbd9960590"

[[package]]
name = "ecdsa"
@@ -1972,7 +1494,7 @@ dependencies = [
 "enum-ordinalize",
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -2000,12 +1522,6 @@ dependencies = [
 "zeroize",
]

-[[package]]
-name = "encode_unicode"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
-
[[package]]
name = "enum-as-inner"
version = "0.6.1"
@@ -2015,7 +1531,7 @@ dependencies = [
 "heck",
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -2035,7 +1551,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -2075,19 +1591,6 @@ dependencies = [
 "pin-project-lite",
]

-[[package]]
-name = "executor-http-client"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "common-http-client",
- "futures",
- "nomos-core",
- "nomos-http-api-common",
- "reqwest",
- "serde",
-]
-
[[package]]
name = "eyre"
version = "0.6.12"
@@ -2122,28 +1625,15 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"

[[package]]
name = "find-msvc-tools"
-version = "0.1.5"
+version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"
-
-[[package]]
-name = "fixed"
-version = "1.29.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707070ccf8c4173548210893a0186e29c266901b71ed20cd9e2ca0193dfe95c3"
-dependencies = [
- "az",
- "bytemuck",
- "half",
- "serde",
- "typenum",
-]
+checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db"

[[package]]
name = "flate2"
-version = "1.1.5"
+version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb"
+checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369"
dependencies = [
 "crc32fast",
 "miniz_oxide",
@@ -2205,12 +1695,6 @@ dependencies = [
 "thiserror 1.0.69",
]

-[[package]]
-name = "funty"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
-
[[package]]
name = "futures"
version = "0.3.31"
@@ -2288,7 +1772,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -2298,7 +1782,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb"
dependencies = [
 "futures-io",
- "rustls 0.23.35",
+ "rustls 0.23.36",
 "rustls-pki-types",
]

@@ -2362,9 +1846,9 @@ dependencies = [

[[package]]
name = "getrandom"
-version = "0.2.16"
+version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
+checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
dependencies = [
 "cfg-if",
 "js-sys",
@@ -2387,23 +1871,6 @@ dependencies = [
 "wasm-bindgen",
]

-[[package]]
-name = "gherkin"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70197ce7751bfe8bc828e3a855502d3a869a1e9416b58b10c4bde5cf8a0a3cb3"
-dependencies = [
- "heck",
- "peg",
- "quote",
- "serde",
- "serde_json",
- "syn 2.0.111",
- "textwrap",
- "thiserror 2.0.17",
- "typed-builder",
-]
-
[[package]]
name = "gimli"
version = "0.32.3"
@@ -2472,24 +1939,6 @@ dependencies = [
 "spinning_top",
]

-[[package]]
-name = "groth16"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "ark-bn254 0.4.0",
- "ark-ec 0.4.2",
- "ark-ff 0.4.2",
- "ark-groth16",
- "ark-serialize 0.4.2",
- "generic-array 1.3.5",
- "hex",
- "num-bigint",
- "serde",
- "serde_json",
- "thiserror 2.0.17",
-]
-
[[package]]
name = "group"
version = "0.13.0"
@@ -2503,9 +1952,9 @@ dependencies = [

[[package]]
name = "h2"
-version = "0.4.12"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386"
+checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54"
dependencies = [
 "atomic-waker",
 "bytes",
@@ -2513,24 +1962,13 @@ dependencies = [
 "futures-core",
 "futures-sink",
 "http 1.4.0",
- "indexmap 2.12.1",
+ "indexmap 2.13.0",
 "slab",
 "tokio",
 "tokio-util",
 "tracing",
]

-[[package]]
-name = "half"
-version = "2.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b"
-dependencies = [
- "cfg-if",
- "crunchy",
- "zerocopy",
-]
-
[[package]]
name = "hashbrown"
version = "0.12.3"
@@ -2625,7 +2063,7 @@ dependencies = [
 "once_cell",
 "rand 0.9.2",
 "socket2 0.5.10",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tinyvec",
 "tokio",
 "tracing",
@@ -2648,7 +2086,7 @@ dependencies = [
 "rand 0.9.2",
 "resolv-conf",
 "smallvec",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tokio",
 "tracing",
]

@@ -2850,7 +2288,7 @@ dependencies = [
 "http 1.4.0",
 "hyper 1.8.1",
 "hyper-util",
- "rustls 0.23.35",
+ "rustls 0.23.36",
 "rustls-pki-types",
 "tokio",
 "tokio-rustls 0.26.4",
@@ -2917,7 +2355,7 @@ dependencies = [
 "libc",
 "percent-encoding",
 "pin-project-lite",
- "socket2 0.6.1",
+ "socket2 0.6.2",
 "tokio",
 "tower-service",
 "tracing",
@@ -3164,9 +2602,9 @@ dependencies = [

[[package]]
name = "indexmap"
-version = "2.12.1"
+version = "2.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2"
+checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
 "equivalent",
 "hashbrown 0.16.1",
@@ -3174,12 +2612,6 @@ dependencies = [
 "serde_core",
]

-[[package]]
-name = "inflections"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a257582fdcde896fd96463bf2d40eefea0580021c0712a0e2b028b60b47a837a"
-
[[package]]
name = "inout"
version = "0.1.4"
dependencies = [
 "cfg-if",
]

-[[package]]
-name = "inventory"
-version = "0.3.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc61209c082fbeb19919bee74b176221b27223e27b65d781eb91af24eb1fb46e"
-dependencies = [
- "rustversion",
-]
-
[[package]]
name = "ipconfig"
version = "0.3.2"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"

[[package]]
name = "iri-string"
-version = "0.7.9"
+version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397"
+checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
dependencies = [
 "memchr",
 "serde",
]

-[[package]]
-name = "is_terminal_polyfill"
-version = "1.70.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695"
-
[[package]]
name = "itertools"
version = "0.10.5"
dependencies = [
 "either",
]

-[[package]]
-name = "itertools"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
-dependencies = [
- "either",
-]
-
[[package]]
name = "itertools"
version = "0.13.0"

[[package]]
name = "itoa"
-version = "1.0.15"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
+checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"

[[package]]
name = "jf-crhf"

[[package]]
name = "js-sys"
-version = "0.3.83"
+version = "0.3.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8"
+checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3"
dependencies = [
 "once_cell",
 "wasm-bindgen",
]

dependencies = [
 "thiserror 1.0.69",
]

-[[package]]
-name = "junit-report"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06c3a3342e6720a82d7d179f380e9841b73a1dd49344e33959fdfe571ce56b55"
-dependencies = [
- "derive-getters",
- "quick-xml",
- "strip-ansi-escapes",
- "time",
-]
-
[[package]]
name = "k256"
version = "0.13.4"
dependencies = [
 "cpufeatures",
]

-[[package]]
-name = "key-management-system-keys"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "bytes",
- "ed25519-dalek",
- "generic-array 1.3.5",
- "groth16",
- "key-management-system-macros",
- "nomos-blend-proofs",
- "nomos-utils",
- "num-bigint",
- "poseidon2",
- "rand_core 0.6.4",
- "serde",
- "subtle",
- "thiserror 2.0.17",
- "tokio",
- "tracing",
- "x25519-dalek",
- "zeroize",
- "zksign",
-]
-
-[[package]]
-name = "key-management-system-macros"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.111",
-]
-
-[[package]]
-name = "key-management-system-service"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "key-management-system-keys",
- "log",
- "overwatch",
- "serde",
- "thiserror 2.0.17",
- "tokio",
- "tracing",
-]
-
[[package]]
name = "kube"
version = "0.87.2"
@@ -3543,41 +2888,6 @@ dependencies = [
 "tracing",
]

-[[package]]
-name = "kzgrs"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "ark-bls12-381",
- "ark-ec 0.4.2",
- "ark-ff 0.4.2",
- "ark-poly 0.4.2",
- "ark-poly-commit",
- "ark-serialize 0.4.2",
- "blake2",
- "blst",
- "num-bigint",
- "num-traits",
- "rand 0.8.5",
- "thiserror 1.0.69",
-]
-
-[[package]]
-name = "kzgrs-backend"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "ark-ff 0.4.2",
- "ark-poly 0.4.2",
- "ark-serialize 0.4.2",
- "blake2",
- "itertools 0.12.1",
- "kzgrs",
- "nomos-core",
- "rand 0.8.5",
- "serde",
-]
-
[[package]]
name = "lazy_static"
version = "1.5.0"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"

[[package]]
name = "libc"
-version = "0.2.178"
+version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091"
+checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"

[[package]]
name = "libloading"
@@ -3602,9 +2912,9 @@ dependencies = [

[[package]]
name = "libm"
-version = "0.2.15"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
+checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981"

[[package]]
name = "libp2p"
@@ -3616,7 +2926,7 @@ dependencies = [
 "either",
 "futures",
 "futures-timer",
- "getrandom 0.2.16",
+ "getrandom 0.2.17",
 "libp2p-allow-block-list",
 "libp2p-autonat",
 "libp2p-connection-limits",
@@ -3635,7 +2945,7 @@ dependencies = [
 "multiaddr",
 "pin-project",
 "rw-stream-sink",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
@@ -3669,7 +2979,7 @@ dependencies = [
 "quick-protobuf-codec",
 "rand 0.8.5",
 "rand_core 0.6.4",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
 "web-time",
]

[[package]]
@@ -3687,9 +2997,9 @@ dependencies = [

[[package]]
name = "libp2p-core"
-version = "0.43.1"
+version = "0.43.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d28e2d2def7c344170f5c6450c0dbe3dfef655610dbfde2f6ac28a527abbe36"
+checksum = "249128cd37a2199aff30a7675dffa51caf073b51aa612d2f544b19932b9aebca"
dependencies = [
 "either",
 "fnv",
@@ -3704,7 +3014,7 @@ dependencies = [
 "quick-protobuf",
 "rand 0.8.5",
 "rw-stream-sink",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
 "unsigned-varint 0.8.0",
 "web-time",
@@ -3741,7 +3051,7 @@ dependencies = [
 "fnv",
 "futures",
 "futures-timer",
- "getrandom 0.2.16",
+ "getrandom 0.2.17",
 "hashlink",
 "hex_fmt",
 "libp2p-core",
@@ -3775,7 +3085,7 @@ dependencies = [
 "quick-protobuf",
 "quick-protobuf-codec",
 "smallvec",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
]

@@ -3795,7 +3105,7 @@ dependencies = [
 "rand 0.8.5",
 "serde",
 "sha2",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
 "zeroize",
]

@@ -3822,7 +3132,7 @@ dependencies = [
 "serde",
 "sha2",
 "smallvec",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
 "uint",
 "web-time",
@@ -3880,9 +3190,9 @@ dependencies = [
 "quinn",
 "rand 0.8.5",
 "ring",
- "rustls 0.23.35",
+ "rustls 0.23.36",
 "socket2 0.5.10",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tokio",
 "tracing",
]

@@ -3950,7 +3260,7 @@ dependencies = [
 "heck",
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -3981,9 +3291,9 @@ dependencies = [
 "libp2p-identity",
 "rcgen",
 "ring",
- "rustls 0.23.35",
- "rustls-webpki 0.103.8",
- "thiserror 2.0.17",
+ "rustls 0.23.36",
+ "rustls-webpki 0.103.9",
+ "thiserror 2.0.18",
 "x509-parser",
 "yasna",
]

@@ -4005,9 +3315,9 @@ dependencies = [

[[package]]
name = "libredox"
-version = "0.1.10"
+version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb"
+checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
dependencies = [
 "bitflags 2.10.0",
 "libc",
@@ -4049,12 +3359,6 @@ dependencies = [
 "thiserror 1.0.69",
]

-[[package]]
-name = "linked-hash-map"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
-
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
@@ -4468,7 +3772,7 @@ dependencies = [
 "num-bigint",
 "serde",
 "serde_json",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
@@ -4505,7 +3809,7 @@ dependencies = [
 "rand_core 0.6.4",
 "serde",
 "subtle",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tokio",
 "tracing",
 "x25519-dalek",
@@ -4519,7 +3823,7 @@ source = "git+https://github.com/logos-co/nomos-node.git?rev=47ae18e95f643bde563
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.111",
+ "syn 2.0.114",
]

[[package]]
@@ -4532,7 +3836,7 @@ dependencies = [
 "logos-blockchain-key-management-system-keys",
 "overwatch",
 "serde",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tokio",
 "tracing",
]

@@ -4666,7 +3970,7 @@ dependencies = [
 "serde_json",
 "serde_with",
 "serde_yaml",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "time",
 "tokio",
 "tokio-stream",
@@ -4690,7 +3994,7 @@ dependencies = [
 "num-traits",
 "serde",
 "serde_json",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
@@ -4706,7 +4010,7 @@ dependencies = [
 "num-bigint",
 "serde",
 "serde_json",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
@@ -4732,7 +4036,7 @@ dependencies = [
 "logos-blockchain-tx-service",
 "overwatch",
 "serde",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tokio",
 "tokio-stream",
 "tracing",
@@ -4782,58 +4086,6 @@ dependencies = [
 "tracing",
]

-[[package]]
-name = "logos-blockchain-tests"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=47ae18e95f643bde563b4769212b37f6f018fed3#47ae18e95f643bde563b4769212b37f6f018fed3"
-dependencies = [
- "async-trait",
- "cucumber",
- "futures",
- "futures-util",
- "hex",
- "logos-blockchain-api-service",
- "logos-blockchain-blend-proofs",
- "logos-blockchain-blend-service",
- "logos-blockchain-chain-broadcast-service",
- "logos-blockchain-chain-leader-service",
- "logos-blockchain-chain-network-service",
- "logos-blockchain-chain-service",
- "logos-blockchain-common-http-client",
- "logos-blockchain-core",
- "logos-blockchain-cryptarchia-engine",
- "logos-blockchain-cryptarchia-sync",
- "logos-blockchain-groth16",
- "logos-blockchain-http-api-common",
- "logos-blockchain-key-management-system-service",
- "logos-blockchain-libp2p",
- "logos-blockchain-network-service",
- "logos-blockchain-node",
- "logos-blockchain-sdp-service",
- "logos-blockchain-time-service",
- "logos-blockchain-tracing",
- "logos-blockchain-tracing-service",
- "logos-blockchain-tx-service",
- "logos-blockchain-utils",
- "logos-blockchain-wallet-service",
- "num-bigint",
- "rand 0.8.5",
- "reqwest",
- "serde_json",
- "serde_yaml",
- "serial_test",
- "tempfile",
- "testing-framework-core 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "testing-framework-runner-compose 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "testing-framework-runner-local 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "testing-framework-workflows 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)",
- "thiserror 2.0.17",
- "time",
- "tokio",
- "tracing",
- "tracing-subscriber 0.3.22",
-]
-
[[package]]
name = "logos-blockchain-time-service"
version = "0.1.0"
@@ -4849,7 +4101,7 @@ dependencies = [
 "serde",
 "serde_with",
 "sntpc",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "time",
 "tokio",
 "tokio-stream",
@@ -4957,7 +4209,7 @@ dependencies = [
 "logos-blockchain-ledger",
 "num-bigint",
 "rpds",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
 "tracing",
]

@@ -5006,7 +4258,7 @@ dependencies = [
 "num-bigint",
 "serde",
 "serde_json",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
@@ -5036,13 +4288,13 @@ checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"

[[package]]
name = "match-lookup"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e"
+checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 1.0.109",
+ "syn 2.0.114",
]

[[package]]
@@ -5121,23 +4373,11 @@ dependencies = [
 "windows-sys 0.61.2",
]

-[[package]]
-name = "mmr"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "ark-ff 0.4.2",
- "groth16",
- "poseidon2",
- "rpds",
- "serde",
-]
-
[[package]]
name = "moka"
-version = "0.12.11"
+version = "0.12.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
+checksum = "b4ac832c50ced444ef6be0767a008b02c106a909ba79d1d830501e94b96f6b7e"
dependencies = [
 "crossbeam-channel",
 "crossbeam-epoch",
@@ -5145,7 +4385,6 @@ dependencies = [
 "equivalent",
 "parking_lot",
 "portable-atomic",
- "rustc_version",
 "smallvec",
 "tagptr",
 "uuid",
@@ -5327,17 +4566,17 @@ dependencies = [
 "log",
 "netlink-packet-core 0.7.0",
 "netlink-sys",
- "thiserror 2.0.17",
+ "thiserror 2.0.18",
]

[[package]]
name = "netlink-sys"
-version = "0.8.7"
+version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23"
+checksum = "cd6c30ed10fa69cc491d491b85cc971f6bdeb8e7367b7cde2ee6cc878d583fae"
dependencies = [
 "bytes",
- "futures",
+ "futures-util",
 "libc",
 "log",
 "tokio",
@@ -5409,717 +4648,6 @@ dependencies = [
 "memchr",
]

-[[package]]
-name = "nom_locate"
-version = "5.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b577e2d69827c4740cba2b52efaad1c4cc7c73042860b199710b3575c68438d"
-dependencies = [
- "bytecount",
- "memchr",
- "nom 8.0.0",
-]
-
-[[package]]
-name = "nomos-api"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "async-trait",
- "broadcast-service",
- "bytes",
- "chain-service",
- "futures",
- "kzgrs-backend",
- "nomos-core",
- "nomos-da-dispersal",
- "nomos-da-network-core",
- "nomos-da-network-service",
- "nomos-da-sampling",
- "nomos-da-verifier",
- "nomos-libp2p",
- "nomos-network",
- "nomos-sdp",
- "nomos-storage",
- "nomos-time",
- "overwatch",
- "serde",
- "serde_json",
- "subnetworks-assignations",
- "tokio",
- "tokio-stream",
- "tracing",
- "tx-service",
- "utoipa",
- "utoipa-swagger-ui",
-]
-
-[[package]]
-name = "nomos-blend"
-version = "0.1.0"
-source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8"
-dependencies = [
- "nomos-blend-crypto",
- "nomos-blend-message",
-
"nomos-blend-network", - "nomos-blend-proofs", - "nomos-blend-scheduling", -] - -[[package]] -name = "nomos-blend-crypto" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "blake2", - "groth16", - "nomos-utils", - "poq", - "poseidon2", - "rs-merkle-tree", - "thiserror 1.0.69", -] - -[[package]] -name = "nomos-blend-message" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "blake2", - "derivative", - "groth16", - "itertools 0.14.0", - "key-management-system-keys", - "nomos-blend-crypto", - "nomos-blend-proofs", - "nomos-core", - "nomos-utils", - "serde", - "serde-big-array", - "serde_with", - "thiserror 1.0.69", - "tracing", - "zeroize", -] - -[[package]] -name = "nomos-blend-network" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "either", - "futures", - "futures-timer", - "key-management-system-keys", - "libp2p", - "nomos-blend-message", - "nomos-blend-proofs", - "nomos-blend-scheduling", - "nomos-core", - "nomos-libp2p", - "tracing", -] - -[[package]] -name = "nomos-blend-proofs" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "ed25519-dalek", - "generic-array 1.3.5", - "groth16", - "nomos-blend-crypto", - "num-bigint", - "poq", - "serde", - "thiserror 1.0.69", -] - -[[package]] -name = "nomos-blend-scheduling" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "derivative", - "fork_stream", - "futures", - "key-management-system-keys", - "multiaddr", - "nomos-blend-message", - "nomos-blend-proofs", - "nomos-core", - "nomos-utils", - "rand 0.8.5", - "serde", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "nomos-blend-service" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "broadcast-service", - "chain-service", - "cryptarchia-engine", - "fork_stream", - "futures", - "groth16", - "key-management-system-service", - "libp2p", - "libp2p-stream", - "nomos-blend", - "nomos-core", - "nomos-ledger", - "nomos-libp2p", - "nomos-network", - "nomos-sdp", - "nomos-time", - "nomos-utils", - "overwatch", - "poq", - "rand 0.8.5", - "serde", - "serde_with", - "services-utils", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "nomos-core" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "ark-ff 0.4.2", - "async-trait", - "bincode", - "blake2", - "bytes", - "const-hex", - "cryptarchia-engine", - "futures", - "generic-array 1.3.5", - "groth16", - "hex", - "key-management-system-keys", - "multiaddr", - "nom 8.0.0", - "nomos-blend-proofs", - "nomos-utils", - "num-bigint", - "pol", - "poseidon2", - "serde", - "strum", - "thiserror 1.0.69", - "tracing", -] - 
-[[package]] -name = "nomos-da-dispersal" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "broadcast-service", - "futures", - "key-management-system-keys", - "kzgrs-backend", - "nomos-core", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-tracing", - "nomos-utils", - "overwatch", - "serde", - "serde_with", - "services-utils", - "subnetworks-assignations", - "thiserror 2.0.17", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "nomos-da-messages" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "blake2", - "futures", - "kzgrs-backend", - "nomos-core", - "serde", - "tokio", -] - -[[package]] -name = "nomos-da-network-core" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "cached", - "fixed", - "futures", - "indexmap 2.12.1", - "kzgrs-backend", - "libp2p", - "libp2p-stream", - "log", - "nomos-core", - "nomos-da-messages", - "nomos-utils", - "rand 0.9.2", - "serde", - "serde_with", - "subnetworks-assignations", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", - "tracing-subscriber 0.3.22", -] - -[[package]] -name = "nomos-da-network-service" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "arc-swap", - "async-trait", - "bitvec", - "blake2", - "broadcast-service", - "common-http-client", - "futures", - "kzgrs-backend", - "libp2p", - "libp2p-identity", - "log", - "multiaddr", - "nomos-core", - "nomos-da-messages", - "nomos-da-network-core", - "nomos-libp2p", - "nomos-sdp", - "nomos-storage", - "nomos-tracing", - "nomos-utils", - "overwatch", - "rand 0.8.5", - "serde", - "services-utils", - "subnetworks-assignations", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", - "url", -] - -[[package]] -name = "nomos-da-sampling" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "bytes", - "either", - "futures", - "hex", - "kzgrs-backend", - "libp2p-identity", - "nomos-core", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-storage", - "nomos-tracing", - "overwatch", - "rand 0.8.5", - "serde", - "services-utils", - "subnetworks-assignations", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", - "tx-service", -] - -[[package]] -name = "nomos-da-verifier" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "futures", - "kzgrs-backend", - "libp2p", - "nomos-core", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-storage", - "nomos-tracing", - "nomos-utils", - "overwatch", - "serde", - "serde_with", - "services-utils", - "subnetworks-assignations", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", - "tx-service", -] - -[[package]] -name = "nomos-executor" -version = "0.1.0" -source = 
"git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "axum", - "broadcast-service", - "clap", - "color-eyre", - "futures", - "kzgrs-backend", - "nomos-api", - "nomos-core", - "nomos-da-dispersal", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-da-sampling", - "nomos-da-verifier", - "nomos-http-api-common", - "nomos-libp2p", - "nomos-network", - "nomos-node", - "nomos-sdp", - "nomos-storage", - "nomos-time", - "overwatch", - "serde", - "services-utils", - "subnetworks-assignations", - "tokio", - "tower 0.4.13", - "tower-http 0.5.2", - "tracing", - "tx-service", - "utoipa", - "utoipa-swagger-ui", -] - -[[package]] -name = "nomos-http-api-common" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "axum", - "governor", - "key-management-system-keys", - "nomos-core", - "serde", - "serde_json", - "serde_with", - "tower_governor", -] - -[[package]] -name = "nomos-ledger" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "cryptarchia-engine", - "groth16", - "key-management-system-keys", - "mmr", - "nomos-blend-crypto", - "nomos-blend-message", - "nomos-blend-proofs", - "nomos-core", - "nomos-utils", - "num-bigint", - "rand 0.8.5", - "rpds", - "serde", - "thiserror 1.0.69", - "tracing", - "utxotree", -] - -[[package]] -name = "nomos-libp2p" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "backon", - "blake2", - "cryptarchia-sync", - "default-net", - "either", - "futures", - "hex", - "igd-next 0.16.2", - "libp2p", - "multiaddr", - "natpmp", - "netdev", - "nomos-utils", - "num_enum", - "rand 0.8.5", - "serde", - "serde_with", - "thiserror 1.0.69", - "tokio", - "tracing", - "zerocopy", -] - -[[package]] -name = "nomos-network" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "cryptarchia-sync", - "futures", - "nomos-core", - "nomos-libp2p", - "overwatch", - "rand 0.8.5", - "rand_chacha 0.3.1", - "serde", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "nomos-node" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "axum", - "broadcast-service", - "chain-leader", - "chain-network", - "chain-service", - "clap", - "color-eyre", - "cryptarchia-engine", - "futures", - "groth16", - "hex", - "http 1.4.0", - "key-management-system-service", - "kzgrs-backend", - "nomos-api", - "nomos-blend", - "nomos-blend-service", - "nomos-core", - "nomos-da-messages", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-da-sampling", - "nomos-da-verifier", - "nomos-http-api-common", - "nomos-ledger", - "nomos-libp2p", - "nomos-network", - "nomos-sdp", - "nomos-storage", - "nomos-system-sig", - "nomos-time", - "nomos-tracing", - "nomos-tracing-service", - "nomos-utils", - "nomos-wallet", - "num-bigint", - "overwatch", - "pol", - "poq", - "serde", - 
"serde_ignored", - "serde_json", - "serde_with", - "serde_yaml", - "services-utils", - "subnetworks-assignations", - "thiserror 2.0.17", - "time", - "tokio", - "tokio-stream", - "tower 0.4.13", - "tower-http 0.5.2", - "tracing", - "tx-service", - "utoipa", - "utoipa-swagger-ui", -] - -[[package]] -name = "nomos-sdp" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "futures", - "key-management-system-keys", - "nomos-core", - "overwatch", - "serde", - "thiserror 2.0.17", - "tokio", - "tokio-stream", - "tracing", - "tx-service", -] - -[[package]] -name = "nomos-storage" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "bytes", - "cryptarchia-engine", - "futures", - "libp2p-identity", - "multiaddr", - "nomos-core", - "overwatch", - "rocksdb", - "serde", - "thiserror 1.0.69", - "tokio", - "tracing", -] - -[[package]] -name = "nomos-system-sig" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-ctrlc", - "async-trait", - "overwatch", - "tracing", -] - -[[package]] -name = "nomos-time" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "cfg_eval", - "cryptarchia-engine", - "futures", - "log", - "nomos-utils", - "overwatch", - "serde", - "serde_with", - "sntpc", - "thiserror 2.0.17", - "time", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "nomos-tracing" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "opentelemetry", - "opentelemetry-http", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "opentelemetry_sdk", - "rand 0.8.5", - "reqwest", - "serde", - "tokio", - "tracing", - "tracing-appender", - "tracing-gelf", - "tracing-loki", - "tracing-opentelemetry", - "tracing-subscriber 0.3.22", - "url", -] - -[[package]] -name = "nomos-tracing-service" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "nomos-tracing", - "overwatch", - "serde", - "tracing", - "tracing-appender", - "tracing-subscriber 0.3.22", -] - -[[package]] -name = "nomos-utils" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "blake2", - "cipher", - "const-hex", - "humantime", - "overwatch", - "rand 0.8.5", - "serde", - "serde_with", - "time", -] - -[[package]] -name = "nomos-wallet" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "bytes", - "chain-service", - "futures", - "groth16", - "hex", - "key-management-system-service", - "nomos-core", - "nomos-ledger", - "nomos-storage", - "overwatch", - "serde", - "services-utils", - 
"thiserror 1.0.69", - "tokio", - "tracing", - "wallet", -] - [[package]] name = "nonempty" version = "0.7.0" @@ -6153,9 +4681,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -6204,7 +4732,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6246,12 +4774,6 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" -[[package]] -name = "once_cell_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" - [[package]] name = "openssl" version = "0.10.75" @@ -6275,7 +4797,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6403,7 +4925,7 @@ dependencies = [ "async-trait", "futures", "overwatch-derive", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -6415,11 +4937,11 @@ name = "overwatch-derive" version = "0.1.0" source = "git+https://github.com/logos-co/Overwatch?rev=f5a9902#f5a99022f389d65adbd55e51f1e3f9eead62432a" dependencies = [ - "convert_case 0.8.0", + "convert_case", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6472,33 +4994,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "peg" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f76678828272f177ac33b7e2ac2e3e73cc6c1cd1e3e387928aa69562fa51367" -dependencies = [ - "peg-macros", - "peg-runtime", -] - -[[package]] -name = "peg-macros" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636d60acf97633e48d266d7415a9355d4389cea327a193f87df395d88cd2b14d" -dependencies = [ - "peg-runtime", - "proc-macro2", - "quote", -] - -[[package]] -name = "peg-runtime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555b1514d2d99d78150d3c799d4c357a3e2c2a8062cd108e93a06d9057629c5" - [[package]] name = "pem" version = "3.0.6" @@ -6517,9 +5012,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" +checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7" dependencies = [ "memchr", "ucd-trie", @@ -6527,9 +5022,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f72981ade67b1ca6adc26ec221be9f463f2b5839c7508998daa17c23d94d7f" +checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed" dependencies = [ "pest", "pest_generator", @@ -6537,22 +5032,22 @@ dependencies = [ [[package]] name = "pest_generator" 
-version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee9efd8cdb50d719a80088b76f81aec7c41ed6d522ee750178f83883d271625" +checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "pest_meta" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf1d70880e76bdc13ba52eafa6239ce793d85c8e43896507e43dd8984ff05b82" +checksum = "602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365" dependencies = [ "pest", "sha2", @@ -6613,7 +5108,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6644,22 +5139,6 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" -[[package]] -name = "pol" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "circuits-prover", - "circuits-utils", - "groth16", - "num-bigint", - "num-traits", - "serde", - "serde_json", - "thiserror 2.0.17", - "witness-generator", -] - [[package]] name = "polling" version = "3.11.0" @@ -6674,38 +5153,11 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "poq" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "circuits-prover", - "circuits-utils", - "groth16", - "num-bigint", - "pol", - "serde", - "serde_json", - "thiserror 2.0.17", - "witness-generator", -] - [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" - -[[package]] -name = "poseidon2" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "ark-bn254 0.4.0", - "ark-ff 0.4.2", - "jf-poseidon2", - "num-bigint", -] +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "potential_utf" @@ -6783,14 +5235,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -6815,7 +5267,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6866,7 +5318,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6915,15 +5367,6 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "quick-xml" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" -dependencies = [ - "memchr", -] - [[package]] name = "quinn" version = "0.11.9" @@ -6937,9 +5380,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.35", - "socket2 0.6.1", - "thiserror 2.0.17", + "rustls 0.23.36", + "socket2 0.6.2", + "thiserror 2.0.18", "tokio", "tracing", "web-time", @@ -6957,10 +5400,10 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash", - "rustls 0.23.35", + "rustls 0.23.36", "rustls-pki-types", "slab", - "thiserror 2.0.17", + "thiserror 2.0.18", "tinyvec", "tracing", "web-time", @@ -6975,16 +5418,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.6.2", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.42" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -6995,12 +5438,6 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.8.5" @@ -7019,7 +5456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7039,7 +5476,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7048,14 +5485,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -7066,7 +5503,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7106,29 +5543,9 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "libredox", - "thiserror 2.0.17", -] - -[[package]] -name = "ref-cast" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", + "thiserror 2.0.18", ] [[package]] @@ -7162,9 +5579,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.26" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", @@ -7184,7 +5601,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.35", + "rustls 0.23.36", "rustls-pki-types", "serde", "serde_json", @@ -7194,7 +5611,7 @@ dependencies = [ "tokio-native-tls", "tokio-rustls 0.26.4", "tokio-util", - "tower 0.5.2", + "tower 0.5.3", "tower-http 0.6.8", "tower-service", "url", @@ -7229,7 +5646,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -7269,7 +5686,7 @@ dependencies = [ "quote", "rand 0.9.2", "syn 1.0.109", - "thiserror 2.0.17", + "thiserror 2.0.18", "tiny-keccak", "tokio", ] @@ -7298,11 +5715,11 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "testing-framework-core 0.1.0", - "testing-framework-runner-compose 0.1.0", + "testing-framework-core", + "testing-framework-runner-compose", "testing-framework-runner-k8s", - "testing-framework-runner-local 0.1.0", - "testing-framework-workflows 0.1.0", + "testing-framework-runner-local", + "testing-framework-workflows", "tokio", "tracing", "tracing-subscriber 0.3.22", @@ -7310,9 +5727,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "8.9.0" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "947d7f3fad52b283d261c4c99a084937e2fe492248cb9a68a8435a861b8798ca" +checksum = "04113cb9355a377d83f06ef1f0a45b8ab8cd7d8b1288160717d66df5c7988d27" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -7321,22 +5738,22 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.9.0" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fa2c8c9e8711e10f9c4fd2d64317ef13feaab820a4c51541f1a8c8e2e851ab2" +checksum = "da0902e4c7c8e997159ab384e6d0fc91c221375f6894346ae107f47dd0f3ccaa" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.111", + "syn 2.0.114", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "8.9.0" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b161f275cb337fe0a44d924a5f4df0ed69c2c39519858f931ce61c779d3475" +checksum = "5bcdef0be6fe7f6fa333b1073c949729274b05f123a0ad7efcb8efd878e5c3b1" dependencies = [ "sha2", "walkdir", @@ -7344,9 +5761,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -7374,9 +5791,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags 2.10.0", "errno", @@ -7399,14 +5816,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.9", "subtle", "zeroize", ] @@ -7434,9 +5851,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "web-time", "zeroize", @@ -7454,9 +5871,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -7482,9 +5899,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -7495,15 +5912,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "scc" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" -dependencies = [ - "sdd", -] - [[package]] name = "schannel" version = "0.1.28" @@ -7529,23 +5937,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sdd" -version = "3.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" - -[[package]] -name = "sealed" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "sec1" version = "0.7.3" @@ -7645,7 +6036,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -7660,15 +6051,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -7718,7 +6109,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -7727,53 +6118,13 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "ryu", "serde", "unsafe-libyaml", ] -[[package]] -name = "serial_test" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" -dependencies = [ - "futures", - "log", - "once_cell", - "parking_lot", - "scc", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - -[[package]] -name = "services-utils" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "futures", - "log", - "overwatch", - "serde", - "serde_json", - "thiserror 1.0.69", - "tracing", -] - [[package]] name = "sha2" version = "0.10.9" @@ -7802,10 +6153,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -7853,23 +6205,6 @@ version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" -[[package]] -name = "smart-default" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eb01866308440fc64d6c44d9e86c5cc17adfe33c4d6eed55da9145044d0ffc1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - -[[package]] -name = "smawk" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" - [[package]] name = "snap" version = "1.1.1" @@ -7898,9 +6233,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", "windows-sys 0.60.2", @@ -7937,15 +6272,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "strip-ansi-escapes" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a8f8038e7e7969abb3f1b7c2a811225e9296da208539e0f79c5251d6cac0025" -dependencies = [ - "vte", -] - [[package]] name = "strsim" version = "0.11.1" @@ -7970,20 +6296,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "subnetworks-assignations" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "counter", - "libp2p-identity", - "nomos-core", - "nomos-utils", - "rand 0.8.5", - "serde", + "syn 2.0.114", 
] [[package]] @@ -8005,9 +6318,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -8031,40 +6344,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "synthez" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d8a928f38f1bc873f28e0d2ba8298ad65374a6ac2241dabd297271531a736cd" -dependencies = [ - "syn 2.0.111", - "synthez-codegen", - "synthez-core", -] - -[[package]] -name = "synthez-codegen" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb83b8df4238e11746984dfb3819b155cd270de0e25847f45abad56b3671047" -dependencies = [ - "syn 2.0.111", - "synthez-core", -] - -[[package]] -name = "synthez-core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906fba967105d822e7c7ed60477b5e76116724d33de68a585681fb253fc30d5c" -dependencies = [ - "proc-macro2", - "quote", - "sealed", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -8115,17 +6395,11 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", "getrandom 0.3.4", @@ -8156,21 +6430,10 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "terminal_size" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" -dependencies = [ - "rustix", - "windows-sys 0.60.2", -] - [[package]] name = "testing-framework-config" version = "0.1.0" dependencies = [ - "blst", "hex", "logos-blockchain-api-service", "logos-blockchain-blend-service", @@ -8194,50 +6457,8 @@ dependencies = [ "num-bigint", "rand 0.8.5", "serde", - "testing-framework-env 0.1.0", - "thiserror 2.0.17", - "time", - "tracing", -] - -[[package]] -name = "testing-framework-config" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" -dependencies = [ - "blst", - "chain-leader", - "chain-network", - "chain-service", - "cryptarchia-engine", - "cryptarchia-sync", - "groth16", - "hex", - "key-management-system-service", - "nomos-api", - "nomos-blend-service", - "nomos-core", - "nomos-da-dispersal", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-da-sampling", - "nomos-da-verifier", - "nomos-executor", - "nomos-ledger", - "nomos-libp2p", - "nomos-node", - "nomos-sdp", - "nomos-time", - "nomos-tracing", - "nomos-tracing-service", - "nomos-utils", - "nomos-wallet", - "num-bigint", - "rand 0.8.5", - 
"serde", - "subnetworks-assignations", - "testing-framework-env 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "thiserror 2.0.17", + "testing-framework-env", + "thiserror 2.0.18", "time", "tracing", ] @@ -8270,48 +6491,9 @@ dependencies = [ "serde_with", "serde_yaml", "tempfile", - "testing-framework-config 0.1.0", - "testing-framework-env 0.1.0", - "thiserror 2.0.17", - "tokio", - "tracing", -] - -[[package]] -name = "testing-framework-core" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" -dependencies = [ - "anyhow", - "async-trait", - "chain-service", - "common-http-client", - "futures", - "groth16", - "hex", - "key-management-system-service", - "nomos-core", - "nomos-da-network-core", - "nomos-da-network-service", - "nomos-executor", - "nomos-http-api-common", - "nomos-libp2p", - "nomos-network", - "nomos-node", - "nomos-tracing", - "nomos-tracing-service", - "nomos-utils", - "prometheus-http-query", - "rand 0.8.5", - "reqwest", - "serde", - "serde_json", - "serde_with", - "serde_yaml", - "tempfile", - "testing-framework-config 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "testing-framework-env 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "thiserror 2.0.17", + "testing-framework-config", + "testing-framework-env", + "thiserror 2.0.18", "tokio", "tracing", ] @@ -8320,23 +6502,17 @@ dependencies = [ name = "testing-framework-env" version = "0.1.0" -[[package]] -name = "testing-framework-env" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" - [[package]] name = "testing-framework-runner-compose" version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "cfgsync_tf 0.1.0", + "cfgsync_tf", "logos-blockchain-core", "logos-blockchain-groth16", "logos-blockchain-key-management-system-service", "logos-blockchain-ledger", - "logos-blockchain-tests", "logos-blockchain-tracing", "logos-blockchain-tracing-service", "logos-blockchain-zksign", @@ -8344,34 +6520,10 @@ dependencies = [ "serde", "tempfile", "tera", - "testing-framework-config 0.1.0", - "testing-framework-core 0.1.0", - "testing-framework-env 0.1.0", - "thiserror 2.0.17", - "tokio", - "tracing", - "url", - "uuid", -] - -[[package]] -name = "testing-framework-runner-compose" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" -dependencies = [ - "anyhow", - "async-trait", - "cfgsync_tf 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "nomos-tracing", - "nomos-tracing-service", - "reqwest", - "serde", - "tempfile", - "tera", - "testing-framework-config 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "testing-framework-core 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "testing-framework-env 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "thiserror 2.0.17", + "testing-framework-config", + "testing-framework-core", + "testing-framework-env", + "thiserror 2.0.18", "tokio", "tracing", "url", @@ -8392,10 +6544,10 @@ dependencies = [ "serde", "serde_yaml", "tempfile", - "testing-framework-config 
0.1.0", - "testing-framework-core 0.1.0", - "testing-framework-env 0.1.0", - "thiserror 2.0.17", + "testing-framework-config", + "testing-framework-core", + "testing-framework-env", + "thiserror 2.0.18", "tokio", "tracing", "url", @@ -8412,24 +6564,13 @@ dependencies = [ "logos-blockchain-node", "logos-blockchain-utils", "rand 0.8.5", - "testing-framework-config 0.1.0", - "testing-framework-core 0.1.0", - "thiserror 2.0.17", + "testing-framework-config", + "testing-framework-core", + "thiserror 2.0.18", "tokio", "tracing", ] -[[package]] -name = "testing-framework-runner-local" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" -dependencies = [ - "async-trait", - "testing-framework-core 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "thiserror 2.0.17", - "tracing", -] - [[package]] name = "testing-framework-workflows" version = "0.1.0" @@ -8440,44 +6581,13 @@ dependencies = [ "logos-blockchain-key-management-system-service", "rand 0.8.5", "reqwest", - "testing-framework-config 0.1.0", - "testing-framework-core 0.1.0", - "thiserror 2.0.17", + "testing-framework-config", + "testing-framework-core", + "thiserror 2.0.18", "tokio", "tracing", ] -[[package]] -name = "testing-framework-workflows" -version = "0.1.0" -source = "git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master#28685298ba8b8bb8e628ac4649fac150276aa5d0" -dependencies = [ - "async-trait", - "chain-service", - "executor-http-client", - "futures", - "key-management-system-service", - "nomos-core", - "rand 0.8.5", - "reqwest", - "testing-framework-config 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "testing-framework-core 0.1.0 (git+https://github.com/logos-blockchain/logos-blockchain-testing.git?branch=master)", - "thiserror 2.0.17", - "tokio", - "tracing", -] - -[[package]] -name = "textwrap" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" -dependencies = [ - "smawk", - "unicode-linebreak", - "unicode-width", -] - [[package]] name = "thiserror" version = "1.0.69" @@ -8489,11 +6599,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ -8504,18 +6614,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -8527,41 +6637,32 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" 
-dependencies = [ - "num_cpus", -] - [[package]] name = "time" -version = "0.3.44" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -8603,9 +6704,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", @@ -8613,7 +6714,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -8636,7 +6737,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -8665,15 +6766,15 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.35", + "rustls 0.23.36", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -8683,9 +6784,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -8697,20 +6798,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] [[package]] name = "toml_edit" -version = "0.23.9" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - 
"indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime", "toml_parser", "winnow", @@ -8718,9 +6819,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -8777,9 +6878,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", @@ -8843,7 +6944,7 @@ dependencies = [ "http-body 1.0.1", "iri-string", "pin-project-lite", - "tower 0.5.2", + "tower 0.5.3", "tower-layer", "tower-service", ] @@ -8878,9 +6979,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -8895,7 +6996,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" dependencies = [ "crossbeam-channel", - "thiserror 2.0.17", + "thiserror 2.0.18", "time", "tracing-subscriber 0.3.22", ] @@ -8908,14 +7009,14 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -9048,48 +7149,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "tx-service" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "async-trait", - "futures", - "nomos-core", - "nomos-network", - "nomos-storage", - "overwatch", - "rand 0.8.5", - "serde", - "serde_json", - "services-utils", - "thiserror 1.0.69", - "tokio", - "tokio-stream", - "tracing", - "utoipa", -] - -[[package]] -name = "typed-builder" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31aa81521b70f94402501d848ccc0ecaa8f93c8eb6999eb9747e72287757ffda" -dependencies = [ - "typed-builder-macro", -] - -[[package]] -name = "typed-builder-macro" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076a02dc54dd46795c2e9c8282ed40bcfb1e22747e955de9389a1de28190fb26" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "typenum" version = "1.19.0" @@ -9122,9 +7181,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -9132,30 +7191,12 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" -[[package]] -name = "unicode-linebreak" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" - [[package]] name = "unicode-segmentation" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" -[[package]] -name = "unicode-width" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - [[package]] name = "unsafe-libyaml" version = "0.2.11" @@ -9182,14 +7223,15 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -9198,19 +7240,13 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - [[package]] name = "utoipa" version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_json", "utoipa-gen", @@ -9225,7 +7261,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -9252,26 +7288,11 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2eebbbfe4093922c2b6734d7c679ebfebd704a0d7e56dfcb0d05818ce28977d" -[[package]] -name = "utxotree" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "ark-ff 0.4.2", - "groth16", - "nomos-core", - "num-bigint", - "poseidon2", - "rpds", - "serde", - "thiserror 1.0.69", -] - [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -9296,15 +7317,6 @@ version = "0.9.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" -[[package]] -name = "vte" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "231fdcd7ef3037e8330d8e17e61011a2c244126acc0a982f4040ac3f9f0bc077" -dependencies = [ - "memchr", -] - [[package]] name = "walkdir" version = "2.5.0" @@ -9315,20 +7327,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "wallet" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "key-management-system-keys", - "nomos-core", - "nomos-ledger", - "num-bigint", - "rpds", - "thiserror 2.0.17", - "tracing", -] - [[package]] name = "want" version = "0.3.1" @@ -9346,18 +7344,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -9368,11 +7366,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -9381,9 +7380,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9391,22 +7390,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -9426,9 +7425,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = 
"312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -9446,9 +7445,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" dependencies = [ "rustls-pki-types", ] @@ -9544,7 +7543,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -9555,7 +7554,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -9864,18 +7863,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" - -[[package]] -name = "witness-generator" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "circuits-utils", - "tempfile", -] +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -9883,15 +7873,6 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - [[package]] name = "x25519-dalek" version = "2.0.1" @@ -9917,7 +7898,7 @@ dependencies = [ "nom 7.1.3", "oid-registry", "rusticata-macros", - "thiserror 2.0.17", + "thiserror 2.0.18", "time", ] @@ -9964,28 +7945,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -10005,7 +7986,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -10020,13 +8001,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -10059,7 +8040,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -10073,23 +8054,13 @@ dependencies = [ "crossbeam-utils", "displaydoc", "flate2", - "indexmap 2.12.1", + "indexmap 2.13.0", "num_enum", "thiserror 1.0.69", ] [[package]] -name = "zksign" -version = "0.1.0" -source = "git+https://github.com/logos-co/nomos-node.git?rev=06e2738bb83851fa6ba94ef36d03a4b7be8e17a8#06e2738bb83851fa6ba94ef36d03a4b7be8e17a8" -dependencies = [ - "circuits-prover", - "circuits-utils", - "groth16", - "num-bigint", - "poseidon2", - "serde", - "serde_json", - "thiserror 2.0.17", - "witness-generator", -] +name = "zmij" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" diff --git a/Cargo.toml b/Cargo.toml index 992339d..c262329 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,12 +52,9 @@ cryptarchia-engine = { default-features = false, git = "https://github.com/logos cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } -kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } -kzgrs-backend = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs-backend", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } -nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cli", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" } diff --git a/README.md b/README.md index 387065b..48d5503 100644 --- a/README.md +++ 
b/README.md @@ -113,7 +113,7 @@ For compose/k8s deployments, you can create prebuilt bundles to speed up image b scripts/build/build-bundle.sh --platform linux # Use the bundle when building images -export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz +export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz scripts/build/build_test_image.sh ``` @@ -124,10 +124,10 @@ Key environment variables for customization: | Variable | Purpose | Default | |----------|---------|---------| | `POL_PROOF_DEV_MODE=true` | **Required** — Disable expensive proof generation (set automatically by `scripts/run/run-examples.sh`) | (none) | -| `NOMOS_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` | -| `NOMOS_DEMO_VALIDATORS` | Number of validator nodes | Varies by example | -| `NOMOS_LOG_DIR` | Directory for persistent log files | (temporary) | -| `NOMOS_LOG_LEVEL` | Logging verbosity | `info` | +| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` | +| `LOGOS_BLOCKCHAIN_DEMO_NODES` | Number of nodes | Varies by example | +| `LOGOS_BLOCKCHAIN_LOG_DIR` | Directory for persistent log files | (temporary) | +| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | Logging verbosity | `info` | See [Operations Guide](https://logos-blockchain.github.io/logos-blockchain-testing/operations.html) for complete configuration reference. diff --git a/book/COMPREHENSIVE_REPO_SYNC_REVIEW.md b/book/COMPREHENSIVE_REPO_SYNC_REVIEW.md index f0040c4..e90b979 100644 --- a/book/COMPREHENSIVE_REPO_SYNC_REVIEW.md +++ b/book/COMPREHENSIVE_REPO_SYNC_REVIEW.md @@ -14,7 +14,7 @@ Reviewed against `git rev-parse HEAD` at the time of writing, plus local working ## Findings / Fixes Applied - `book/src/environment-variables.md` was not a complete reference: it missed multiple `NOMOS_*` variables used by the repo (scripts + framework). Added the missing variables and corrected a misleading note about `RUST_LOG` vs node logging. -- `book/src/running-examples.md` “Quick Smoke Matrix” section didn’t reflect current `scripts/run/run-test-matrix.sh` flags. Added the commonly used options and clarified the relationship to `NOMOS_SKIP_IMAGE_BUILD`. +- `book/src/running-examples.md` “Quick Smoke Matrix” section didn’t reflect current `scripts/run/run-test-matrix.sh` flags. Added the commonly used options and clarified the relationship to `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD`. - `book/src/part-iv.md` existed but was not in `book/src/SUMMARY.md`. Removed it so the rendered book doesn’t silently diverge from the filesystem. - `mdbook test book` was failing because: - Many Rust examples were written as ` ```rust` (doctested by default) but depend on workspace crates; they aren’t standalone doctest snippets. 
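A minimal sketch of the kind of example in question — it imports workspace crates (`testing_framework_core`, `testing_framework_workflows`), so it cannot build as a standalone doctest and therefore carries the `rust,ignore` fence:

```rust,ignore
// Compiles only inside the workspace: both crates below are workspace-local,
// which is why mdbook's default doctest pass must skip this block.
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;

let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
    .expect_consensus_liveness()
    .build();
```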
diff --git a/book/README.md b/book/README.md index af21ddd..ca17ccc 100644 --- a/book/README.md +++ b/book/README.md @@ -159,8 +159,8 @@ cargo doc --no-deps --document-private-items **When:** New environment variable added, changed, or removed **Examples:** -- New: `NOMOS_NEW_FEATURE_ENABLED` -- Changed: `NOMOS_LOG_LEVEL` accepts new values +- New: `LOGOS_BLOCKCHAIN_NEW_FEATURE_ENABLED` +- Changed: `LOGOS_BLOCKCHAIN_LOG_LEVEL` accepts new values - Deprecated: `OLD_FEATURE_FLAG` **Update these pages:** @@ -235,7 +235,7 @@ rg "scripts/" book/src/ --no-heading ```bash - [ ] src/prerequisites.md # Image build instructions - [ ] src/runners.md # Compose/K8s prerequisites -- [ ] src/environment-variables.md # NOMOS_TESTNET_IMAGE, NOMOS_BINARIES_TAR +- [ ] src/environment-variables.md # LOGOS_BLOCKCHAIN_TESTNET_IMAGE, LOGOS_BLOCKCHAIN_BINARIES_TAR - [ ] src/architecture-overview.md # Assets and Images section ``` @@ -247,7 +247,7 @@ rg "scripts/" book/src/ --no-heading ```bash - [ ] src/logging-observability.md # Primary documentation -- [ ] src/environment-variables.md # NOMOS_METRICS_*, NOMOS_OTLP_* +- [ ] src/environment-variables.md # LOGOS_BLOCKCHAIN_METRICS_*, LOGOS_BLOCKCHAIN_OTLP_* - [ ] src/architecture-overview.md # Observability section - [ ] src/runners.md # Runner observability support ``` diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 308c148..cc63a44 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -23,6 +23,7 @@ - [RunContext: BlockFeed & Node Control](node-control.md) - [Chaos Workloads](chaos.md) - [Topology & Chaos Patterns](topology-chaos.md) + - [Manual Clusters: Imperative Control](manual-cluster.md) - [Part III — Developer Reference](part-iii.md) - [Scenario Model (Developer Level)](scenario-model.md) - [API Levels: Builder DSL vs. Direct](api-levels.md) diff --git a/book/src/annotated-tree.md b/book/src/annotated-tree.md index 37f2f71..5835460 100644 --- a/book/src/annotated-tree.md +++ b/book/src/annotated-tree.md @@ -7,15 +7,13 @@ logos-blockchain-testing/ ├─ testing-framework/ # Core library crates │ ├─ configs/ # Node config builders, topology generation, tracing/logging config │ ├─ core/ # Scenario model (ScenarioBuilder), runtime (Runner, Deployer), topology, node spawning -│ ├─ workflows/ # Workloads (transactions, DA, chaos), expectations (liveness), builder DSL extensions -│ ├─ runners/ # Deployment backends +│ ├─ workflows/ # Workloads (transactions, chaos), expectations (liveness), builder DSL extensions +│ ├─ deployers/ # Deployment backends │ │ ├─ local/ # LocalDeployer (spawns local processes) │ │ ├─ compose/ # ComposeDeployer (Docker Compose + Prometheus) │ │ └─ k8s/ # K8sDeployer (Kubernetes Helm) │ └─ assets/ # Docker/K8s stack assets │ └─ stack/ -│ ├─ kzgrs_test_params/ # KZG circuit parameters directory -│ │ └─ kzgrs_test_params # Actual proving key file (note repeated name) │ ├─ monitoring/ # Prometheus config │ ├─ scripts/ # Container entrypoints │ └─ cfgsync.yaml # Config sync server template @@ -29,8 +27,7 @@ logos-blockchain-testing/ ├─ scripts/ # Helper utilities │ ├─ run-examples.sh # Convenience script (handles setup + runs examples) │ ├─ build-bundle.sh # Build prebuilt binaries+circuits bundle -│ ├─ setup-circuits-stack.sh # Fetch KZG parameters (Linux + host) -│ └─ setup-nomos-circuits.sh # Legacy circuit fetcher +│ └─ setup-logos-blockchain-circuits.sh # Fetch circuit assets (Linux + host) │ └─ book/ # This documentation (mdBook) ``` @@ -45,13 +42,12 @@ Core library crates providing the testing API. 
| `configs` | Node configuration builders | Topology generation, tracing config | | `core` | Scenario model & runtime | `ScenarioBuilder`, `Deployer`, `Runner` | | `workflows` | Workloads & expectations | `ScenarioBuilderExt`, `ChaosBuilderExt` | -| `runners/local` | Local process deployer | `LocalDeployer` | -| `runners/compose` | Docker Compose deployer | `ComposeDeployer` | -| `runners/k8s` | Kubernetes deployer | `K8sDeployer` | +| `deployers/local` | Local process deployer | `LocalDeployer` | +| `deployers/compose` | Docker Compose deployer | `ComposeDeployer` | +| `deployers/k8s` | Kubernetes deployer | `K8sDeployer` | ### `testing-framework/assets/stack/` Docker/K8s deployment assets: -- **`kzgrs_test_params/kzgrs_test_params`**: Circuit parameters file (note repeated name; override via `NOMOS_KZGRS_PARAMS_PATH`) - **`monitoring/`**: Prometheus config - **`scripts/`**: Container entrypoints @@ -60,13 +56,13 @@ Convenience utilities: - **`run-examples.sh`**: All-in-one script for host/compose/k8s modes (recommended) - **`build-bundle.sh`**: Create prebuilt binaries+circuits bundle for compose/k8s - **`build_test_image.sh`**: Build the compose/k8s Docker image (bakes in assets) -- **`setup-circuits-stack.sh`**: Fetch KZG parameters for both Linux and host +- **`setup-logos-blockchain-circuits.sh`**: Fetch circuit assets for both Linux and host - **`cfgsync.yaml`**: Configuration sync server template ### `examples/` (Start Here!) **Runnable binaries** demonstrating framework usage: - `local_runner.rs` — Local processes -- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built) +- `compose_runner.rs` — Docker Compose (requires `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` built) - `k8s_runner.rs` — Kubernetes (requires cluster + image) **Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin ` @@ -75,20 +71,20 @@ Convenience utilities: ### `scripts/` Helper utilities: -- **`setup-nomos-circuits.sh`**: Fetch KZG parameters from releases +- **`setup-logos-blockchain-circuits.sh`**: Fetch circuit assets from releases ## Observability **Compose runner** includes: - **Prometheus** at `http://localhost:9090` (metrics scraping) -- Node metrics exposed per validator +- Node metrics exposed per node - Access in expectations: `ctx.telemetry().prometheus().map(|p| p.base_url())` **Logging** controlled by: -- `NOMOS_LOG_DIR` — Write per-node log files -- `NOMOS_LOG_LEVEL` — Global log level (error/warn/info/debug/trace) -- `NOMOS_LOG_FILTER` — Target-specific filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) -- `NOMOS_TESTS_TRACING` — Enable file logging for local runner +- `LOGOS_BLOCKCHAIN_LOG_DIR` — Write per-node log files +- `LOGOS_BLOCKCHAIN_LOG_LEVEL` — Global log level (error/warn/info/debug/trace) +- `LOGOS_BLOCKCHAIN_LOG_FILTER` — Target-specific filtering (e.g., `cryptarchia=trace`) +- `LOGOS_BLOCKCHAIN_TESTS_TRACING` — Enable file logging for local runner See [Logging & Observability](logging-observability.md) for details. @@ -102,6 +98,6 @@ See [Logging & Observability](logging-observability.md) for details. 
| **Add a new expectation** | `testing-framework/workflows/src/expectations/` → Implement `Expectation` trait | | **Modify node configs** | `testing-framework/configs/src/topology/configs/` | | **Extend builder DSL** | `testing-framework/workflows/src/builder/` → Add trait methods | -| **Add a new deployer** | `testing-framework/runners/` → Implement `Deployer` trait | +| **Add a new deployer** | `testing-framework/deployers/` → Implement `Deployer` trait | For detailed guidance, see [Internal Crate Reference](internal-crate-reference.md). diff --git a/book/src/api-levels.md b/book/src/api-levels.md index 0459468..6a14ef0 100644 --- a/book/src/api-levels.md +++ b/book/src/api-levels.md @@ -17,10 +17,9 @@ use std::time::Duration; use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; -let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(5) .transactions_with(|txs| txs.rate(5).users(3)) - .da_with(|da| da.channel_rate(1).blob_rate(1).headroom_percent(20)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(60)) .build(); @@ -36,30 +35,23 @@ Direct instantiation gives you explicit control over the concrete types you atta ```rust,ignore use std::{ - num::{NonZeroU64, NonZeroUsize}, + num::NonZeroUsize, time::Duration, }; use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::{ expectations::ConsensusLiveness, - workloads::{da, transaction}, + workloads::transaction, }; let tx_workload = transaction::Workload::with_rate(5) .expect("transaction rate must be non-zero") .with_user_limit(NonZeroUsize::new(3)); -let da_workload = da::Workload::with_rate( - NonZeroU64::new(1).unwrap(), // blob rate per block - NonZeroU64::new(1).unwrap(), // channel rate per block - da::Workload::default_headroom_percent(), -); - -let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(5) .with_workload(tx_workload) - .with_workload(da_workload) .with_expectation(ConsensusLiveness::default()) .with_run_duration(Duration::from_secs(60)) .build(); @@ -75,7 +67,6 @@ let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) | High-Level DSL | Low-Level Direct | |----------------|------------------| | `.transactions_with(\|txs\| txs.rate(5).users(3))` | `.with_workload(transaction::Workload::with_rate(5).expect(...).with_user_limit(...))` | -| `.da_with(\|da\| da.blob_rate(1).channel_rate(1))` | `.with_workload(da::Workload::with_rate(...))` | | `.expect_consensus_liveness()` | `.with_expectation(ConsensusLiveness::default())` | ## Bundled Expectations (Important) @@ -97,7 +88,7 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::transaction}; let tx_workload = transaction::Workload::with_rate(5) .expect("transaction rate must be non-zero"); -let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(5) .with_workload(tx_workload) // direct instantiation .expect_consensus_liveness() // DSL diff --git a/book/src/architecture-overview.md b/book/src/architecture-overview.md index 2143a9e..f55a159 100644 --- a/book/src/architecture-overview.md +++ b/book/src/architecture-overview.md @@ -26,7 +26,6 @@ flowchart TB subgraph Workflows["Workflows (Batteries Included)"] 
DSL[ScenarioBuilderExt
Fluent API] TxWorkload[Transaction Workload] - DAWorkload[DA Workload] ChaosWorkload[Chaos Workload] Expectations[Built-in Expectations] end @@ -74,7 +73,7 @@ flowchart TB **Workflows (High-Level API)** - `ScenarioBuilderExt` trait provides fluent DSL -- Built-in workloads (transactions, DA, chaos) +- Built-in workloads (transactions, chaos) - Common expectations (liveness, inclusion) - Simplifies scenario authoring @@ -120,7 +119,7 @@ See [Extending the Framework](extending.md) for details. ### Components -- **Topology** describes the cluster: how many nodes, their roles, and the high-level network and data-availability parameters they should follow. +- **Topology** describes the cluster: how many nodes and the high-level network parameters they should follow. - **Scenario** combines that topology with the activities to run and the checks to perform, forming a single plan. - **Deployer** provisions infrastructure on the chosen backend (local processes, Docker Compose, or Kubernetes), waits for readiness, and returns a Runner. - **Runner** orchestrates scenario execution: starts workloads, observes signals, evaluates expectations, and triggers cleanup. @@ -136,13 +135,13 @@ together predictably. The framework is consumed via **runnable example binaries** in `examples/src/bin/`: - `local_runner.rs` — Spawns nodes as host processes -- `compose_runner.rs` — Deploys via Docker Compose (requires `NOMOS_TESTNET_IMAGE` built) +- `compose_runner.rs` — Deploys via Docker Compose (requires `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` built) - `k8s_runner.rs` — Deploys via Kubernetes Helm (requires cluster + image) **Recommended:** Use the convenience script: ```bash -scripts/run/run-examples.sh -t -v +scripts/run/run-examples.sh -t -n # mode: host, compose, or k8s ``` @@ -169,10 +168,9 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(50) .transactions_with(|txs| txs.rate(5).users(20)) - .da_with(|da| da.channel_rate(1).blob_rate(2)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(90)) .build() @@ -180,8 +178,8 @@ pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> { ``` **Key API Points:** -- Topology uses `.topology_with(|t| { t.validators(N) })` closure pattern -- Workloads are configured via `_with` closures (`transactions_with`, `da_with`, `chaos_with`) +- Topology uses `.topology_with(|t| { t.nodes(N) })` closure pattern +- Workloads are configured via `_with` closures (`transactions_with`, `chaos_with`) - Chaos workloads require `.enable_node_control()` and a compatible runner ## Deployers @@ -195,29 +193,29 @@ Three deployer implementations: | `K8sDeployer` | Kubernetes Helm | Cluster + image loaded | Not yet | **Compose-specific features:** -- Observability is external (set `NOMOS_METRICS_QUERY_URL` / `NOMOS_METRICS_OTLP_INGEST_URL` / `NOMOS_GRAFANA_URL` as needed) -- Optional OTLP trace/metrics endpoints (`NOMOS_OTLP_ENDPOINT`, `NOMOS_OTLP_METRICS_ENDPOINT`) -- Node control for chaos testing (restart validators) +- Observability is external (set `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` / `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` / `LOGOS_BLOCKCHAIN_GRAFANA_URL` as needed) +- Optional OTLP trace/metrics endpoints (`LOGOS_BLOCKCHAIN_OTLP_ENDPOINT`, `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT`) +- 
Node control for chaos testing (restart nodes) ## Assets and Images ### Docker Image Built via `scripts/build/build_test_image.sh`: -- Embeds KZG circuit parameters and binaries from `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` +- Embeds circuit assets and binaries - Includes runner scripts: `run_nomos_node.sh` -- Tagged as `NOMOS_TESTNET_IMAGE` (default: `logos-blockchain-testing:local`) -- **Recommended:** Use prebuilt bundle via `scripts/build/build-bundle.sh --platform linux` and set `NOMOS_BINARIES_TAR` before building image +- Tagged as `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` (default: `logos-blockchain-testing:local`) +- **Recommended:** Use prebuilt bundle via `scripts/build/build-bundle.sh --platform linux` and set `LOGOS_BLOCKCHAIN_BINARIES_TAR` before building image ### Circuit Assets -KZG parameters required for DA workloads: -- **Host path:** `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note repeated filename—directory contains file `kzgrs_test_params`) -- **Container path:** `/kzgrs_test_params/kzgrs_test_params` (for compose/k8s) -- **Override:** `NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/file` (must point to file) -- **Fetch via:** `scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/circuits` or use `scripts/run/run-examples.sh` +Circuit assets required by the node binary: +- **Host path:** `~/.logos-blockchain-circuits` (default) +- **Container path:** `/opt/circuits` (for compose/k8s) +- **Override:** `LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/dir` (must point to a directory) +- **Fetch via:** `scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits` or use `scripts/run/run-examples.sh` ### Compose Stack Templates and configs in `testing-framework/runners/compose/assets/`: -- `docker-compose.yml.tera` — Stack template (validators) +- `docker-compose.yml.tera` — Stack template (nodes) - Cfgsync config: `testing-framework/assets/stack/cfgsync.yaml` - Monitoring assets (not deployed by the framework): `testing-framework/assets/stack/monitoring/` @@ -228,33 +226,33 @@ Templates and configs in `testing-framework/runners/compose/assets/`: | Component | Configuration | Output | |-----------|--------------|--------| | **Runner binaries** | `RUST_LOG` | Framework orchestration logs | -| **Node processes** | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (+ `NOMOS_LOG_DIR` on host runner) | Consensus, DA, mempool logs | +| **Node processes** | `LOGOS_BLOCKCHAIN_LOG_LEVEL`, `LOGOS_BLOCKCHAIN_LOG_FILTER` (+ `LOGOS_BLOCKCHAIN_LOG_DIR` on host runner) | Consensus, mempool, network logs | **Node logging:** -- **Local runner:** Writes to temporary directories by default (cleaned up). Set `NOMOS_TESTS_TRACING=true` + `NOMOS_LOG_DIR` for persistent files. +- **Local runner:** Writes to temporary directories by default (cleaned up). Set `LOGOS_BLOCKCHAIN_TESTS_TRACING=true` + `LOGOS_BLOCKCHAIN_LOG_DIR` for persistent files. - **Compose runner:** Default logs to container stdout/stderr (`docker logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory). - **K8s runner:** Logs to pod stdout/stderr (`kubectl logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory). -**File naming:** Per-node files use prefix `nomos-node-{index}` (may include timestamps). 
+**File naming:** Per-node files use prefix `logos-blockchain-node-{index}` (may include timestamps). ## Observability **Prometheus-compatible metrics querying (optional):** - The framework does **not** deploy Prometheus/Grafana. -- Provide a Prometheus-compatible base URL (PromQL API) via `NOMOS_METRICS_QUERY_URL`. +- Provide a Prometheus-compatible base URL (PromQL API) via `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL`. - Accessible in expectations when configured: `ctx.telemetry().prometheus().map(|p| p.base_url())` **Grafana dashboards (optional):** - Dashboards live in `testing-framework/assets/stack/monitoring/grafana/dashboards/` and can be imported into your Grafana. -- If you set `NOMOS_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`. +- If you set `LOGOS_BLOCKCHAIN_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`. **Node APIs:** -- HTTP endpoints per node for consensus info, network status, DA membership -- Accessible in expectations: `ctx.node_clients().validator_clients().get(0)` +- HTTP endpoints per node for consensus info and network status +- Accessible in expectations: `ctx.node_clients().node_clients().get(0)` **OTLP (optional):** -- Trace endpoint: `NOMOS_OTLP_ENDPOINT=http://localhost:4317` -- Metrics endpoint: `NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318` +- Trace endpoint: `LOGOS_BLOCKCHAIN_OTLP_ENDPOINT=http://localhost:4317` +- Metrics endpoint: `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT=http://localhost:4318` - Disabled by default (no noise if unset) For detailed logging configuration, see [Logging & Observability](logging-observability.md). diff --git a/book/src/authoring-scenarios.md b/book/src/authoring-scenarios.md index 9dd16db..c9519da 100644 --- a/book/src/authoring-scenarios.md +++ b/book/src/authoring-scenarios.md @@ -16,8 +16,8 @@ flowchart LR D --> E[5. Deploy & Run] ``` -1. **Shape the topology** — How many nodes, what roles, what network shape -2. **Attach workloads** — What traffic to generate (transactions, blobs, chaos) +1. **Shape the topology** — How many nodes, what network shape +2. **Attach workloads** — What traffic to generate (transactions, chaos) 3. **Define expectations** — What success looks like (liveness, inclusion, recovery) 4. **Set duration** — How long to run the experiment 5. 
**Choose a runner** — Where to execute (local, compose, k8s) @@ -36,12 +36,12 @@ use testing_framework_workflows::ScenarioBuilderExt; let scenario = ScenarioBuilder::topology_with(|t| { t.network_star() // Star network (one gateway + nodes) - .validators(3) // 3 validator nodes + .nodes(3) // 3 nodes }) ``` **What goes in topology?** -- Node counts (validators) +- Node counts (nodes) - Network shape (`network_star()` is currently the only built-in layout) **What does NOT go in topology?** @@ -61,7 +61,6 @@ let scenario = ScenarioBuilder::topology_with(|t| { **What goes in workloads?** - Transaction traffic (rate, users) -- DA traffic (channels, blobs) - Chaos injection (restarts, delays) **Units explained:** @@ -136,7 +135,7 @@ use testing_framework_workflows::ScenarioBuilderExt; async fn hello_consensus_liveness() -> Result<()> { let mut scenario = ScenarioBuilder::topology_with(|t| { t.network_star() - .validators(3) + .nodes(3) }) .wallets(20) .transactions_with(|tx| tx.rate(10).users(5)) @@ -204,7 +203,7 @@ use testing_framework_workflows::ScenarioBuilderExt; #[tokio::test] async fn test_consensus_liveness() -> Result<()> { let mut scenario = ScenarioBuilder::topology_with(|t| { - t.network_star().validators(3) + t.network_star().nodes(3) }) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(30)) @@ -219,7 +218,7 @@ async fn test_consensus_liveness() -> Result<()> { #[tokio::test] async fn test_transaction_inclusion() -> Result<()> { let mut scenario = ScenarioBuilder::topology_with(|t| { - t.network_star().validators(2) + t.network_star().nodes(2) }) .wallets(10) .transactions_with(|tx| tx.rate(5).users(5)) @@ -245,13 +244,13 @@ use testing_framework_workflows::ScenarioBuilderExt; pub fn minimal_topology() -> ScenarioBuilder { ScenarioBuilder::topology_with(|t| { - t.network_star().validators(2) + t.network_star().nodes(2) }) } pub fn production_like_topology() -> ScenarioBuilder { ScenarioBuilder::topology_with(|t| { - t.network_star().validators(7) + t.network_star().nodes(7) }) } @@ -293,10 +292,10 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; -async fn test_liveness_with_topology(validators: usize) -> Result<()> { +async fn test_liveness_with_topology(nodes: usize) -> Result<()> { let mut scenario = ScenarioBuilder::topology_with(|t| { t.network_star() - .validators(validators) + .nodes(nodes) }) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(60)) @@ -331,7 +330,7 @@ async fn liveness_large() -> Result<()> { ### Topology **Do include:** -- Node counts (`.validators(3)`) +- Node counts (`.nodes(3)`) - Network shape (`.network_star()`) **Don't include:** @@ -343,7 +342,7 @@ async fn liveness_large() -> Result<()> { **Do include:** - Transaction traffic (`.transactions_with(|tx| ...)`) -- DA traffic (`.da_with(|da| ...)`) +- Chaos traffic (`.chaos().restart()` or `RandomRestartWorkload`) - Chaos injection (`.with_workload(RandomRestartWorkload::new(...))`) - Rates, users, timing @@ -367,8 +366,8 @@ async fn liveness_large() -> Result<()> { ## Best Practices 1. **Keep scenarios focused**: One scenario = one behavior under test -2. **Start small**: 2-3 validators, 30-60 seconds -3. **Use descriptive names**: `test_consensus_survives_validator_restart` not `test_1` +2. **Start small**: 2-3 nodes, 30-60 seconds +3. **Use descriptive names**: `test_consensus_survives_node_restart` not `test_1` 4. 
**Extract common patterns**: Shared topology builders, helper functions 5. **Document intent**: Add comments explaining what you're testing and why 6. **Mind the units**: `.rate(N)` is per-block, `.with_run_duration()` is wall-clock @@ -379,6 +378,6 @@ async fn liveness_large() -> Result<()> { ## Next Steps - **[Core Content: Workloads & Expectations](workloads.md)** — Comprehensive reference for built-in workloads and expectations -- **[Examples](examples.md)** — More scenario patterns (DA, chaos, advanced topologies) +- **[Examples](examples.md)** — More scenario patterns (chaos, advanced topologies) - **[Running Scenarios](running-scenarios.md)** — How execution works, artifacts produced, per-runner details - **[API Levels](api-levels.md)** — When to use builder DSL vs. direct instantiation diff --git a/book/src/best-practices.md b/book/src/best-practices.md index cc08dd8..3fdb756 100644 --- a/book/src/best-practices.md +++ b/book/src/best-practices.md @@ -5,7 +5,7 @@ This page collects proven patterns for authoring, running, and maintaining test ## Scenario Design **State your intent** -- Document the goal of each scenario (throughput, DA validation, resilience) so expectation choices are obvious +- Document the goal of each scenario (throughput, resilience) so expectation choices are obvious - Use descriptive variable names that explain topology purpose (e.g., `star_topology_3val_2exec` vs `topology`) - Add comments explaining why specific rates or durations were chosen @@ -20,7 +20,7 @@ This page collects proven patterns for authoring, running, and maintaining test - Don't mix high transaction load with aggressive chaos in the same test (hard to debug) **Start small, scale up** -- Begin with minimal topology (1-2 validators) to validate scenario logic +- Begin with minimal topology (1-2 nodes) to validate scenario logic - Gradually increase topology size and workload rates - Use Host runner for fast iteration, then validate on Compose before production @@ -34,10 +34,10 @@ This page collects proven patterns for authoring, running, and maintaining test **Example: Topology preset** ```rust,ignore -pub fn standard_da_topology() -> GeneratedTopology { +pub fn standard_topology() -> GeneratedTopology { TopologyBuilder::new() .network_star() - .validators(3) + .nodes(3) .generate() } ``` @@ -46,7 +46,6 @@ pub fn standard_da_topology() -> GeneratedTopology { ```rust,ignore pub const STANDARD_TX_RATE: f64 = 10.0; -pub const STANDARD_DA_CHANNEL_RATE: f64 = 2.0; pub const SHORT_RUN_DURATION: Duration = Duration::from_secs(60); pub const LONG_RUN_DURATION: Duration = Duration::from_secs(300); ``` @@ -55,8 +54,8 @@ pub const LONG_RUN_DURATION: Duration = Duration::from_secs(300); **Observe first, tune second** - Rely on liveness and inclusion signals to interpret outcomes before tweaking rates or topology -- Enable detailed logging (`RUST_LOG=debug`, `NOMOS_LOG_LEVEL=debug`) only after initial failure -- Use `NOMOS_TESTS_KEEP_LOGS=1` to persist logs when debugging failures +- Enable detailed logging (`RUST_LOG=debug`, `LOGOS_BLOCKCHAIN_LOG_LEVEL=debug`) only after initial failure +- Use `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` to persist logs when debugging failures **Use BlockFeed effectively** - Subscribe to BlockFeed in expectations for real-time block monitoring @@ -102,7 +101,7 @@ strategy: **Cache aggressively** - Cache Rust build artifacts (`target/`) -- Cache circuit parameters (`assets/stack/kzgrs_test_params/`) +- Cache circuit parameters (`~/.logos-blockchain-circuits/`) - Cache Docker 
layers (use BuildKit cache) **Collect logs on failure** @@ -163,34 +162,32 @@ runner.run(&mut scenario).await?; // BAD: Hard to debug when it fails .transactions_with(|tx| tx.rate(50).users(100)) // high load .chaos_with(|c| c.restart().min_delay(...)) // AND chaos -.da_with(|da| da.channel_rate(10).blob_rate(20)) // AND DA stress // GOOD: Separate tests for each concern // Test 1: High transaction load only // Test 2: Chaos resilience only -// Test 3: DA stress only ``` **DON'T: Hardcode paths or ports** ```rust,ignore // BAD: Breaks on different machines -let path = PathBuf::from("/home/user/circuits/kzgrs_test_params"); +let path = PathBuf::from("/home/user/circuits"); let port = 9000; // might conflict // GOOD: Use env vars and dynamic allocation -let path = std::env::var("NOMOS_KZGRS_PARAMS_PATH") - .unwrap_or_else(|_| "assets/stack/kzgrs_test_params/kzgrs_test_params".to_string()); +let path = std::env::var("LOGOS_BLOCKCHAIN_CIRCUITS") + .unwrap_or_else(|_| "~/.logos-blockchain-circuits".to_string()); let port = get_available_tcp_port(); ``` **DON'T: Ignore resource limits** ```bash # BAD: Large topology without checking resources -scripts/run/run-examples.sh -v 20 -e 10 compose +scripts/run/run-examples.sh -n 20 compose # (might OOM or exhaust ulimits) # GOOD: Scale gradually and monitor resources -scripts/run/run-examples.sh -v 3 -e 2 compose # start small +scripts/run/run-examples.sh -n 3 compose # start small docker stats # monitor resource usage # then increase if resources allow ``` @@ -198,12 +195,11 @@ docker stats # monitor resource usage ## Scenario Design Heuristics **Minimal viable topology** -- Consensus: 3 validators (minimum for Byzantine fault tolerance) +- Consensus: 3 nodes (minimum for Byzantine fault tolerance) - Network: Star topology (simplest for debugging) **Workload rate selection** - Start with 1-5 tx/s per user, then increase -- DA: 1-2 channels, 1-3 blobs/channel initially - Chaos: 30s+ intervals between restarts (allow recovery) **Duration guidelines** @@ -222,7 +218,6 @@ docker stats # monitor resource usage |-----------|--------------| | Basic functionality | `expect_consensus_liveness()` | | Transaction handling | `expect_consensus_liveness()` + custom inclusion check | -| DA correctness | `expect_consensus_liveness()` + DA dispersal/sampling checks | | Resilience | `expect_consensus_liveness()` + recovery time measurement | ## Testing the Tests diff --git a/book/src/chaos.md b/book/src/chaos.md index 4ddcf59..5a63917 100644 --- a/book/src/chaos.md +++ b/book/src/chaos.md @@ -9,7 +9,7 @@ recovery. The built-in restart workload lives in ## How it works - Requires `NodeControlCapability` (`enable_node_control()` in the scenario builder) and a runner that provides a `NodeControlHandle`. -- Randomly selects nodes (validators) to restart based on your +- Randomly selects nodes to restart based on your include/exclude flags. - Respects min/max delay between restarts and a target cooldown to avoid flapping the same node too frequently. 
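To make the selection and pacing rules above concrete, here is a conceptual sketch of such a loop — the node list, the `restart` callback, and the toy xorshift jitter are illustrative assumptions, not the framework's actual internals:

```rust,ignore
use std::{
    collections::HashMap,
    time::{Duration, Instant},
};

/// Conceptual restart scheduler: pick an eligible node pseudo-randomly,
/// restart it, then wait a delay in [min_delay, max_delay]. Nodes restarted
/// within the cooldown window are skipped so the same node is not flapped.
fn chaos_loop(
    nodes: &[String],
    min_delay: Duration,
    max_delay: Duration,
    cooldown: Duration,
    deadline: Instant,
    mut restart: impl FnMut(&str),
) {
    let mut last_restart: HashMap<&str, Instant> = HashMap::new();
    let mut state: u64 = 0x9E37_79B9_7F4A_7C15; // toy PRNG seed (illustrative)

    while Instant::now() < deadline {
        // Toy xorshift step; the real workload would use a proper RNG.
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;

        // Only nodes past their cooldown are candidates for a restart.
        let eligible: Vec<&str> = nodes
            .iter()
            .map(String::as_str)
            .filter(|n| last_restart.get(n).map_or(true, |t| t.elapsed() >= cooldown))
            .collect();

        if !eligible.is_empty() {
            let node = eligible[(state as usize) % eligible.len()];
            restart(node);
            last_restart.insert(node, Instant::now());
        }

        // Sleep a pseudo-random delay between the configured bounds.
        let span_ms = max_delay.saturating_sub(min_delay).as_millis().max(1) as u64;
        std::thread::sleep(min_delay + Duration::from_millis(state % span_ms));
    }
}
```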
@@ -29,13 +29,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe pub fn random_restart_plan() -> testing_framework_core::scenario::Scenario< testing_framework_core::scenario::NodeControlCapability, > { - ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .enable_node_control() .with_workload(RandomRestartWorkload::new( Duration::from_secs(45), // min delay Duration::from_secs(75), // max delay Duration::from_secs(120), // target cooldown - true, // include validators + true, // include nodes )) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(150)) @@ -47,11 +47,11 @@ - **Consensus liveness**: ensure blocks keep progressing despite restarts. - **Height convergence**: optionally check all nodes converge after the chaos window. -- Any workload-specific inclusion checks if you’re also driving tx/DA traffic. +- Any workload-specific inclusion checks if you’re also driving transactions. ## Best practices - Keep delays/cooldowns realistic; avoid back-to-back restarts that would never happen in production. -- Limit chaos scope: toggle validators based on what you want to +- Limit chaos scope: toggle nodes based on what you want to test. - Combine with observability: monitor metrics/logs to explain failures. diff --git a/book/src/ci-integration.md b/book/src/ci-integration.md index 8a94123..13d5a2f 100644 --- a/book/src/ci-integration.md +++ b/book/src/ci-integration.md @@ -74,19 +74,19 @@ jobs: restore-keys: | ${{ runner.os }}-cargo-host- - - name: Cache nomos-node build + - name: Cache logos-blockchain-node build uses: actions/cache@v3 with: path: | - ../nomos-node/target/release/nomos-node - key: ${{ runner.os }}-nomos-${{ hashFiles('../nomos-node/**/Cargo.lock') }} + ../logos-blockchain-node/target/release/logos-blockchain-node + key: ${{ runner.os }}-nomos-${{ hashFiles('../logos-blockchain-node/**/Cargo.lock') }} restore-keys: | ${{ runner.os }}-nomos- - name: Run host smoke test run: | # Use run-examples.sh which handles setup automatically - scripts/run/run-examples.sh -t 120 -v 3 -e 1 host + scripts/run/run-examples.sh -t 120 -n 3 host - name: Upload logs on failure if: failure() @@ -151,7 +151,7 @@ jobs: TOPOLOGY: ${{ matrix.topology }} run: | # Build and run with the specified topology - scripts/run/run-examples.sh -t 120 -v ${TOPOLOGY:0:1} -e ${TOPOLOGY:2:1} compose + scripts/run/run-examples.sh -t 120 -n ${TOPOLOGY:0:1} compose - name: Collect Docker logs on failure if: failure() @@ -198,7 +198,7 @@ ## Workflow Features 1. **Matrix Testing:** Runs compose tests with different topologies (`3v1e`, `5v1e`) -2. **Caching:** Caches Rust dependencies, Docker layers, and nomos-node builds for faster runs +2. **Caching:** Caches Rust dependencies, Docker layers, and logos-blockchain-node builds for faster runs 3. **Log Collection:** Automatically uploads logs and artifacts when tests fail 4. **Timeout Protection:** Reasonable timeouts prevent jobs from hanging indefinitely 5. **Clean Teardown:** Ensures Docker resources are cleaned up even on failure @@ -259,14 +259,14 @@ Without this, tests will hang due to expensive proof generation. Prefer `scripts/run/run-examples.sh` which handles all setup automatically: ```bash -scripts/run/run-examples.sh -t 120 -v 3 -e 1 host +scripts/run/run-examples.sh -t 120 -n 3 host ``` This is more reliable than manual `cargo run` commands.
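When a scenario is driven from a Rust test rather than the helper script, the same guard can be enforced in code. A minimal sketch — the helper name and message are illustrative:

```rust,ignore
/// Fail fast when the proof dev-mode guard is missing, mirroring the setup
/// that scripts/run/run-examples.sh performs automatically.
fn assert_pol_dev_mode() {
    assert_eq!(
        std::env::var("POL_PROOF_DEV_MODE").as_deref(),
        Ok("true"),
        "set POL_PROOF_DEV_MODE=true, otherwise runs hang on proof generation",
    );
}
```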
### Cache Aggressively -Cache Rust dependencies, nomos-node builds, and Docker layers to speed up CI: +Cache Rust dependencies, logos-blockchain-node builds, and Docker layers to speed up CI: ```yaml - name: Cache Rust dependencies @@ -346,7 +346,7 @@ Add debug environment variables temporarily: ```yaml env: RUST_LOG: debug - NOMOS_LOG_LEVEL: debug + LOGOS_BLOCKCHAIN_LOG_LEVEL: debug ``` ### Preserve Containers (Compose) @@ -357,7 +357,7 @@ Set `COMPOSE_RUNNER_PRESERVE=1` to keep containers running for inspection: - name: Run compose test (preserve on failure) env: COMPOSE_RUNNER_PRESERVE: 1 - run: scripts/run/run-examples.sh -t 120 -v 3 -e 1 compose + run: scripts/run/run-examples.sh -t 120 -n 3 compose ``` ### Access Artifacts diff --git a/book/src/custom-workload-example.md b/book/src/custom-workload-example.md index 2b3507c..c6d9439 100644 --- a/book/src/custom-workload-example.md +++ b/book/src/custom-workload-example.md @@ -48,10 +48,10 @@ impl Workload for ReachabilityWorkload { topology: &GeneratedTopology, _run_metrics: &RunMetrics, ) -> Result<(), DynError> { - if topology.validators().get(self.target_idx).is_none() { + if topology.nodes().get(self.target_idx).is_none() { return Err(Box::new(std::io::Error::new( std::io::ErrorKind::Other, - "no validator at requested index", + "no node at requested index", ))); } Ok(()) @@ -60,7 +60,7 @@ impl Workload for ReachabilityWorkload { async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { let client = ctx .node_clients() - .validator_clients() + .node_clients() .get(self.target_idx) .ok_or_else(|| { Box::new(std::io::Error::new( @@ -108,7 +108,7 @@ impl Expectation for ReachabilityExpectation { async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { let client = ctx .node_clients() - .validator_clients() + .node_clients() .get(self.target_idx) .ok_or_else(|| { Box::new(std::io::Error::new( diff --git a/book/src/dsl-cheat-sheet.md b/book/src/dsl-cheat-sheet.md index d40c3f3..85262b3 100644 --- a/book/src/dsl-cheat-sheet.md +++ b/book/src/dsl-cheat-sheet.md @@ -22,7 +22,7 @@ use testing_framework_core::scenario::{Builder, ScenarioBuilder}; pub fn topology() -> Builder<()> { ScenarioBuilder::topology_with(|t| { t.network_star() // Star topology (all connect to seed node) - .validators(3) // Number of validator nodes + .nodes(3) // Number of nodes }) } ``` @@ -34,7 +34,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn wallets_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .wallets(50) // Seed 50 funded wallet accounts .build() } @@ -47,7 +47,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn transactions_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .wallets(50) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block @@ -57,24 +57,6 @@ pub fn transactions_plan() -> testing_framework_core::scenario::Scenario<()> { } ``` -## DA Workload - -```rust,ignore -use testing_framework_core::scenario::ScenarioBuilder; -use testing_framework_workflows::ScenarioBuilderExt; - -pub fn da_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| 
t.network_star().validators(1)) - .wallets(50) - .da_with(|da| { - da.channel_rate(1) // number of DA channels to run - .blob_rate(2) // target 2 blobs per block (headroom applied) - .headroom_percent(20) // optional headroom when sizing channels - }) // Finish DA workload config - .build() -} -``` - ## Chaos Workload (Requires `enable_node_control()`) ```rust,ignore @@ -84,7 +66,7 @@ use testing_framework_core::scenario::{NodeControlCapability, ScenarioBuilder}; use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt}; pub fn chaos_plan() -> testing_framework_core::scenario::Scenario { - ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .enable_node_control() // Enable node control capability .chaos_with(|c| { c.restart() // Random restart chaos @@ -104,7 +86,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn expectations_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .expect_consensus_liveness() // Assert blocks are produced continuously .build() } @@ -119,7 +101,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn run_duration_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .with_run_duration(Duration::from_secs(120)) // Run for 120 seconds .build() } @@ -132,7 +114,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn build_plan() -> testing_framework_core::scenario::Scenario<()> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)).build() // Construct the final Scenario } ``` @@ -164,7 +146,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn execution() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .expect_consensus_liveness() .build(); @@ -187,17 +169,12 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn run_test() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(50) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block .users(20) }) - .da_with(|da| { - da.channel_rate(1) // number of DA channels - .blob_rate(2) // target 2 blobs per block - .headroom_percent(20) // optional channel headroom - }) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(90)) .build(); diff --git a/book/src/environment-variables.md b/book/src/environment-variables.md index 0b52b9d..239e38b 100644 --- a/book/src/environment-variables.md +++ b/book/src/environment-variables.md @@ -31,19 +31,19 @@ Control which runner to use and the test topology: | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (all runners) | -| 
`NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (all runners) | -| `LOCAL_DEMO_VALIDATORS` | — | Legacy: Number of validators (host runner only) | +| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes (all runners) | +| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds (all runners) | +| `LOCAL_DEMO_NODES` | — | Legacy: Number of nodes (host runner only) | | `LOCAL_DEMO_RUN_SECS` | — | Legacy: Run duration (host runner only) | -| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "validators" (e.g., `3`) | +| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "nodes" (e.g., `3`) | **Example:** ```bash -# Run with 5 validators, for 120 seconds -NOMOS_DEMO_VALIDATORS=5 \ -NOMOS_DEMO_RUN_SECS=120 \ -scripts/run/run-examples.sh -t 120 -v 5 host +# Run with 5 nodes, for 120 seconds +LOGOS_BLOCKCHAIN_DEMO_NODES=5 \ +LOGOS_BLOCKCHAIN_DEMO_RUN_SECS=120 \ +scripts/run/run-examples.sh -t 120 -n 5 host ``` --- @@ -54,13 +54,13 @@ Required for host runner when not using helper scripts: | Variable | Required | Default | Effect | |----------|----------|---------|--------| -| `NOMOS_NODE_BIN` | Yes (host) | — | Path to `nomos-node` binary | -| `NOMOS_NODE_PATH` | No | — | Path to nomos-node git checkout (dev workflow) | +| `LOGOS_BLOCKCHAIN_NODE_BIN` | Yes (host) | — | Path to `logos-blockchain-node` binary | +| `LOGOS_BLOCKCHAIN_NODE_PATH` | No | — | Path to logos-blockchain-node git checkout (dev workflow) | **Example:** ```bash -export NOMOS_NODE_BIN=/path/to/nomos-node/target/release/nomos-node +export LOGOS_BLOCKCHAIN_NODE_BIN=/path/to/logos-blockchain-node/target/release/logos-blockchain-node ``` --- @@ -71,53 +71,47 @@ Required for compose and k8s runners: | Variable | Required | Default | Effect | |----------|----------|---------|--------| -| `NOMOS_TESTNET_IMAGE` | Yes (compose/k8s) | `logos-blockchain-testing:local` | Docker image tag for node containers | -| `NOMOS_TESTNET_IMAGE_PULL_POLICY` | No | `IfNotPresent` (local) / `Always` (ECR) | K8s `imagePullPolicy` used by the runner | -| `NOMOS_BINARIES_TAR` | No | — | Path to prebuilt bundle (`.tar.gz`) for image build | -| `NOMOS_SKIP_IMAGE_BUILD` | No | 0 | Skip image rebuild (compose/k8s); assumes image already exists | -| `NOMOS_FORCE_IMAGE_BUILD` | No | 0 | Force rebuilding the image even when the script would normally skip it (e.g. non-local k8s) | +| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | Yes (compose/k8s) | `logos-blockchain-testing:local` | Docker image tag for node containers | +| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY` | No | `IfNotPresent` (local) / `Always` (ECR) | K8s `imagePullPolicy` used by the runner | +| `LOGOS_BLOCKCHAIN_BINARIES_TAR` | No | — | Path to prebuilt bundle (`.tar.gz`) for image build | +| `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD` | No | 0 | Skip image rebuild (compose/k8s); assumes image already exists | +| `LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD` | No | 0 | Force rebuilding the image even when the script would normally skip it (e.g. 
non-local k8s) | **Example:** ```bash # Using prebuilt bundle -export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz -export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local +export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz +export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local scripts/build/build_test_image.sh # Using pre-existing image (skip build) -export NOMOS_SKIP_IMAGE_BUILD=1 -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1 +scripts/run/run-examples.sh -t 60 -n 3 compose ``` --- -## Circuit Assets (KZG Parameters) +## Circuit Assets -Circuit asset configuration for DA workloads: +Circuit asset configuration used by local runs and image builds: | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_KZGRS_PARAMS_PATH` | `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` | Path to KZG proving key file | -| `NOMOS_KZG_DIR_REL` | `testing-framework/assets/stack/kzgrs_test_params` | Directory containing KZG assets (relative to workspace root) | -| `NOMOS_KZG_FILE` | `kzgrs_test_params` | Filename of the proving key within `NOMOS_KZG_DIR_REL` | -| `NOMOS_KZG_CONTAINER_PATH` | `/kzgrs_test_params/kzgrs_test_params` | File path where the node expects KZG params inside containers | -| `NOMOS_KZG_MODE` | Runner-specific | K8s only: `hostPath` (mount from host) or `inImage` (embed into image) | -| `NOMOS_KZG_IN_IMAGE_PARAMS_PATH` | `/opt/nomos/kzg-params/kzgrs_test_params` | K8s `inImage` mode: where the proving key is stored inside the image | +| `LOGOS_BLOCKCHAIN_CIRCUITS` | `~/.logos-blockchain-circuits` | Directory containing circuit assets | | `VERSION` | From `versions.env` | Circuit release tag (used by helper scripts) | -| `NOMOS_CIRCUITS` | — | Directory containing fetched circuit bundles (set by `scripts/setup/setup-circuits-stack.sh`) | -| `NOMOS_CIRCUITS_VERSION` | — | Legacy alias for `VERSION` (supported by some build scripts) | -| `NOMOS_CIRCUITS_PLATFORM` | Auto-detected | Override circuits platform (e.g. `linux-x86_64`, `macos-aarch64`) | -| `NOMOS_CIRCUITS_HOST_DIR_REL` | `.tmp/nomos-circuits-host` | Output dir for host circuits bundle (relative to repo root) | -| `NOMOS_CIRCUITS_LINUX_DIR_REL` | `.tmp/nomos-circuits-linux` | Output dir for linux circuits bundle (relative to repo root) | -| `NOMOS_CIRCUITS_NONINTERACTIVE` | 0 | Set to `1` to overwrite outputs without prompting in setup scripts | -| `NOMOS_CIRCUITS_REBUILD_RAPIDSNARK` | 0 | Set to `1` to force rebuilding rapidsnark (host bundle only) | +| `LOGOS_BLOCKCHAIN_CIRCUITS_VERSION` | — | Legacy alias for `VERSION` (supported by some build scripts) | +| `LOGOS_BLOCKCHAIN_CIRCUITS_PLATFORM` | Auto-detected | Override circuits platform (e.g. 
`linux-x86_64`, `macos-aarch64`) | +| `LOGOS_BLOCKCHAIN_CIRCUITS_HOST_DIR_REL` | `.tmp/logos-blockchain-circuits-host` | Output dir for host circuit bundle (relative to repo root) | +| `LOGOS_BLOCKCHAIN_CIRCUITS_LINUX_DIR_REL` | `.tmp/logos-blockchain-circuits-linux` | Output dir for linux circuit bundle (relative to repo root) | +| `LOGOS_BLOCKCHAIN_CIRCUITS_NONINTERACTIVE` | 0 | Set to `1` to overwrite outputs without prompting in setup scripts | +| `LOGOS_BLOCKCHAIN_CIRCUITS_REBUILD_RAPIDSNARK` | 0 | Set to `1` to force rebuilding rapidsnark (host bundle only) | **Example:** ```bash # Use custom circuit assets -NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/kzgrs_test_params \ +LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/circuits \ cargo run -p runner-examples --bin local_runner ``` @@ -129,28 +123,28 @@ Control node log output (not framework runner logs): | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` | -| `NOMOS_LOG_FILTER` | — | Fine-grained module filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) | -| `NOMOS_LOG_DIR` | — | Host runner: directory for per-node log files (persistent). Compose/k8s: use `cfgsync.yaml` for file logging. | -| `NOMOS_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI artifacts) | -| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset (combine with `NOMOS_LOG_DIR` unless external tracing backends configured) | +| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` | +| `LOGOS_BLOCKCHAIN_LOG_FILTER` | — | Fine-grained module filtering (e.g., `cryptarchia=trace`) | +| `LOGOS_BLOCKCHAIN_LOG_DIR` | — | Host runner: directory for per-node log files (persistent). Compose/k8s: use `cfgsync.yaml` for file logging. | +| `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI artifacts) | +| `LOGOS_BLOCKCHAIN_TESTS_TRACING` | false | Enable debug tracing preset (combine with `LOGOS_BLOCKCHAIN_LOG_DIR` unless external tracing backends configured) | -**Important:** Node logging ignores `RUST_LOG`; use `NOMOS_LOG_LEVEL` and `NOMOS_LOG_FILTER` for node logs. +**Important:** Node logging ignores `RUST_LOG`; use `LOGOS_BLOCKCHAIN_LOG_LEVEL` and `LOGOS_BLOCKCHAIN_LOG_FILTER` for node logs. 
**Example:** ```bash # Debug logging to files -NOMOS_LOG_DIR=/tmp/test-logs \ -NOMOS_LOG_LEVEL=debug \ -NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug" \ +LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/test-logs \ +LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \ +LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin local_runner # Inspect logs ls /tmp/test-logs/ -# nomos-node-0.2024-12-18T14-30-00.log -# nomos-node-1.2024-12-18T14-30-00.log +# logos-blockchain-node-0.2024-12-18T14-30-00.log +# logos-blockchain-node-1.2024-12-18T14-30-00.log ``` **Common filter targets:** @@ -158,9 +152,6 @@ ls /tmp/test-logs/ | Target Prefix | Subsystem | |---------------|-----------| | `cryptarchia` | Consensus (Cryptarchia) | -| `nomos_da_sampling` | DA sampling service | -| `nomos_da_dispersal` | DA dispersal service | -| `nomos_da_verifier` | DA verification | | `nomos_blend` | Mix network/privacy layer | | `chain_service` | Chain service (node APIs/state) | | `chain_network` | P2P networking | @@ -174,21 +165,21 @@ Optional observability integration: | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_METRICS_QUERY_URL` | — | Prometheus-compatible base URL for runner to query (e.g., `http://localhost:9090`) | -| `NOMOS_METRICS_OTLP_INGEST_URL` | — | Full OTLP HTTP ingest URL for node metrics export (e.g., `http://localhost:9090/api/v1/otlp/v1/metrics`) | -| `NOMOS_GRAFANA_URL` | — | Grafana base URL for printing/logging (e.g., `http://localhost:3000`) | -| `NOMOS_OTLP_ENDPOINT` | — | OTLP trace endpoint (optional) | -| `NOMOS_OTLP_METRICS_ENDPOINT` | — | OTLP metrics endpoint (optional) | +| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | — | Prometheus-compatible base URL for runner to query (e.g., `http://localhost:9090`) | +| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | — | Full OTLP HTTP ingest URL for node metrics export (e.g., `http://localhost:9090/api/v1/otlp/v1/metrics`) | +| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | — | Grafana base URL for printing/logging (e.g., `http://localhost:3000`) | +| `LOGOS_BLOCKCHAIN_OTLP_ENDPOINT` | — | OTLP trace endpoint (optional) | +| `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT` | — | OTLP metrics endpoint (optional) | **Example:** ```bash # Enable Prometheus querying -export NOMOS_METRICS_QUERY_URL=http://localhost:9090 -export NOMOS_METRICS_OTLP_INGEST_URL=http://localhost:9090/api/v1/otlp/v1/metrics -export NOMOS_GRAFANA_URL=http://localhost:3000 +export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090 +export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://localhost:9090/api/v1/otlp/v1/metrics +export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000 -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose ``` --- @@ -210,7 +201,7 @@ Variables specific to Docker Compose deployment: ```bash # Keep containers after test for debugging COMPOSE_RUNNER_PRESERVE=1 \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose # Containers remain running docker ps --filter "name=nomos-compose-" @@ -243,11 +234,11 @@ Variables specific to Kubernetes deployment: K8S_RUNNER_NAMESPACE=nomos-test-debug \ K8S_RUNNER_PRESERVE=1 \ K8S_RUNNER_DEBUG=1 \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s # Inspect resources kubectl get pods -n nomos-test-debug -kubectl logs -n nomos-test-debug -l nomos/logical-role=validator +kubectl logs -n nomos-test-debug -l nomos/logical-role=node ``` --- @@ 
-258,19 +249,19 @@ Platform-specific build configuration: | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_BUNDLE_DOCKER_PLATFORM` | Host arch | Docker platform for bundle builds: `linux/arm64` or `linux/amd64` (macOS/Windows hosts) | -| `NOMOS_BIN_PLATFORM` | — | Legacy alias for `NOMOS_BUNDLE_DOCKER_PLATFORM` | +| `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM` | Host arch | Docker platform for bundle builds: `linux/arm64` or `linux/amd64` (macOS/Windows hosts) | +| `LOGOS_BLOCKCHAIN_BIN_PLATFORM` | — | Legacy alias for `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM` | | `COMPOSE_CIRCUITS_PLATFORM` | Host arch | Circuits platform for image builds: `linux-aarch64` or `linux-x86_64` | -| `NOMOS_EXTRA_FEATURES` | — | Extra cargo features to enable when building bundles (used by `scripts/build/build-bundle.sh`) | +| `LOGOS_BLOCKCHAIN_EXTRA_FEATURES` | — | Extra cargo features to enable when building bundles (used by `scripts/build/build-bundle.sh`) | **macOS / Apple Silicon:** ```bash # Native performance (recommended for local testing) -export NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64 +export LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64 # Or target amd64 (slower via emulation) -export NOMOS_BUNDLE_DOCKER_PLATFORM=linux/amd64 +export LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/amd64 ``` --- @@ -283,36 +274,28 @@ Timeout and performance tuning: |----------|---------|--------| | `SLOW_TEST_ENV` | false | Doubles built-in readiness timeouts (useful in CI / constrained laptops) | | `TESTNET_PRINT_ENDPOINTS` | 0 | Print `TESTNET_ENDPOINTS` / `TESTNET_PPROF` lines during deploy (set automatically by `scripts/run/run-examples.sh`) | -| `NOMOS_DISPERSAL_TIMEOUT_SECS` | 20 | DA dispersal timeout (seconds) | -| `NOMOS_RETRY_COOLDOWN_SECS` | 3 | Cooldown between retries (seconds) | -| `NOMOS_GRACE_PERIOD_SECS` | 1200 | Grace period before enforcing strict time-based expectations (seconds) | -| `NOMOS_PRUNE_DURATION_SECS` | 30 | Prune step duration (seconds) | -| `NOMOS_PRUNE_INTERVAL_SECS` | 5 | Interval between prune cycles (seconds) | -| `NOMOS_SHARE_DURATION_SECS` | 5 | Share duration (seconds) | -| `NOMOS_COMMITMENTS_WAIT_SECS` | 1 | Commitments wait duration (seconds) | -| `NOMOS_SDP_TRIGGER_DELAY_SECS` | 5 | SDP trigger delay (seconds) | **Example:** ```bash # Increase timeouts for slow environments SLOW_TEST_ENV=true \ -scripts/run/run-examples.sh -t 120 -v 5 -e 2 compose +scripts/run/run-examples.sh -t 120 -n 5 compose ``` --- ## Node Configuration (Advanced) -Node-level configuration passed through to nomos-node: +Node-level configuration passed through to logos-blockchain-node: | Variable | Default | Effect | |----------|---------|--------| | `CONSENSUS_SLOT_TIME` | — | Consensus slot time (seconds) | | `CONSENSUS_ACTIVE_SLOT_COEFF` | — | Active slot coefficient (0.0-1.0) | -| `NOMOS_USE_AUTONAT` | Unset | If set, use AutoNAT instead of a static loopback address for libp2p NAT settings | -| `NOMOS_CFGSYNC_PORT` | 4400 | Port used for cfgsync service inside the stack | -| `NOMOS_TIME_BACKEND` | `monotonic` | Select time backend (used by compose/k8s stack scripts and deployers) | +| `LOGOS_BLOCKCHAIN_USE_AUTONAT` | Unset | If set, use AutoNAT instead of a static loopback address for libp2p NAT settings | +| `LOGOS_BLOCKCHAIN_CFGSYNC_PORT` | 4400 | Port used for cfgsync service inside the stack | +| `LOGOS_BLOCKCHAIN_TIME_BACKEND` | `monotonic` | Select time backend (used by compose/k8s stack scripts and deployers) | **Example:** @@ -353,12 +336,12 @@ Variables used by 
helper scripts (`scripts/run/run-examples.sh`, etc.): | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_NODE_REV` | From `versions.env` | nomos-node git revision to build/fetch | -| `NOMOS_BUNDLE_VERSION` | From `versions.env` | Bundle schema version | -| `NOMOS_IMAGE_SELECTION` | — | Internal: image selection mode set by `run-examples.sh` (`local`/`ecr`/`auto`) | -| `NOMOS_NODE_APPLY_PATCHES` | 1 | Set to `0` to disable applying local patches when building bundles | -| `NOMOS_NODE_PATCH_DIR` | `patches/nomos-node` | Patch directory applied to nomos-node checkout during bundle builds | -| `NOMOS_NODE_PATCH_LEVEL` | — | Patch application level (`all` or an integer) for bundle builds | +| `LOGOS_BLOCKCHAIN_NODE_REV` | From `versions.env` | logos-blockchain-node git revision to build/fetch | +| `LOGOS_BLOCKCHAIN_BUNDLE_VERSION` | From `versions.env` | Bundle schema version | +| `LOGOS_BLOCKCHAIN_IMAGE_SELECTION` | — | Internal: image selection mode set by `run-examples.sh` (`local`/`ecr`/`auto`) | +| `LOGOS_BLOCKCHAIN_NODE_APPLY_PATCHES` | 1 | Set to `0` to disable applying local patches when building bundles | +| `LOGOS_BLOCKCHAIN_NODE_PATCH_DIR` | `patches/logos-blockchain-node` | Patch directory applied to logos-blockchain-node checkout during bundle builds | +| `LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL` | — | Patch application level (`all` or an integer) for bundle builds | --- @@ -368,26 +351,26 @@ Variables used by helper scripts (`scripts/run/run-examples.sh`, etc.): ```bash POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host ``` ### Debug Logging (Host) ```bash POL_PROOF_DEV_MODE=true \ -NOMOS_LOG_DIR=/tmp/logs \ -NOMOS_LOG_LEVEL=debug \ -NOMOS_LOG_FILTER="cryptarchia=trace" \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logs \ +LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \ +LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \ +scripts/run/run-examples.sh -t 60 -n 3 host ``` ### Compose with Observability ```bash POL_PROOF_DEV_MODE=true \ -NOMOS_METRICS_QUERY_URL=http://localhost:9090 \ -NOMOS_GRAFANA_URL=http://localhost:3000 \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090 \ +LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000 \ +scripts/run/run-examples.sh -t 60 -n 3 compose ``` ### K8s with Debug @@ -397,7 +380,7 @@ POL_PROOF_DEV_MODE=true \ K8S_RUNNER_NAMESPACE=nomos-debug \ K8S_RUNNER_DEBUG=1 \ K8S_RUNNER_PRESERVE=1 \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s ``` ### CI Environment @@ -406,7 +389,7 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s env: POL_PROOF_DEV_MODE: true RUST_BACKTRACE: 1 - NOMOS_TESTS_KEEP_LOGS: 1 + LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS: 1 ``` --- diff --git a/book/src/examples-advanced.md b/book/src/examples-advanced.md index adae6f9..f737ca4 100644 --- a/book/src/examples-advanced.md +++ b/book/src/examples-advanced.md @@ -13,9 +13,9 @@ Realistic advanced scenarios demonstrating framework capabilities for production | Example | Topology | Workloads | Deployer | Key Feature | |---------|----------|-----------|----------|-------------| -| Load Progression | 3 validators | Increasing tx rate | Compose | Dynamic load testing | -| Sustained Load | 4 validators | High tx + DA rate | Compose | Stress testing | -| Aggressive Chaos | 4 validators | Frequent restarts + traffic | Compose | Resilience validation | +| Load Progression | 3 nodes | 
Increasing tx rate | Compose | Dynamic load testing | +| Sustained Load | 4 nodes | High tx rate | Compose | Stress testing | +| Aggressive Chaos | 4 nodes | Frequent restarts + traffic | Compose | Resilience validation | ## Load Progression Test @@ -34,7 +34,7 @@ pub async fn load_progression_test() -> Result<()> { println!("Testing with rate: {}", rate); let mut plan = - ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(50) .transactions_with(|txs| txs.rate(rate).users(20)) .expect_consensus_liveness() @@ -54,7 +54,7 @@ pub async fn load_progression_test() -> Result<()> { ## Sustained Load Test -Run high transaction and DA load for extended duration: +Run high transaction load for extended duration: ```rust,ignore use std::time::Duration; use anyhow::Result; use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn sustained_load_test() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .wallets(100) .transactions_with(|txs| txs.rate(15).users(50)) - .da_with(|da| da.channel_rate(2).blob_rate(3)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(300)) .build(); @@ -96,7 +95,7 @@ use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt}; pub async fn aggressive_chaos_test() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .enable_node_control() .wallets(50) .transactions_with(|txs| txs.rate(10).users(20)) @@ -143,7 +142,7 @@ These scenarios require custom implementations but demonstrate framework extensi #### Cross-Validator Mempool Divergence & Convergence -**Concept:** Drive different transaction subsets into different validators (or differing arrival orders) to create temporary mempool divergence, then verify mempools/blocks converge to contain the union (no permanent divergence). +**Concept:** Drive different transaction subsets into different nodes (or differing arrival orders) to create temporary mempool divergence, then verify mempools/blocks converge to contain the union (no permanent divergence). **Requirements:** - **Custom workload:** Targets specific nodes via `ctx.node_clients()` with disjoint or jittered transaction batches @@ -238,7 +237,7 @@ These scenarios require custom implementations but demonstrate framework extensi **Requirements:** - Needs `block_peer()` / `unblock_peer()` methods in `NodeControlHandle` -- Partition subsets of validators, wait, then restore connectivity +- Partition subsets of nodes, wait, then restore connectivity - Verify chain convergence after partition heals **Why useful:** Tests the most realistic failure mode in distributed systems. diff --git a/book/src/examples.md b/book/src/examples.md index 3fce283..de8cd42 100644 --- a/book/src/examples.md +++ b/book/src/examples.md @@ -13,7 +13,7 @@ and expectations. - `compose_runner.rs` — Docker Compose (requires image built) - `k8s_runner.rs` — Kubernetes (requires cluster access and image loaded) -**Recommended:** Use `scripts/run/run-examples.sh -t <secs> -v <validators> <mode>` where mode is `host`, `compose`, or `k8s`. +**Recommended:** Use `scripts/run/run-examples.sh -t <secs> -n <nodes> <mode>` where mode is `host`, `compose`, or `k8s`.
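For instance, a concrete invocation of the recommended script (the flag values are illustrative; the same command appears throughout this book):

```bash
# 60-second run on a 3-node topology using the host runner
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 3 host
```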
**Alternative:** Direct cargo run: `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>` @@ -34,7 +34,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn simple_consensus() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(30)) .build(); @@ -62,7 +62,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn transaction_workload() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .wallets(20) .transactions_with(|txs| txs.rate(5).users(10)) .expect_consensus_liveness() @@ -79,37 +79,6 @@ pub async fn transaction_workload() -> Result<()> { **When to use**: validate transaction submission and inclusion. -## DA + transaction workload - -Combined test stressing both transaction and DA layers: - -```rust,ignore -use std::time::Duration; - -use anyhow::Result; -use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; -use testing_framework_runner_local::LocalDeployer; -use testing_framework_workflows::ScenarioBuilderExt; - -pub async fn da_and_transactions() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) - .wallets(30) - .transactions_with(|txs| txs.rate(5).users(15)) - .da_with(|da| da.channel_rate(2).blob_rate(2)) - .expect_consensus_liveness() - .with_run_duration(Duration::from_secs(90)) - .build(); - - let deployer = LocalDeployer::default(); - let runner = deployer.deploy(&plan).await?; - let _handle = runner.run(&mut plan).await?; - - Ok(()) -} -``` - -**When to use**: end-to-end coverage of transaction and DA layers. - ## Chaos resilience Test system resilience under node restarts: @@ -123,7 +92,7 @@ use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt}; pub async fn chaos_resilience() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .enable_node_control() .wallets(20) .transactions_with(|txs| txs.rate(3).users(10)) diff --git a/book/src/extending.md b/book/src/extending.md index 0014fe9..547e584 100644 --- a/book/src/extending.md +++ b/book/src/extending.md @@ -61,15 +61,15 @@ impl Workload for MyWorkload { _run_metrics: &RunMetrics, ) -> Result<(), DynError> { // Validate prerequisites (e.g., enough nodes, wallet data present) - if topology.validators().is_empty() { - return Err("no validators available".into()); + if topology.nodes().is_empty() { + return Err("no nodes available".into()); } Ok(()) } async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { // Drive async activity: submit transactions, query nodes, etc.
- let clients = ctx.node_clients().validator_clients(); + let clients = ctx.node_clients().node_clients(); for client in clients { let info = client.consensus_info().await?; @@ -126,8 +126,8 @@ impl Expectation for MyExpectation { async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> { // Optional: capture baseline state before workloads start - let client = ctx.node_clients().validator_clients().first() - .ok_or("no validators")?; + let client = ctx.node_clients().node_clients().first() + .ok_or("no nodes")?; let info = client.consensus_info().await?; self.captured_baseline = Some(info.height); @@ -138,8 +138,8 @@ impl Expectation for MyExpectation { async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { // Assert the expected condition holds after workloads finish - let client = ctx.node_clients().validator_clients().first() - .ok_or("no validators")?; + let client = ctx.node_clients().node_clients().first() + .ok_or("no nodes")?; let info = client.consensus_info().await?; let final_height = info.height; @@ -201,7 +201,7 @@ impl Deployer<()> for MyDeployer { async fn deploy(&self, scenario: &Scenario<()>) -> Result { // 1. Launch nodes using scenario.topology() // 2. Wait for readiness (e.g., consensus info endpoint responds) - // 3. Build NodeClients for validators + // 3. Build NodeClients for nodes // 4. Spawn a block feed for expectations (optional but recommended) // 5. Create NodeControlHandle if you support restarts (optional) // 6. Return a Runner wrapping RunContext + CleanupGuard @@ -345,7 +345,7 @@ impl MyWorkloadDsl for ScenarioBuilder { Users can then call: ```rust,ignore -ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) +ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .my_workload_with(|w| { w.target_rate(10) .some_option(true) diff --git a/book/src/faq.md b/book/src/faq.md index 907a62d..7986074 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -18,14 +18,13 @@ The framework enforces a minimum of **2× slot duration** (4 seconds with defaul - **Smoke tests**: 30s minimum (~14 blocks with default 2s slots, 0.9 coefficient) - **Transaction workloads**: 60s+ (~27 blocks) to observe inclusion patterns -- **DA workloads**: 90s+ (~40 blocks) to account for dispersal and sampling - **Chaos tests**: 120s+ (~54 blocks) to allow recovery after restarts Very short runs (< 30s) risk false confidence—one or two lucky blocks don't prove liveness. **Do I always need seeded wallets?** -Only for transaction scenarios. Data-availability or pure chaos scenarios may -not require them, but liveness checks still need validators producing blocks. +Only for transaction scenarios. Pure chaos scenarios may not require them, but +liveness checks still need nodes producing blocks. **What if expectations fail but workloads “look fine”?** Trust expectations first—they capture the intended success criteria. Use the diff --git a/book/src/glossary.md b/book/src/glossary.md index de2076c..c684bff 100644 --- a/book/src/glossary.md +++ b/book/src/glossary.md @@ -1,7 +1,6 @@ # Glossary -- **Validator**: node role responsible for participating in consensus and block - production. +- **Node**: process that participates in consensus and produces blocks. - **Deployer**: component that provisions infrastructure (spawns processes, creates containers, or launches pods), waits for readiness, and returns a Runner. Examples: LocalDeployer, ComposeDeployer, K8sDeployer. 
@@ -38,9 +37,7 @@ state (e.g., wallet balances, UTXO sets) rather than just progress signals. Also called "correctness expectations." - **Mantle transaction**: transaction type in Logos that can contain UTXO transfers - (LedgerTx) and operations (Op), including channel data (ChannelBlob). -- **Channel**: logical grouping for DA blobs; each blob belongs to a channel and - references a parent blob in the same channel, creating a chain of related data. + (LedgerTx) and operations (Op). - **POL_PROOF_DEV_MODE**: environment variable that disables expensive Groth16 zero-knowledge proof generation for leader election. **Required for all runners** (local, compose, k8s) for practical testing—without it, proof generation causes timeouts. Should never be diff --git a/book/src/internal-crate-reference.md b/book/src/internal-crate-reference.md index 8063488..50ed515 100644 --- a/book/src/internal-crate-reference.md +++ b/book/src/internal-crate-reference.md @@ -2,13 +2,13 @@ High-level roles of the crates that make up the framework: -- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, data availability, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution. +- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution. - **Core scenario orchestration** (`testing-framework/core/`): Houses the topology and scenario model, runtime coordination, node clients, and readiness/health probes. Defines `Deployer` and `Runner` traits, `ScenarioBuilder`, and `RunContext`. -- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, DA, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`). +- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`). -- **Runners** (`testing-framework/runners/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`). +- **Deployers** (`testing-framework/deployers/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`). - **Runner Examples** (crate name: `runner-examples`, path: `examples/`): Runnable binaries demonstrating framework usage and serving as living documentation. These are the **primary entry point** for running scenarios (`examples/src/bin/local_runner.rs`, `examples/src/bin/compose_runner.rs`, `examples/src/bin/k8s_runner.rs`). 
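To make the layering concrete, here is a minimal sketch (using only imports and calls that appear in the example scenarios in this book) showing which crate each piece comes from:

```rust,ignore
use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; // core orchestration
use testing_framework_runner_local::LocalDeployer; // one deployment backend
use testing_framework_workflows::ScenarioBuilderExt; // workloads/expectations DSL

// Core builds the plan, workflows attach expectations, a deployer runs it.
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .expect_consensus_liveness()
    .build();
let runner = LocalDeployer::default().deploy(&plan).await?;
let _handle = runner.run(&mut plan).await?;
```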
@@ -16,13 +16,13 @@ High-level roles of the crates that make up the framework: | What You're Adding | Where It Goes | Examples | |-------------------|---------------|----------| -| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels, DA params | -| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts, node roles | +| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels | +| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts | | **Scenario capability** | `testing-framework/core/src/scenario/` | New capabilities, context methods | | **Workload** | `testing-framework/workflows/src/workloads/` | New traffic generators | | **Expectation** | `testing-framework/workflows/src/expectations/` | New success criteria | | **Builder API** | `testing-framework/workflows/src/builder/` | DSL extensions, fluent methods | -| **Deployer** | `testing-framework/runners/` | New deployment backends | +| **Deployer** | `testing-framework/deployers/` | New deployment backends | | **Example scenario** | `examples/src/bin/` | Demonstration binaries | ## Extension Workflow @@ -93,7 +93,7 @@ impl YourWorkloadDslExt for testing_framework_core::scenario::Builder YourExpectationDslExt for testing_framework_core::scenario::Builder # Or use name pattern matching: -docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1) +docker logs -f $(docker ps --filter "name=nomos-compose-.*-node-0" -q | head -1) # Show last 100 lines docker logs --tail 100 <container> @@ -139,7 +136,7 @@ To write per-node log files inside containers, set `tracing_settings.logger: !Fi ```bash # Ensure cfgsync.yaml is configured to log to /logs -NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \ +LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin compose_runner @@ -161,7 +158,7 @@ volumes: ```bash COMPOSE_RUNNER_PRESERVE=1 \ -NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \ +LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \ cargo run -p runner-examples --bin compose_runner # Containers remain running after test—inspect with docker logs or docker exec ``` @@ -172,7 +169,7 @@ cargo run -p runner-examples --bin compose_runner - `TESTNET_RUNNER_PRESERVE=1` — alias for `COMPOSE_RUNNER_PRESERVE=1` - `COMPOSE_RUNNER_HTTP_TIMEOUT_SECS=<secs>` — override HTTP readiness timeout -**Note:** Container names follow pattern `nomos-compose-{uuid}-validator-{index}-1` where `{uuid}` changes per run. +**Note:** Container names follow pattern `nomos-compose-{uuid}-node-{index}-1` where `{uuid}` changes per run.
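Because `{uuid}` changes per run, it can help to list the live container names first; a small sketch using standard Docker flags:

```bash
# List this framework's compose containers by name prefix
docker ps --filter "name=nomos-compose-" --format '{{.Names}}'
```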
### K8s Runner (Kubernetes Pods) @@ -184,25 +181,25 @@ kubectl get pods # Stream logs using label selectors (recommended) # Helm chart labels: -# - nomos/logical-role=validator -# - nomos/validator-index -kubectl logs -l nomos/logical-role=validator -f +# - nomos/logical-role=node +# - nomos/node-index +kubectl logs -l nomos/logical-role=node -f # Stream logs from specific pod -kubectl logs -f nomos-validator-0 +kubectl logs -f logos-blockchain-node-0 # Previous logs from crashed pods -kubectl logs --previous -l nomos/logical-role=validator +kubectl logs --previous -l nomos/logical-role=node ``` **Download logs for offline analysis:** ```bash # Using label selectors -kubectl logs -l nomos/logical-role=validator --tail=1000 > all-validators.log +kubectl logs -l nomos/logical-role=node --tail=1000 > all-nodes.log # Specific pods -kubectl logs nomos-validator-0 > validator-0.log +kubectl logs logos-blockchain-node-0 > node-0.log ``` **K8s debugging variables:** @@ -214,7 +211,7 @@ kubectl logs nomos-validator-0 > validator-0.log **Specify namespace (if not using default):** ```bash -kubectl logs -n my-namespace -l nomos/logical-role=validator -f +kubectl logs -n my-namespace -l nomos/logical-role=node -f ``` **Note:** K8s runner is optimized for local clusters (Docker Desktop K8s, minikube, kind). Remote clusters require additional setup. @@ -228,8 +225,8 @@ kubectl logs -n my-namespace -l nomos/logical-role=validator -f **To enable OTLP:** ```bash -NOMOS_OTLP_ENDPOINT=http://localhost:4317 \ -NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318 \ +LOGOS_BLOCKCHAIN_OTLP_ENDPOINT=http://localhost:4317 \ +LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT=http://localhost:4318 \ cargo run -p runner-examples --bin local_runner ``` @@ -247,7 +244,7 @@ Runners expose metrics and node HTTP endpoints for expectation code and debuggin - For a ready-to-run stack, use `scripts/setup/setup-observability.sh`: - Compose: `scripts/setup/setup-observability.sh compose up` then `scripts/setup/setup-observability.sh compose env` - K8s: `scripts/setup/setup-observability.sh k8s install` then `scripts/setup/setup-observability.sh k8s env` -- Provide `NOMOS_METRICS_QUERY_URL` (PromQL base URL) to enable `ctx.telemetry()` queries +- Provide `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` (PromQL base URL) to enable `ctx.telemetry()` queries - Access from expectations when configured: `ctx.telemetry().prometheus().map(|p| p.base_url())` **Example:** @@ -261,13 +258,13 @@ eval $(scripts/setup/setup-observability.sh compose env) # Run scenario with metrics POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose ``` ### Grafana (Optional) - Runners do **not** provision Grafana automatically (but `scripts/setup/setup-observability.sh` can) -- If you set `NOMOS_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS` +- If you set `LOGOS_BLOCKCHAIN_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS` - Dashboards live in `testing-framework/assets/stack/monitoring/grafana/dashboards/` (the bundled stack auto-provisions them) **Example:** @@ -277,16 +274,16 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose scripts/setup/setup-observability.sh compose up eval $(scripts/setup/setup-observability.sh compose env) -export NOMOS_GRAFANA_URL=http://localhost:3000 -POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000 +POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 3 compose 
``` **Default bundled Grafana login:** `admin` / `admin` (see `scripts/observability/compose/docker-compose.yml`). ### Node APIs -- Access from expectations: `ctx.node_clients().validator_clients().get(0)` -- Endpoints: consensus info, network info, DA membership, etc. +- Access from expectations: `ctx.node_clients().node_clients().get(0)` +- Endpoints: consensus info, network info, etc. - See `testing-framework/core/src/nodes/api_client.rs` for available methods **Example usage in expectations:** @@ -295,10 +292,10 @@ POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose use testing_framework_core::scenario::{DynError, RunContext}; async fn evaluate(ctx: &RunContext) -> Result<(), DynError> { - let client = &ctx.node_clients().validator_clients()[0]; + let client = &ctx.node_clients().node_clients()[0]; let info = client.consensus_info().await?; - tracing::info!(height = info.height, "consensus info from validator 0"); + tracing::info!(height = info.height, "consensus info from node 0"); Ok(()) } @@ -322,11 +319,11 @@ flowchart TD ### Debug Logging (Host) ```bash -NOMOS_LOG_DIR=/tmp/logs \ -NOMOS_LOG_LEVEL=debug \ -NOMOS_LOG_FILTER="cryptarchia=trace" \ +LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logs \ +LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \ +LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \ POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host ``` ### Compose with Observability @@ -338,7 +335,7 @@ eval $(scripts/setup/setup-observability.sh compose env) # Run with metrics POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose # Access Grafana at http://localhost:3000 ``` @@ -350,10 +347,10 @@ K8S_RUNNER_NAMESPACE=nomos-debug \ K8S_RUNNER_DEBUG=1 \ K8S_RUNNER_PRESERVE=1 \ POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s # Inspect logs -kubectl logs -n nomos-debug -l nomos/logical-role=validator +kubectl logs -n nomos-debug -l nomos/logical-role=node ``` --- diff --git a/book/src/manual-cluster.md b/book/src/manual-cluster.md new file mode 100644 index 0000000..b9fce9a --- /dev/null +++ b/book/src/manual-cluster.md @@ -0,0 +1,399 @@ +# Manual Clusters: Imperative Control + +**When should I read this?** You're integrating external test drivers (like Cucumber/BDD frameworks) that need imperative node orchestration. This is an escape hatch for when the test orchestration must live outside the framework—most tests should use the standard scenario approach. 
+ +--- + +## Overview + +**Manual clusters** provide imperative, on-demand node control for scenarios that don't fit the declarative `ScenarioBuilder` pattern: + +```rust +use testing_framework_core::topology::config::TopologyConfig; +use testing_framework_core::scenario::{PeerSelection, StartNodeOptions}; +use testing_framework_runner_local::LocalDeployer; + +let config = TopologyConfig::with_node_numbers(3); +let deployer = LocalDeployer::new(); +let cluster = deployer.manual_cluster(config)?; + +// Start nodes on demand with explicit peer selection +let node_a = cluster.start_node_with( + "a", + StartNodeOptions { + peers: PeerSelection::None, // Start isolated + } +).await?.api; + +let node_b = cluster.start_node_with( + "b", + StartNodeOptions { + peers: PeerSelection::Named(vec!["node-a".to_owned()]), // Connect to A + } +).await?.api; + +// Wait for network readiness +cluster.wait_network_ready().await?; + +// Custom validation logic +let info_a = node_a.consensus_info().await?; +let info_b = node_b.consensus_info().await?; +assert!(info_a.height.abs_diff(info_b.height) <= 5); +``` + +**Key difference from scenarios:** +- **External orchestration:** Your code (or an external driver like Cucumber) controls the execution flow step-by-step +- **Imperative model:** You call `start_node()`, `sleep()`, poll APIs directly in test logic +- **No framework execution:** The scenario runner doesn't drive workloads—you do + +Note: Scenarios with node control can also start nodes dynamically, control peer selection, and orchestrate timing—but via **workloads** within the framework's execution model. Use manual clusters only when the orchestration must be external (e.g., Cucumber steps). + +--- + +## When to Use Manual Clusters + +**Manual clusters are an escape hatch for when orchestration must live outside the framework.** + +Prefer workloads for scenario logic; use manual clusters only when an external system needs to control node lifecycle—for example: + +**Cucumber/BDD integration** +Gherkin steps control when nodes start, which peers they connect to, and when to verify state. The test driver (Cucumber) orchestrates the scenario step-by-step. + +**Custom test harnesses** +External scripts or tools that need programmatic control over node lifecycle as part of a larger testing pipeline. + +--- + +## Core API + +### Starting the Cluster + +```rust +use testing_framework_core::topology::config::TopologyConfig; +use testing_framework_runner_local::LocalDeployer; + +// Define capacity (preallocates ports/configs for N nodes) +let config = TopologyConfig::with_node_numbers(5); + +let deployer = LocalDeployer::new(); +let cluster = deployer.manual_cluster(config)?; +// Nodes are stopped automatically when cluster is dropped +``` + +**Important:** The `TopologyConfig` defines the **maximum capacity**, not the initial state. Nodes are started on-demand via API calls. 
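For example, a cluster created with capacity for five nodes can start a single node up front and add the rest later; a minimal sketch using the API above:

```rust
// Capacity for 5 nodes; nothing is running yet.
let config = TopologyConfig::with_node_numbers(5);
let cluster = LocalDeployer::new().manual_cluster(config)?;

// Exactly one node starts here; the remaining four slots stay unused
// until further start_node() calls.
let seed = cluster.start_node("seed").await?.api;
```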
+ +### Starting Nodes + +**Default peers (topology layout):** + +```rust +let node = cluster.start_node("seed").await?; +``` + +**No peers (isolated):** + +```rust +use testing_framework_core::scenario::{PeerSelection, StartNodeOptions}; + +let node = cluster.start_node_with( + "isolated", + StartNodeOptions { + peers: PeerSelection::None, + } +).await?; +``` + +**Explicit peers (named):** + +```rust +let node = cluster.start_node_with( + "follower", + StartNodeOptions { + peers: PeerSelection::Named(vec![ + "node-seed".to_owned(), + "node-isolated".to_owned(), + ]), + } +).await?; +``` + +**Note:** Node names are prefixed with `node-` internally. If you start a node with name `"a"`, reference it as `"node-a"` in peer lists. + +### Getting Node Clients + +```rust +// From start result +let started = cluster.start_node("my-node").await?; +let client = started.api; + +// Or lookup by name +if let Some(client) = cluster.node_client("node-my-node") { + let info = client.consensus_info().await?; + println!("Height: {}", info.height); +} +``` + +### Waiting for Readiness + +```rust +// Waits until all started nodes have connected to their expected peers +cluster.wait_network_ready().await?; +``` + +**Behavior:** +- Single-node clusters always ready (no peers to verify) +- Multi-node clusters wait for peer counts to match expectations +- Timeout after 60 seconds (120 seconds if `SLOW_TEST_ENV=true`) with diagnostic message + +--- + +## Complete Example: External Test Driver Pattern + +This shows how an external test driver (like Cucumber) might use manual clusters to control node lifecycle: + +```rust +use std::time::Duration; +use anyhow::Result; +use testing_framework_core::{ + scenario::{PeerSelection, StartNodeOptions}, + topology::config::TopologyConfig, +}; +use testing_framework_runner_local::LocalDeployer; +use tokio::time::sleep; + +#[tokio::test] +async fn external_driver_example() -> Result<()> { + // Step 1: Create cluster with capacity for 3 nodes + let config = TopologyConfig::with_node_numbers(3); + let deployer = LocalDeployer::new(); + let cluster = deployer.manual_cluster(config)?; + + // Step 2: External driver decides to start 2 nodes initially + println!("Starting initial topology..."); + let node_a = cluster.start_node("a").await?.api; + let node_b = cluster + .start_node_with( + "b", + StartNodeOptions { + peers: PeerSelection::Named(vec!["node-a".to_owned()]), + }, + ) + .await? + .api; + + cluster.wait_network_ready().await?; + + // Step 3: External driver runs some protocol operations + let info = node_a.consensus_info().await?; + println!("Initial cluster height: {}", info.height); + + // Step 4: Later, external driver decides to add third node + println!("External driver adding third node..."); + let node_c = cluster + .start_node_with( + "c", + StartNodeOptions { + peers: PeerSelection::Named(vec!["node-a".to_owned()]), + }, + ) + .await? + .api; + + cluster.wait_network_ready().await?; + + // Step 5: External driver validates final state + let heights = vec![ + node_a.consensus_info().await?.height, + node_b.consensus_info().await?.height, + node_c.consensus_info().await?.height, + ]; + println!("Final heights: {:?}", heights); + + Ok(()) +} +``` + +**Key pattern:** +The external driver controls **when** nodes start and **which peers** they connect to, allowing test frameworks like Cucumber to orchestrate scenarios step-by-step based on Gherkin steps or other external logic. 
+ +--- + +## Peer Selection Strategies + +**`PeerSelection::DefaultLayout`** +Uses the topology's network layout (star/chain/full). Default behavior. + +```rust +let node = cluster.start_node_with( + "normal", + StartNodeOptions { + peers: PeerSelection::DefaultLayout, + } +).await?; +``` + +**`PeerSelection::None`** +Node starts with no initial peers. Use when an external driver needs to build topology incrementally. + +```rust +let isolated = cluster.start_node_with( + "isolated", + StartNodeOptions { + peers: PeerSelection::None, + } +).await?; +``` + +**`PeerSelection::Named(vec!["node-a", "node-b"])`** +Explicit peer list. Use when an external driver needs to construct specific peer relationships. + +```rust +let follower = cluster.start_node_with( + "follower", + StartNodeOptions { + peers: PeerSelection::Named(vec![ + "node-a".to_owned(), + "node-b".to_owned(), + ]), + } +).await?; +``` + +**Remember:** Node names are automatically prefixed with `node-`. If you call `start_node("a")`, reference it as `"node-a"` in peer lists. + +--- + +## Custom Validation Patterns + +Manual clusters don't have built-in expectations—you write validation logic directly: + +### Height Convergence + +```rust +use tokio::time::{sleep, Duration}; + +let start = tokio::time::Instant::now(); +loop { + let heights: Vec<u64> = vec![ + node_a.consensus_info().await?.height, + node_b.consensus_info().await?.height, + node_c.consensus_info().await?.height, + ]; + + let max_diff = heights.iter().max().unwrap() - heights.iter().min().unwrap(); + if max_diff <= 5 { + println!("Converged: heights={:?}", heights); + break; + } + + if start.elapsed() > Duration::from_secs(60) { + return Err(anyhow::anyhow!("Convergence timeout: heights={:?}", heights)); + } + + sleep(Duration::from_secs(2)).await; +} +``` + +### Peer Count Verification + +```rust +let info = node.network_info().await?; +assert_eq!( + info.n_peers, 3, + "Expected 3 peers, found {}", + info.n_peers +); +``` + +### Block Production + +```rust +// Verify node is producing blocks +let initial_height = node_a.consensus_info().await?.height; + +sleep(Duration::from_secs(10)).await; + +let current_height = node_a.consensus_info().await?.height; +assert!( + current_height > initial_height, + "Node should have produced blocks: initial={}, current={}", + initial_height, + current_height +); +``` + +--- + +## Limitations + +**Local deployer only** +Manual clusters currently only work with `LocalDeployer`. Compose and K8s support is not available. + +**No built-in workloads** +You must manually submit transactions via node API clients. The framework's transaction workloads are scenario-specific. + +**No automatic expectations** +You wire validation yourself. The `.expect_*()` methods from scenarios are not automatically attached—you write custom validation loops. + +**No RunContext** +Manual clusters don't provide `RunContext`, so features like `BlockFeed` and metrics queries require manual setup.
+ +--- + +## Relationship to Node Control + +Manual clusters and [node control](node-control.md) share the same underlying infrastructure (`LocalDynamicNodes`), but serve different purposes: + +| Feature | Manual Cluster | Node Control (Scenario) | +|---------|---------------|-------------------------| +| **Orchestration** | External (your code/Cucumber) | Framework (workloads) | +| **Programming model** | Imperative (step-by-step) | Declarative (plan + execute) | +| **Node lifecycle** | Manual `start_node()` calls | Automatic + workload-driven | +| **Traffic generation** | Manual API calls | Built-in workloads (tx, chaos) | +| **Validation** | Manual polling loops | Built-in expectations + custom | +| **Use case** | Cucumber/BDD integration | Standard testing & chaos | + +**When to use which:** +- **Scenarios with node control** → Standard testing (built-in workloads drive node control) +- **Manual clusters** → External drivers (Cucumber/BDD where external logic drives node control) + +--- + +## Running Manual Cluster Tests + +Manual cluster tests are typically marked with `#[ignore]` to prevent accidental runs: + +```rust +#[tokio::test] +#[ignore = "run manually with: cargo test -- --ignored external_driver_example"] +async fn external_driver_example() -> Result<()> { + // ... +} +``` + +**To run:** + +```bash +# Required: dev mode for fast proofs +POL_PROOF_DEV_MODE=true \ +cargo test -p runner-examples -- --ignored external_driver_example +``` + +**Logs:** + +```bash +# Preserve logs after test +LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1 \ +RUST_LOG=info \ +POL_PROOF_DEV_MODE=true \ +cargo test -p runner-examples -- --ignored external_driver_example +``` + +--- + +## See Also + +- [Testing Philosophy](testing-philosophy.md) — Why the framework is declarative by default +- [RunContext: BlockFeed & Node Control](node-control.md) — Node control within scenarios +- [Chaos Testing](chaos.md) — Restart-based chaos (scenario approach) +- [Scenario Builder Extensions](scenario-builder-ext-patterns.md) — Extending the declarative model diff --git a/book/src/node-control.md b/book/src/node-control.md index c54d822..7b9df61 100644 --- a/book/src/node-control.md +++ b/book/src/node-control.md @@ -10,7 +10,7 @@ provides: ## BlockFeed: Observing Block Production -The `BlockFeed` is a broadcast stream of block observations that allows workloads and expectations to monitor blockchain progress in real-time. It polls a validator node continuously and broadcasts new blocks to all subscribers. +The `BlockFeed` is a broadcast stream of block observations that allows workloads and expectations to monitor blockchain progress in real-time. It polls a node continuously and broadcasts new blocks to all subscribers. ### What BlockFeed Provides @@ -134,7 +134,7 @@ async fn start_capture(ctx: &RunContext) -> Result<(), DynError> { "observed block" ); - // Process transactions, DA blobs, etc. + // Process transactions or other block data. 
} Err(tokio::sync::broadcast::error::RecvError::Closed) => break, Err(_) => continue, @@ -204,7 +204,7 @@ async fn generate_request() -> Option<()> { } async fn start(ctx: &RunContext) -> Result<(), DynError> { - let clients = ctx.node_clients().validator_clients(); + let clients = ctx.node_clients().node_clients(); let mut receiver = ctx.block_feed().subscribe(); let mut pending_requests: Vec<()> = Vec::new(); @@ -249,7 +249,7 @@ Example direct polling in expectations: use testing_framework_core::scenario::{DynError, RunContext}; async fn evaluate(ctx: &RunContext) -> Result<(), DynError> { - let client = &ctx.node_clients().validator_clients()[0]; + let client = &ctx.node_clients().node_clients()[0]; // Poll current height once let info = client.consensus_info().await?; @@ -311,7 +311,6 @@ async fn evaluate(ctx: &RunContext, expected_min: u64) -> Result<(), DynError> { The framework's built-in expectations use BlockFeed extensively: - **`ConsensusLiveness`**: Doesn't directly subscribe but uses block feed stats to verify progress -- **`DataAvailabilityExpectation`**: Subscribes to inspect DA blobs in each block and track inscription/dispersal - **`TransactionInclusion`**: Subscribes to find specific transactions in blocks See [Examples](examples.md) and [Workloads & Expectations](workloads.md) for more patterns. @@ -324,7 +323,7 @@ The framework currently supports **process-level chaos** (node restarts) for resilience testing: **Supported:** -- Restart validators (`restart_validator`) +- Restart nodes (`restart_node`) - Random restart workload via `.chaos().restart()` **Not Yet Supported:** @@ -354,8 +353,8 @@ impl Workload for RestartWorkload { async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { if let Some(control) = ctx.node_control() { - // Restart the first validator (index 0) if supported. - control.restart_validator(0).await?; + // Restart the first node (index 0) if supported. 
+ control.restart_node(0).await?; } Ok(()) } @@ -375,7 +374,7 @@ use testing_framework_core::scenario::DynError; #[async_trait] pub trait NodeControlHandle: Send + Sync { - async fn restart_validator(&self, index: usize) -> Result<(), DynError>; + async fn restart_node(&self, index: usize) -> Result<(), DynError>; } ``` diff --git a/book/src/operations-overview.md b/book/src/operations-overview.md index 70b301a..230d9ef 100644 --- a/book/src/operations-overview.md +++ b/book/src/operations-overview.md @@ -13,18 +13,18 @@ Operational readiness focuses on prerequisites, environment fit, and clear signa **Prerequisites:** - `versions.env` file at repository root (required by helper scripts) -- Node binaries (`nomos-node`) available or built on demand +- Node binaries (`logos-blockchain-node`) available or built on demand - Platform requirements met (Docker for compose, cluster access for k8s) -- Circuit assets for DA workloads +- Circuit assets for proof generation **Artifacts:** -- KZG parameters (circuit assets) for Data Availability scenarios +- Circuit parameters required by the node binary - Docker images for compose/k8s deployments - Binary bundles for reproducible builds **Environment Configuration:** - `POL_PROOF_DEV_MODE=true` is **REQUIRED for all runners** to avoid expensive proof generation -- Logging configured via `NOMOS_LOG_*` variables +- Logging configured via `LOGOS_BLOCKCHAIN_LOG_*` variables - Observability endpoints (Prometheus, Grafana) optional but useful **Readiness & Health:** @@ -78,4 +78,3 @@ This Operations & Deployment section covers: - [Logging & Observability](logging-observability.md) — Log collection, metrics, and debugging **Philosophy:** Treat operational hygiene—assets present, prerequisites satisfied, observability reachable—as the first step to reliable scenario outcomes. - diff --git a/book/src/prerequisites.md b/book/src/prerequisites.md index 3049c35..7047e72 100644 --- a/book/src/prerequisites.md +++ b/book/src/prerequisites.md @@ -10,19 +10,19 @@ All helper scripts require a `versions.env` file at the repository root: ```bash VERSION=v0.3.1 -NOMOS_NODE_REV=abc123def456789 -NOMOS_BUNDLE_VERSION=v1 +LOGOS_BLOCKCHAIN_NODE_REV=abc123def456789 +LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v1 ``` **What it defines:** -- `VERSION` — Circuit release tag for KZG parameters -- `NOMOS_NODE_REV` — Git revision of nomos-node to build/fetch -- `NOMOS_BUNDLE_VERSION` — Bundle schema version +- `VERSION` — Circuit assets release tag +- `LOGOS_BLOCKCHAIN_NODE_REV` — Git revision of logos-blockchain-node to build/fetch +- `LOGOS_BLOCKCHAIN_BUNDLE_VERSION` — Bundle schema version **Where it's used:** - `scripts/run/run-examples.sh` - `scripts/build/build-bundle.sh` -- `scripts/setup/setup-nomos-circuits.sh` +- `scripts/setup/setup-logos-blockchain-circuits.sh` - CI workflows **Error if missing:** @@ -30,37 +30,37 @@ NOMOS_BUNDLE_VERSION=v1 ERROR: versions.env not found at repository root This file is required and should define: VERSION= - NOMOS_NODE_REV= - NOMOS_BUNDLE_VERSION= + LOGOS_BLOCKCHAIN_NODE_REV= + LOGOS_BLOCKCHAIN_BUNDLE_VERSION= ``` **Fix:** Ensure you're in the repository root. The file should already exist in the checked-out repo. ## Node Binaries -Scenarios need compiled `nomos-node` binaries. +Scenarios need compiled `logos-blockchain-node` binaries. 
### Option 1: Use Helper Scripts (Recommended) ```bash -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host ``` This automatically: -- Clones/updates nomos-node checkout +- Clones/updates logos-blockchain-node checkout - Builds required binaries -- Sets `NOMOS_NODE_BIN` +- Sets `LOGOS_BLOCKCHAIN_NODE_BIN` ### Option 2: Manual Build -If you have a sibling `nomos-node` checkout: +If you have a sibling `logos-blockchain-node` checkout: ```bash -cd ../nomos-node -cargo build --release --bin nomos-node +cd ../logos-blockchain-node +cargo build --release --bin logos-blockchain-node # Set environment variables -export NOMOS_NODE_BIN=$PWD/target/release/nomos-node +export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/target/release/logos-blockchain-node # Return to testing framework cd ../nomos-testing @@ -80,51 +80,40 @@ CI workflows use prebuilt artifacts: - name: Extract bundle run: | tar -xzf .tmp/nomos-binaries-linux-*.tar.gz -C .tmp/ - export NOMOS_NODE_BIN=$PWD/.tmp/nomos-node + export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/.tmp/logos-blockchain-node ``` -## Circuit Assets (KZG Parameters) +## Circuit Assets -Data Availability (DA) workloads require KZG cryptographic parameters. +Nodes require circuit assets for proof generation. The framework expects a +directory containing the circuits, not a single file. ### Asset Location -**Default path:** `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` +**Default path:** `~/.logos-blockchain-circuits` -Note: The directory `kzgrs_test_params/` contains a file named `kzgrs_test_params`. This is the proving key file (~120MB). - -**Container path (compose/k8s):** `/kzgrs_test_params/kzgrs_test_params` +**Container path (compose/k8s):** `/opt/circuits` (set during image build) ### Getting Assets **Option 1: Use helper script** (recommended): ```bash -# Fetch circuits -scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits - -# Copy to default location -mkdir -p testing-framework/assets/stack/kzgrs_test_params -cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ - -# Verify (should be ~120MB) -ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params +scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits ``` **Option 2: Let `run-examples.sh` handle it**: ```bash -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host ``` -This automatically fetches and places assets. 
- ### Override Path -Set `NOMOS_KZGRS_PARAMS_PATH` to use a custom location: +Set `LOGOS_BLOCKCHAIN_CIRCUITS` to use a custom location: ```bash -NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/kzgrs_test_params \ +LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/circuits \ cargo run -p runner-examples --bin local_runner ``` @@ -132,14 +121,14 @@ cargo run -p runner-examples --bin local_runner | Runner | When Required | |--------|---------------| -| **Host (local)** | Always (for DA workloads) | +| **Host (local)** | Always | | **Compose** | During image build (baked into image) | -| **K8s** | During image build + mounted via hostPath | +| **K8s** | During image build | **Error without assets:** ```text -Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params" } +Error: circuits directory not found (LOGOS_BLOCKCHAIN_CIRCUITS) ``` ## Platform Requirements @@ -149,7 +138,7 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame **Requires:** - Rust nightly toolchain - Node binaries built -- KZG circuit assets (for DA workloads) +- Circuit assets for proof generation - Available ports (18080+, 3100+, etc.) **No Docker required.** @@ -164,11 +153,11 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame **Requires:** - Docker daemon running - Docker image built: `logos-blockchain-testing:local` -- KZG assets baked into image +- Circuit assets baked into image - Docker Desktop (macOS) or Docker Engine (Linux) **Platform notes (macOS / Apple silicon):** -- Prefer `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` for native performance +- Prefer `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` for native performance - Use `linux/amd64` only if targeting amd64 environments (slower via emulation) **Best for:** @@ -182,7 +171,7 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame - Kubernetes cluster (Docker Desktop K8s, minikube, kind, or remote) - `kubectl` configured - Docker image built and loaded/pushed -- KZG assets baked into image + mounted via hostPath +- Circuit assets baked into image **Local cluster setup:** @@ -198,7 +187,7 @@ minikube start minikube image load logos-blockchain-testing:local ``` -**Remote cluster:** Push image to registry and set `NOMOS_TESTNET_IMAGE`. +**Remote cluster:** Push image to registry and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE`. **Best for:** - Production-like testing @@ -218,7 +207,7 @@ Without this, proof generation uses expensive Groth16 proving, causing: ```bash POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner -POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 3 compose # etc. ``` @@ -237,8 +226,8 @@ Run this checklist before your first scenario: # 1. Verify versions.env exists cat versions.env -# 2. Check circuit assets (for DA workloads) -ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params +# 2. Check circuit assets +ls -lh "${HOME}/.logos-blockchain-circuits" # 3. Verify POL_PROOF_DEV_MODE is set echo $POL_PROOF_DEV_MODE # Should print: true @@ -250,7 +239,7 @@ docker ps docker images | grep logos-blockchain-testing # 6. 
For host runner: verify node binaries (if not using scripts) -$NOMOS_NODE_BIN --version +$LOGOS_BLOCKCHAIN_NODE_BIN --version ``` ## Recommended: Use Helper Scripts @@ -259,18 +248,18 @@ The easiest path is to let the helper scripts handle everything: ```bash # Host runner -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host # Compose runner -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose # K8s runner -scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s ``` These scripts: - Verify `versions.env` exists -- Clone/build nomos-node if needed +- Clone/build logos-blockchain-node if needed - Fetch circuit assets if missing - Build Docker images (compose/k8s) - Load images into cluster (k8s) diff --git a/book/src/project-context-primer.md b/book/src/project-context-primer.md index 9f91aec..8cce2ad 100644 --- a/book/src/project-context-primer.md +++ b/book/src/project-context-primer.md @@ -2,7 +2,7 @@ **Declarative, multi-node blockchain testing for the Logos network** -The Logos Testing Framework enables you to test consensus, data availability, and transaction workloads across local processes, Docker Compose, and Kubernetes deployments—all with a unified scenario API. +The Logos Testing Framework enables you to test consensus and transaction workloads across local processes, Docker Compose, and Kubernetes deployments—all with a unified scenario API. [**Get Started**](quickstart.md) @@ -13,8 +13,8 @@ The Logos Testing Framework enables you to test consensus, data availability, an **Everything in this framework is a Scenario.** A Scenario is a controlled experiment over time, composed of: -- **Topology** — The cluster shape (validators, network layout) -- **Workloads** — Traffic and conditions that exercise the system (transactions, DA, chaos) +- **Topology** — The cluster shape (nodes, network layout) +- **Workloads** — Traffic and conditions that exercise the system (transactions, chaos) - **Expectations** — Success criteria verified after execution (liveness, inclusion, recovery) - **Duration** — The time window for the experiment @@ -37,8 +37,8 @@ flowchart LR ``` 1. **Define Scenario** — Describe your test: topology, workloads, and success criteria -2. **Deploy Topology** — Launch validators using host, compose, or k8s runners -3. **Run Workloads** — Drive transactions, DA traffic, and chaos operations +2. **Deploy Topology** — Launch nodes using host, compose, or k8s runners +3. **Run Workloads** — Drive transactions and chaos operations 4. 
**Check Expectations** — Verify consensus liveness, inclusion, and system health --- @@ -57,7 +57,6 @@ flowchart LR **Built-in Workloads** - Transaction submission with configurable rates -- Data availability (DA) blob dispersal and sampling - Chaos testing with controlled node restarts **Comprehensive Observability** @@ -81,7 +80,7 @@ use testing_framework_workflows::ScenarioBuilderExt; async fn main() -> anyhow::Result<()> { let mut scenario = ScenarioBuilder::topology_with(|t| { t.network_star() - .validators(3) + .nodes(3) }) .transactions_with(|tx| tx.rate(10).users(5)) .expect_consensus_liveness() @@ -122,11 +121,9 @@ Check the **[Developer Reference](part-iii.md)** to implement custom workloads, ## Project Context -**Logos** is a modular blockchain protocol composed of validators, and a data-availability (DA) subsystem: +**Logos** is a modular blockchain protocol composed of nodes that participate in consensus and produce blocks. -- **Validators** participate in consensus and produce blocks - -These roles interact tightly, which is why meaningful testing must be performed in multi-node environments that include real networking, timing, and DA interaction. +Meaningful testing must be performed in multi-node environments that include real networking and timing behavior. The Logos Testing Framework provides the infrastructure to orchestrate these multi-node scenarios reliably across development, CI, and production-like environments. diff --git a/book/src/quickstart.md b/book/src/quickstart.md index 163327b..450afec 100644 --- a/book/src/quickstart.md +++ b/book/src/quickstart.md @@ -16,7 +16,7 @@ git clone https://github.com/logos-blockchain/logos-blockchain-testing.git cd logos-blockchain-testing # 3. Run your first scenario (downloads dependencies automatically) -POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 1 -e 1 host +POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 1 host ``` **First run takes 5-10 minutes** (downloads ~120MB circuit assets, builds binaries). @@ -32,10 +32,10 @@ If you already have the repository cloned: - Rust toolchain (nightly) - Unix-like system (tested on Linux and macOS) - For Docker Compose examples: Docker daemon running -- For Docker Desktop on Apple silicon (compose/k8s): set `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` to avoid slow/fragile amd64 emulation builds -- **`versions.env` file** at repository root (defines VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION) +- For Docker Desktop on Apple silicon (compose/k8s): set `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` to avoid slow/fragile amd64 emulation builds +- **`versions.env` file** at repository root (defines VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION) -**Note:** `nomos-node` binaries are built automatically on demand or can be provided via prebuilt bundles. +**Note:** `logos-blockchain-node` binaries are built automatically on demand or can be provided via prebuilt bundles. **Important:** The `versions.env` file is required by helper scripts. If missing, the scripts will fail with an error. The file should already exist in the repository root. @@ -47,15 +47,15 @@ The framework ships with runnable example binaries in `examples/src/bin/`. 
```bash # From the logos-blockchain-testing directory -scripts/run/run-examples.sh -t 60 -v 1 -e 1 host +scripts/run/run-examples.sh -t 60 -n 1 host ``` -This handles circuit setup, binary building, and runs a complete scenario: 1 validator, mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration. +This handles circuit setup, binary building, and runs a complete scenario: 1 node, transaction workload (5 tx/block), 60s duration. **Alternative:** Direct cargo run (requires manual setup): ```bash -# Requires circuits in place and NOMOS_NODE_BIN set +# Requires circuits in place and LOGOS_BLOCKCHAIN_NODE_BIN set POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner ``` @@ -70,18 +70,13 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn run_local_demo() -> Result<()> { - // Define the scenario (1 validator, tx + DA workload) - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + // Define the scenario (1 node, tx workload) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .wallets(1_000) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block .users(500) // use 500 of the seeded wallets }) - .da_with(|da| { - da.channel_rate(1) // 1 channel - .blob_rate(1) // target 1 blob per block - .headroom_percent(20) // default headroom when sizing channels - }) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(60)) .build(); @@ -103,8 +98,8 @@ pub async fn run_local_demo() -> Result<()> { - Nodes spawn as local processes - Consensus starts producing blocks - Scenario runs for the configured duration -- Node state/logs written under a temporary per-run directory in the current working directory (removed after the run unless `NOMOS_TESTS_KEEP_LOGS=1`) -- To write per-node log files to a stable location: set `NOMOS_LOG_DIR=/path/to/logs` (files will have prefix like `nomos-node-0*`, may include timestamps) +- Node state/logs written under a temporary per-run directory in the current working directory (removed after the run unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`) +- To write per-node log files to a stable location: set `LOGOS_BLOCKCHAIN_LOG_DIR=/path/to/logs` (files will have prefix like `logos-blockchain-node-0*`, may include timestamps) ## What Just Happened? 
@@ -118,7 +113,7 @@ use testing_framework_core::scenario::ScenarioBuilder; pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> { ScenarioBuilder::topology_with(|t| { t.network_star() // Star topology: all nodes connect to seed - .validators(1) // 1 validator node + .nodes(1) // 1 node }) } ``` @@ -132,7 +127,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn step_2_wallets() -> testing_framework_core::scenario::Builder<()> { - ScenarioBuilder::with_node_counts(1, 1).wallets(1_000) // Seed 1,000 funded wallet accounts + ScenarioBuilder::with_node_counts(1).wallets(1_000) // Seed 1,000 funded wallet accounts } ``` @@ -145,21 +140,16 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn step_3_workloads() -> testing_framework_core::scenario::Builder<()> { - ScenarioBuilder::with_node_counts(1, 1) + ScenarioBuilder::with_node_counts(1) .wallets(1_000) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block .users(500) // Use 500 of the 1,000 wallets }) - .da_with(|da| { - da.channel_rate(1) // 1 DA channel (more spawned with headroom) - .blob_rate(1) // target 1 blob per block - .headroom_percent(20) // default headroom when sizing channels - }) } ``` -Generates both transaction and DA traffic to stress both subsystems. +Generates transaction traffic to stress the inclusion pipeline. ### 4. Expectation @@ -168,7 +158,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; pub fn step_4_expectation() -> testing_framework_core::scenario::Builder<()> { - ScenarioBuilder::with_node_counts(1, 1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously. + ScenarioBuilder::with_node_counts(1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously. 
} ``` @@ -182,7 +172,7 @@ use std::time::Duration; use testing_framework_core::scenario::ScenarioBuilder; pub fn step_5_run_duration() -> testing_framework_core::scenario::Builder<()> { - ScenarioBuilder::with_node_counts(1, 1).with_run_duration(Duration::from_secs(60)) + ScenarioBuilder::with_node_counts(1).with_run_duration(Duration::from_secs(60)) } ``` @@ -196,7 +186,7 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; use testing_framework_runner_local::LocalDeployer; pub async fn step_6_deploy_and_execute() -> Result<()> { - let mut plan = ScenarioBuilder::with_node_counts(1, 1).build(); + let mut plan = ScenarioBuilder::with_node_counts(1).build(); let deployer = LocalDeployer::default(); // Use local process deployer let runner = deployer.deploy(&plan).await?; // Provision infrastructure @@ -213,16 +203,16 @@ pub async fn step_6_deploy_and_execute() -> Result<()> { **With run-examples.sh** (recommended): ```bash -# Scale up to 3 validators, run for 2 minutes -scripts/run/run-examples.sh -t 120 -v 3 -e 2 host +# Scale up to 3 nodes, run for 2 minutes +scripts/run/run-examples.sh -t 120 -n 3 host ``` **With direct cargo run:** ```bash -# Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars) -NOMOS_DEMO_VALIDATORS=3 \ -NOMOS_DEMO_RUN_SECS=120 \ +# Uses LOGOS_BLOCKCHAIN_DEMO_* env vars (or legacy *_DEMO_* vars) +LOGOS_BLOCKCHAIN_DEMO_NODES=3 \ +LOGOS_BLOCKCHAIN_DEMO_RUN_SECS=120 \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin local_runner ``` @@ -234,12 +224,12 @@ Use the same API with a different deployer for reproducible containerized enviro **Recommended:** Use the convenience script (handles everything): ```bash -scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 1 compose ``` This automatically: -- Fetches circuit assets (to `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`) -- Builds/uses prebuilt binaries (via `NOMOS_BINARIES_TAR` if available) +- Fetches circuit assets (to `~/.logos-blockchain-circuits` by default) +- Builds/uses prebuilt binaries (via `LOGOS_BLOCKCHAIN_BINARIES_TAR` if available) - Builds the Docker image - Runs the compose scenario @@ -248,15 +238,14 @@ This automatically: ```bash # Option 1: Use prebuilt bundle (recommended for compose/k8s) scripts/build/build-bundle.sh --platform linux # Creates .tmp/nomos-binaries-linux-v0.3.1.tar.gz -export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz +export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz # Option 2: Manual circuit/image setup (rebuilds during image build) -scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits -cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ +scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 /tmp/logos-blockchain-circuits scripts/build/build_test_image.sh # Run with Compose -NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \ +LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin compose_runner ``` @@ -274,7 +263,7 @@ eval "$(scripts/setup/setup-observability.sh compose env)" Then run your compose scenario as usual (the environment variables enable PromQL querying and node OTLP metrics export). -**Note:** Compose expects KZG parameters at `/kzgrs_test_params/kzgrs_test_params` inside containers (the directory name is repeated as the filename). +**Note:** Compose expects circuits at `/opt/circuits` inside containers (set by the image build). 
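To verify the circuits actually made it into the image, a quick spot check (a sketch, assuming the `logos-blockchain-testing:local` tag from the steps above and an image entrypoint that allows running an arbitrary command):

```bash
# List the baked-in circuits; an error here means the image was built
# without circuit assets and needs a rebuild
docker run --rm logos-blockchain-testing:local ls /opt/circuits
```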
**In code:** Just swap the deployer: @@ -285,7 +274,7 @@ use testing_framework_runner_compose::ComposeDeployer; pub async fn run_with_compose_deployer() -> Result<()> { // ... same scenario definition ... - let mut plan = ScenarioBuilder::with_node_counts(1, 1).build(); + let mut plan = ScenarioBuilder::with_node_counts(1).build(); let deployer = ComposeDeployer::default(); // Use Docker Compose let runner = deployer.deploy(&plan).await?; diff --git a/book/src/runners.md b/book/src/runners.md index 695efae..000dbd5 100644 --- a/book/src/runners.md +++ b/book/src/runners.md @@ -14,7 +14,7 @@ environment and operational considerations, see [Operations Overview](operations - **Can run in CI** for fast smoke tests. - **Node control:** Not supported (chaos workloads not available) -**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 host` +**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 host` ## Docker Compose runner - Starts nodes in containers to provide a reproducible multi-node stack on a @@ -25,7 +25,7 @@ environment and operational considerations, see [Operations Overview](operations - **Recommended for CI pipelines** (isolated environment, reproducible). - **Node control:** Supported (can restart nodes for chaos testing) -**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose` +**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 compose` ## Kubernetes runner - Deploys nodes onto a cluster for higher-fidelity, longer-running scenarios (via `K8sDeployer`). @@ -34,10 +34,10 @@ environment and operational considerations, see [Operations Overview](operations and scheduling matter. - **Node control:** Not supported yet (chaos workloads not available) -**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 k8s` +**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 k8s` ### Common expectations -- All runners require at least one validator and, for transaction scenarios, +- All runners require at least one node and, for transaction scenarios, access to seeded wallets. - Readiness probes gate workload start so traffic begins only after nodes are reachable. diff --git a/book/src/running-examples.md b/book/src/running-examples.md index 0f6cdb6..f05f863 100644 --- a/book/src/running-examples.md +++ b/book/src/running-examples.md @@ -8,18 +8,18 @@ Use `scripts/run/run-examples.sh` for all modes—it handles all setup automatic ```bash # Host mode (local processes) -scripts/run/run-examples.sh -t 60 -v 3 host +scripts/run/run-examples.sh -t 60 -n 3 host # Compose mode (Docker Compose) -scripts/run/run-examples.sh -t 60 -v 3 compose +scripts/run/run-examples.sh -t 60 -n 3 compose # K8s mode (Kubernetes) -scripts/run/run-examples.sh -t 60 -v 3 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s ``` **Parameters:** - `-t 60` — Run duration in seconds -- `-v 3` — Number of validators +- `-n 3` — Number of nodes - `host|compose|k8s` — Deployment mode This script handles: @@ -29,14 +29,14 @@ This script handles: - Image loading into cluster (k8s) - Execution with proper environment -**Note:** For `k8s` runs against non-local clusters (e.g. EKS), the cluster pulls images from a registry. In that case, build + push your image separately (see `scripts/build/build_test_image.sh`) and set `NOMOS_TESTNET_IMAGE` to the pushed reference. +**Note:** For `k8s` runs against non-local clusters (e.g. EKS), the cluster pulls images from a registry. 
In that case, build + push your image separately (see `scripts/build/build_test_image.sh`) and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` to the pushed reference.

## Quick Smoke Matrix

For a small "does everything still run?" matrix across all runners:

```bash
-scripts/run/run-test-matrix.sh -t 120 -v 1 -e 1
+scripts/run/run-test-matrix.sh -t 120 -n 1
```

This runs host, compose, and k8s modes with various image-build configurations. Useful after making runner/image/script changes. Forwards `--metrics-*` options through to `scripts/run/run-examples.sh`.

@@ -51,31 +51,31 @@ This runs host, compose, and k8s modes with various image-build configurations.

**Environment overrides:**
- `VERSION=v0.3.1` — Circuit version
-- `NOMOS_NODE_REV=<rev>` — nomos-node git revision
-- `NOMOS_BINARIES_TAR=path/to/bundle.tar.gz` — Use prebuilt bundle
-- `NOMOS_SKIP_IMAGE_BUILD=1` — Skip image rebuild inside `run-examples.sh` (compose/k8s)
-- `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64|linux/amd64` — Docker platform for bundle builds (macOS/Windows)
+- `LOGOS_BLOCKCHAIN_NODE_REV=<rev>` — logos-blockchain-node git revision
+- `LOGOS_BLOCKCHAIN_BINARIES_TAR=path/to/bundle.tar.gz` — Use prebuilt bundle
+- `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1` — Skip image rebuild inside `run-examples.sh` (compose/k8s)
+- `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64|linux/amd64` — Docker platform for bundle builds (macOS/Windows)
- `COMPOSE_CIRCUITS_PLATFORM=linux-aarch64|linux-x86_64` — Circuits platform for image builds
- `SLOW_TEST_ENV=true` — Doubles built-in readiness timeouts (useful in CI / constrained laptops)
- `TESTNET_PRINT_ENDPOINTS=1` — Print `TESTNET_ENDPOINTS` / `TESTNET_PPROF` lines during deploy

-## Dev Workflow: Updating nomos-node Revision
+## Dev Workflow: Updating logos-blockchain-node Revision

-The repo pins a `nomos-node` revision in `versions.env` for reproducible builds. To update it or point to a local checkout:
+The repo pins a `logos-blockchain-node` revision in `versions.env` for reproducible builds. To update it or point to a local checkout:

```bash
# Pin to a new git revision (updates versions.env + Cargo.toml git revs)
scripts/ops/update-nomos-rev.sh --rev <rev>

-# Use a local nomos-node checkout instead (for development)
-scripts/ops/update-nomos-rev.sh --path /path/to/nomos-node
+# Use a local logos-blockchain-node checkout instead (for development)
+scripts/ops/update-nomos-rev.sh --path /path/to/logos-blockchain-node

# If Cargo.toml was marked skip-worktree, clear it
scripts/ops/update-nomos-rev.sh --unskip-worktree
```

**Notes:**
-- Don't commit absolute `NOMOS_NODE_PATH` values; prefer `--rev` for shared history/CI
+- Don't commit absolute `LOGOS_BLOCKCHAIN_NODE_PATH` values; prefer `--rev` for shared history/CI
- After changing rev/path, expect `Cargo.lock` to update on the next `cargo build`/`cargo test`

## Cleanup Helper

@@ -100,7 +100,7 @@ For manual control, run the `local_runner` binary directly:

```bash
POL_PROOF_DEV_MODE=true \
-NOMOS_NODE_BIN=/path/to/nomos-node \
+LOGOS_BLOCKCHAIN_NODE_BIN=/path/to/logos-blockchain-node \
cargo run -p runner-examples --bin local_runner
```

@@ -108,14 +108,14 @@ cargo run -p runner-examples --bin local_runner

| Variable | Default | Effect |
|----------|---------|--------|
-| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (legacy: `LOCAL_DEMO_VALIDATORS`) |
-| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (legacy: `LOCAL_DEMO_RUN_SECS`) |
-| `NOMOS_NODE_BIN` | — | Path to nomos-node binary (required) |
-| `NOMOS_LOG_DIR` | None | Directory for per-node log files |
-| `NOMOS_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI) |
-| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset |
-| `NOMOS_LOG_LEVEL` | info | Global log level: error, warn, info, debug, trace |
-| `NOMOS_LOG_FILTER` | None | Fine-grained module filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) |
+| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes (legacy: `LOCAL_DEMO_NODES`) |
+| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds (legacy: `LOCAL_DEMO_RUN_SECS`) |
+| `LOGOS_BLOCKCHAIN_NODE_BIN` | — | Path to logos-blockchain-node binary (required) |
+| `LOGOS_BLOCKCHAIN_LOG_DIR` | None | Directory for per-node log files |
+| `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI) |
+| `LOGOS_BLOCKCHAIN_TESTS_TRACING` | false | Enable debug tracing preset |
+| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | info | Global log level: error, warn, info, debug, trace |
+| `LOGOS_BLOCKCHAIN_LOG_FILTER` | None | Fine-grained module filtering (e.g., `cryptarchia=trace`) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |

**Note:** Requires circuit assets and host binaries. Use `scripts/run/run-examples.sh host` to handle setup automatically.

@@ -134,11 +134,11 @@ scripts/build/build-bundle.sh --platform linux
# Creates .tmp/nomos-binaries-linux-v0.3.1.tar.gz

# 2. Build image (embeds bundle assets)
-export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
+export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
scripts/build/build_test_image.sh

# 3.
Run -NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \ +LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin compose_runner ``` @@ -146,15 +146,14 @@ cargo run -p runner-examples --bin compose_runner ### Option 2: Manual Circuit/Image Setup ```bash -# Fetch and copy circuits -scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits -cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ +# Fetch circuits +scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits # Build image scripts/build/build_test_image.sh # Run -NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \ +LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin compose_runner ``` @@ -162,36 +161,36 @@ cargo run -p runner-examples --bin compose_runner ### Platform Note (macOS / Apple Silicon) - Docker Desktop runs a `linux/arm64` engine by default -- For native performance: `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` (recommended for local testing) -- For amd64 targets: `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/amd64` (slower via emulation) +- For native performance: `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` (recommended for local testing) +- For amd64 targets: `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/amd64` (slower via emulation) ### Compose Runner Environment Variables | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_TESTNET_IMAGE` | — | Image tag (required, must match built image) | +| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | — | Image tag (required, must match built image) | | `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners | -| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators | -| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds | -| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "validators" (e.g., `3`) | -| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query | -| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export | -| `NOMOS_GRAFANA_URL` | None | Grafana base URL for printing/logging | +| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes | +| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds | +| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "nodes" (e.g., `3`) | +| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query | +| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export | +| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | None | Grafana base URL for printing/logging | | `COMPOSE_RUNNER_HOST` | 127.0.0.1 | Host address for port mappings | | `COMPOSE_RUNNER_PRESERVE` | 0 | Keep containers running after test | -| `NOMOS_LOG_LEVEL` | info | Node log level (stdout/stderr) | -| `NOMOS_LOG_FILTER` | None | Fine-grained module filtering | +| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | info | Node log level (stdout/stderr) | +| `LOGOS_BLOCKCHAIN_LOG_FILTER` | None | Fine-grained module filtering | **Config file option:** `testing-framework/assets/stack/cfgsync.yaml` (`tracing_settings.logger`) — Switch node logs between stdout/stderr and file output ### Compose-Specific Features - **Node control support**: Only runner that supports chaos testing (`.enable_node_control()` + chaos workloads) -- **External observability**: Set `NOMOS_METRICS_*` / `NOMOS_GRAFANA_URL` to enable telemetry links and 
querying +- **External observability**: Set `LOGOS_BLOCKCHAIN_METRICS_*` / `LOGOS_BLOCKCHAIN_GRAFANA_URL` to enable telemetry links and querying - Quickstart: `scripts/setup/setup-observability.sh compose up` then `scripts/setup/setup-observability.sh compose env` **Important:** -- Containers expect KZG parameters at `/kzgrs_test_params/kzgrs_test_params` (note the repeated filename) +- Containers expect circuits at `/opt/circuits` (set by the image build) - Use `scripts/run/run-examples.sh compose` to handle all setup automatically --- @@ -211,11 +210,11 @@ For manual control, run the `k8s_runner` binary directly. K8s requires the same ```bash # 1. Build image with bundle (recommended) scripts/build/build-bundle.sh --platform linux -export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz +export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz scripts/build/build_test_image.sh # 2. Load into cluster (choose one) -export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local +export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local # For kind: kind load docker-image logos-blockchain-testing:local @@ -226,13 +225,13 @@ minikube image load logos-blockchain-testing:local # For remote cluster (push to registry): docker tag logos-blockchain-testing:local your-registry/logos-blockchain-testing:latest docker push your-registry/logos-blockchain-testing:latest -export NOMOS_TESTNET_IMAGE=your-registry/logos-blockchain-testing:latest +export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=your-registry/logos-blockchain-testing:latest ``` ### Run the Example ```bash -export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local +export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local export POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin k8s_runner ``` @@ -241,13 +240,13 @@ cargo run -p runner-examples --bin k8s_runner | Variable | Default | Effect | |----------|---------|--------| -| `NOMOS_TESTNET_IMAGE` | — | Image tag (required) | +| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | — | Image tag (required) | | `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners | -| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators | -| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds | -| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query (PromQL) | -| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export | -| `NOMOS_GRAFANA_URL` | None | Grafana base URL for printing/logging | +| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes | +| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds | +| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query (PromQL) | +| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export | +| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | None | Grafana base URL for printing/logging | | `K8S_RUNNER_NAMESPACE` | Random | Kubernetes namespace (pin for debugging) | | `K8S_RUNNER_RELEASE` | Random | Helm release name (pin for debugging) | | `K8S_RUNNER_NODE_HOST` | — | NodePort host resolution for non-local clusters | @@ -257,24 +256,24 @@ cargo run -p runner-examples --bin k8s_runner ### K8s + Observability (Optional) ```bash -export NOMOS_METRICS_QUERY_URL=http://your-prometheus:9090 +export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://your-prometheus:9090 # Prometheus OTLP receiver example: -export NOMOS_METRICS_OTLP_INGEST_URL=http://your-prometheus:9090/api/v1/otlp/v1/metrics 
+export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://your-prometheus:9090/api/v1/otlp/v1/metrics

# Optional: print Grafana link in TESTNET_ENDPOINTS
-export NOMOS_GRAFANA_URL=http://your-grafana:3000
+export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://your-grafana:3000

cargo run -p runner-examples --bin k8s_runner
```

**Notes:**
-- `NOMOS_METRICS_QUERY_URL` must be reachable from the runner process (often via `kubectl port-forward`)
-- `NOMOS_METRICS_OTLP_INGEST_URL` must be reachable from nodes (pods/containers) and is backend-specific
+- `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` must be reachable from the runner process (often via `kubectl port-forward`)
+- `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` must be reachable from nodes (pods/containers) and is backend-specific
- Quickstart installer: `scripts/setup/setup-observability.sh k8s install` then `scripts/setup/setup-observability.sh k8s env`
- Optional dashboards: `scripts/setup/setup-observability.sh k8s dashboards`

### Via `scripts/run/run-examples.sh` (Recommended)

```bash
-scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s \
+scripts/run/run-examples.sh -t 60 -n 3 k8s \
  --metrics-query-url http://your-prometheus:9090 \
  --metrics-otlp-ingest-url http://your-prometheus:9090/api/v1/otlp/v1/metrics
```

@@ -285,7 +284,7 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s \
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ObservabilityBuilderExt as _;

-let plan = ScenarioBuilder::with_node_counts(1, 1)
+let plan = ScenarioBuilder::with_node_counts(1)
    .with_metrics_query_url_str("http://your-prometheus:9090")
    .with_metrics_otlp_ingest_url_str("http://your-prometheus:9090/api/v1/otlp/v1/metrics")
    .build();

@@ -293,8 +292,8 @@

### Important K8s Notes

-- K8s runner mounts `testing-framework/assets/stack/kzgrs_test_params` as a hostPath volume
-- File path inside pods: `/kzgrs_test_params/kzgrs_test_params`
+- K8s runner uses circuits baked into the image
+- Circuits directory inside pods: `/opt/circuits`
- **No node control support yet**: Chaos workloads (`.enable_node_control()`) will fail
- Optimized for local clusters (Docker Desktop K8s / minikube / kind)
- Remote clusters require additional setup (registry push, PV/CSI for assets, etc.)
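If Prometheus runs inside the cluster, one way to satisfy the reachability notes above is a port-forward from the runner's machine (a sketch; the `monitoring` namespace and `prometheus` service name are placeholders, substitute your own):

```bash
# Forward the in-cluster Prometheus service to localhost so the runner
# process can issue PromQL queries against it
kubectl -n monitoring port-forward svc/prometheus 9090:9090 &
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://127.0.0.1:9090
```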
diff --git a/book/src/running-scenarios.md b/book/src/running-scenarios.md index b6a6625..995e539 100644 --- a/book/src/running-scenarios.md +++ b/book/src/running-scenarios.md @@ -37,7 +37,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; async fn run_once() -> anyhow::Result<()> { - let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(20) .transactions_with(|tx| tx.rate(1).users(5)) .expect_consensus_liveness() @@ -63,14 +63,14 @@ Notes: ### Local (Host) Runner - **Best for**: fast iteration and debugging -- **Logs/state**: stored under a temporary run directory unless you set `NOMOS_TESTS_KEEP_LOGS=1` and/or `NOMOS_LOG_DIR=...` +- **Logs/state**: stored under a temporary run directory unless you set `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` and/or `LOGOS_BLOCKCHAIN_LOG_DIR=...` - **Limitations**: no node-control capability (chaos workflows that require node control won’t work here) Run the built-in local examples: ```bash POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 host +scripts/run/run-examples.sh -t 60 -n 3 host ``` ### Compose Runner @@ -83,7 +83,7 @@ Run the built-in compose examples: ```bash POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 3 compose ``` ### K8s Runner @@ -96,16 +96,16 @@ Run the built-in k8s examples: ```bash POL_PROOF_DEV_MODE=true \ -scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s +scripts/run/run-examples.sh -t 60 -n 3 k8s ``` --- ## Artifacts & Where to Look -- **Node logs**: configure via `NOMOS_LOG_DIR`, `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (see [Logging & Observability](logging-observability.md)) +- **Node logs**: configure via `LOGOS_BLOCKCHAIN_LOG_DIR`, `LOGOS_BLOCKCHAIN_LOG_LEVEL`, `LOGOS_BLOCKCHAIN_LOG_FILTER` (see [Logging & Observability](logging-observability.md)) - **Runner logs**: controlled by `RUST_LOG` (runner process only) -- **Keep run directories**: set `NOMOS_TESTS_KEEP_LOGS=1` +- **Keep run directories**: set `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` - **Compose environment preservation**: set `COMPOSE_RUNNER_PRESERVE=1` - **K8s environment preservation**: set `K8S_RUNNER_PRESERVE=1` diff --git a/book/src/scenario-lifecycle.md b/book/src/scenario-lifecycle.md index 5cb202e..121f271 100644 --- a/book/src/scenario-lifecycle.md +++ b/book/src/scenario-lifecycle.md @@ -24,7 +24,7 @@ flowchart TB subgraph Phase4["4. Execution Phase"] Execute[Drive Workloads] - ExecuteDetails["• Submit transactions
• Disperse DA blobs
• Trigger chaos events
• Run for duration"] + ExecuteDetails["• Submit transactions
• Trigger chaos events
• Run for duration"] Execute --> ExecuteDetails end @@ -61,8 +61,8 @@ flowchart TB Declare a topology, attach workloads and expectations, and set the run window. The plan is the single source of truth for what will happen. **Key actions:** -- Define cluster shape (validators, network topology) -- Configure workloads (transaction rate, DA traffic, chaos patterns) +- Define cluster shape (nodes, network topology) +- Configure workloads (transaction rate, chaos patterns) - Attach expectations (liveness, inclusion, custom checks) - Set timing parameters (run duration, cooldown period) @@ -74,7 +74,7 @@ Hand the plan to a deployer. It provisions the environment on the chosen backend **Key actions:** - Provision infrastructure (processes, containers, or pods) -- Launch validator nodes +- Launch nodes - Wait for readiness probes (HTTP endpoints respond) - Establish node connectivity and metrics endpoints - Spawn BlockFeed for real-time block observation @@ -99,7 +99,6 @@ The runner starts traffic and behaviors for the planned duration. **Key actions:** - Submit transactions at configured rates -- Disperse and sample DA blobs - Trigger chaos events (node restarts) - Run concurrently for the specified duration - Observe blocks and metrics in real-time @@ -115,7 +114,6 @@ Once activity stops (and optional cooldown completes), the runner checks livenes **Key actions:** - Verify consensus liveness (minimum block production) - Check transaction inclusion rates -- Validate DA dispersal and sampling - Assess system recovery after chaos events - Aggregate pass/fail results @@ -128,7 +126,7 @@ Tear down resources so successive runs start fresh and do not inherit leaked sta **Key actions:** - Stop all node processes/containers/pods - Remove temporary directories and volumes -- Collect and archive logs (if `NOMOS_TESTS_KEEP_LOGS=1`) +- Collect and archive logs (if `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`) - Release ports and network resources - Cleanup observability stack (if spawned) diff --git a/book/src/testing-philosophy.md b/book/src/testing-philosophy.md index e87e2b8..455c6cb 100644 --- a/book/src/testing-philosophy.md +++ b/book/src/testing-philosophy.md @@ -14,7 +14,7 @@ use testing_framework_workflows::ScenarioBuilderExt; pub fn declarative_over_imperative() { // Good: declarative - let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block }) @@ -22,7 +22,7 @@ pub fn declarative_over_imperative() { .build(); // Bad: imperative (framework doesn't work this way) - // spawn_validator(); + // spawn_node(); // loop { submit_tx(); check_block(); } } ``` @@ -30,6 +30,8 @@ pub fn declarative_over_imperative() { **Why it matters:** The framework handles deployment, readiness, and cleanup. You focus on test intent, not infrastructure orchestration. +**Exception:** For advanced network scenarios (split-brain, late joins, network healing) that can't be expressed declaratively, see [Manual Clusters](manual-cluster.md) for imperative control. + ## Protocol Time, Not Wall Time Reason in **blocks** and **consensus intervals**, not wall-clock seconds. 
@@ -47,7 +49,7 @@ use testing_framework_workflows::ScenarioBuilderExt;

pub fn protocol_time_not_wall_time() {
    // Good: protocol-oriented thinking
-    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
+    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .transactions_with(|txs| {
            txs.rate(5) // 5 transactions per block
        })
@@ -84,7 +86,7 @@ use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};

pub fn determinism_first() {
    // Separate: functional test (deterministic)
-    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
+    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .transactions_with(|txs| {
            txs.rate(5) // 5 transactions per block
        })
@@ -93,7 +95,7 @@ pub fn determinism_first() {

    // Separate: chaos test (introduces randomness)
    let _chaos_plan =
-        ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
+        ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
            .enable_node_control()
            .chaos_with(|c| {
                c.restart()
@@ -120,7 +122,6 @@ Prefer **user-facing signals** over internal state:

**Good checks:**
- Blocks progressing at expected rate (liveness)
- Transactions included within N blocks (inclusion)
-- DA blobs retrievable (availability)

**Avoid internal checks:**
- Memory pool size
@@ -143,14 +144,14 @@ use testing_framework_workflows::ScenarioBuilderExt;

pub fn minimum_run_windows() {
    // Bad: too short (~2 blocks with default 2s slots, 0.9 coeff)
-    let _too_short = ScenarioBuilder::with_node_counts(1, 0)
+    let _too_short = ScenarioBuilder::with_node_counts(1)
        .with_run_duration(Duration::from_secs(5))
        .expect_consensus_liveness()
        .build();

    // Good: enough blocks for assertions (~27 blocks with default 2s slots, 0.9
    // coeff)
-    let _good = ScenarioBuilder::with_node_counts(1, 0)
+    let _good = ScenarioBuilder::with_node_counts(1)
        .with_run_duration(Duration::from_secs(60))
        .expect_consensus_liveness()
        .build();
diff --git a/book/src/topology-chaos.md b/book/src/topology-chaos.md
index 504d603..c4cb9ac 100644
--- a/book/src/topology-chaos.md
+++ b/book/src/topology-chaos.md
@@ -15,22 +15,22 @@ See also: [RunContext: BlockFeed & Node Control](node-control.md) for the curren

- **Restarts**: random restarts with minimum delay/cooldown to test recovery.
- **Partitions (planned)**: block/unblock peers to simulate partial isolation, then assert height convergence after healing.
-- **Validator churn (planned)**: stop one validator and start another (new key) mid-run to
+- **Node churn (planned)**: stop one node and start another (new key) mid-run to
  test membership changes; expect convergence.
-- **Load SLOs**: push tx/DA rates and assert inclusion/availability budgets
+- **Load SLOs**: push transaction rates and assert inclusion/latency budgets
  instead of only liveness.
- **API probes**: poll HTTP/RPC endpoints during chaos to ensure external contracts stay healthy (shape + latency).

## Expectations to pair
- **Liveness/height convergence** after chaos windows.
-- **SLO checks**: inclusion latency, DA responsiveness, API latency/shape.
+- **SLO checks**: inclusion latency, API latency/shape.
- **Recovery checks**: ensure nodes that were isolated or restarted catch up to
  cluster height within a timeout.

## Guidance
- Keep chaos realistic: avoid flapping or patterns you wouldn't operate in prod.
-- Scope chaos: choose validators intentionally; don't restart all
+- Scope chaos: choose nodes intentionally; don't restart all
  nodes at once unless you're testing full outages.
- Combine chaos with observability: capture block feed/metrics and API health so failures are diagnosable.
diff --git a/book/src/troubleshooting.md b/book/src/troubleshooting.md
index 13929c9..50b5330 100644
--- a/book/src/troubleshooting.md
+++ b/book/src/troubleshooting.md
@@ -3,12 +3,12 @@

**Prerequisites for All Runners:**
- **`versions.env` file** at repository root (required by helper scripts)
- **`POL_PROOF_DEV_MODE=true`** MUST be set for all runners (host, compose, k8s) to avoid expensive Groth16 proof generation that causes timeouts
-- **KZG circuit assets** must be present at `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note the repeated filename) for DA workloads
+- **Circuit assets** must be present and `LOGOS_BLOCKCHAIN_CIRCUITS` must point to a directory that contains them

**Platform/Environment Notes:**
-- **macOS + Docker Desktop (Apple silicon):** prefer `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` for local compose/k8s runs to avoid slow/fragile amd64 emulation builds.
+- **macOS + Docker Desktop (Apple silicon):** prefer `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` for local compose/k8s runs to avoid slow/fragile amd64 emulation builds.
- **Disk space:** bundle/image builds are storage-heavy. If you see I/O errors or Docker build failures, check free space and prune old artifacts (`.tmp/`, `target/`, and Docker build cache) before retrying.
-- **K8s runner scope:** the default Helm chart mounts KZG params via `hostPath` and uses a local image tag (`logos-blockchain-testing:local`). This is intended for local clusters (Docker Desktop / minikube / kind), not remote managed clusters without additional setup.
+- **K8s runner scope:** the default Helm chart uses circuit assets baked into the image and a local image tag (`logos-blockchain-testing:local`). This is intended for local clusters (Docker Desktop / minikube / kind), not remote managed clusters without additional setup.
- Quick cleanup: `scripts/ops/clean.sh` (and `scripts/ops/clean.sh --docker` if needed).
- Destructive cleanup (last resort): `scripts/ops/clean.sh --docker-system --dangerous` (add `--volumes` if you also want to prune Docker volumes).

@@ -18,7 +18,7 @@

Common symptoms and likely causes:

-- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing KZG circuit assets (`/kzgrs_test_params/kzgrs_test_params` file) for DA workloads, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets exist, extend duration, check node logs for startup errors.
+- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing circuit assets, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets exist, extend duration, check node logs for startup errors.
- **Transactions not included**: unfunded or misconfigured wallets (check `.wallets(N)` vs `.users(M)`), transaction rate exceeding block capacity, or rates exceeding block production speed—reduce rate, increase wallet count, verify wallet setup in logs.
- **Chaos stalls the run**: chaos (node control) only works with ComposeDeployer; host runner (LocalDeployer) and K8sDeployer don't support it (won't "stall", just can't execute chaos workloads). With compose, aggressive restart cadence can prevent consensus recovery—widen restart intervals.
- **Observability gaps**: metrics or logs unreachable because ports clash or services are not exposed—adjust observability ports and confirm runner wiring. @@ -43,7 +43,7 @@ $ cargo run -p runner-examples --bin local_runner Finished dev [unoptimized + debuginfo] target(s) in 0.48s Running `target/debug/local_runner` [INFO runner_examples::local_runner] Starting local runner scenario -[INFO testing_framework_runner_local] Launching 3 validators +[INFO testing_framework_runner_local] Launching 3 nodes [INFO testing_framework_runner_local] Waiting for node readiness... (hangs here for 5+ minutes, CPU at 100%) thread 'main' panicked at 'readiness timeout expired' @@ -71,12 +71,12 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner **What you'll see:** ```text -$ scripts/run/run-examples.sh -t 60 -v 1 -e 1 host +$ scripts/run/run-examples.sh -t 60 -n 1 host ERROR: versions.env not found at repository root This file is required and should define: VERSION= - NOMOS_NODE_REV= - NOMOS_BUNDLE_VERSION= + LOGOS_BLOCKCHAIN_NODE_REV= + LOGOS_BLOCKCHAIN_BUNDLE_VERSION= ``` **Root Cause:** Helper scripts need `versions.env` to know which versions to build/fetch. @@ -87,50 +87,44 @@ This file is required and should define: cat versions.env # Should show: # VERSION=v0.3.1 -# NOMOS_NODE_REV=abc123def456 -# NOMOS_BUNDLE_VERSION=v1 +# LOGOS_BLOCKCHAIN_NODE_REV=abc123def456 +# LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v1 ``` --- -### 3. Missing KZG Circuit Assets (DA Workloads) +### 3. Missing Circuit Assets **Symptoms:** -- DA workload tests fail +- Node startup fails early - Error messages about missing circuit files -- Nodes crash during DA operations **What you'll see:** ```text $ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner -[INFO testing_framework_runner_local] Starting DA workload -[ERROR nomos_da_dispersal] Failed to load KZG parameters -Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params" } +[INFO testing_framework_runner_local] Starting local runner scenario +Error: circuit assets directory missing or invalid thread 'main' panicked at 'workload init failed' ``` -**Root Cause:** DA (Data Availability) workloads require KZG cryptographic parameters. The file must exist at: `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note the repeated filename). +**Root Cause:** Circuit assets are required for proof-related paths. The runner expects `LOGOS_BLOCKCHAIN_CIRCUITS` to point to a directory containing the assets. **Fix (recommended):** ```bash # Use run-examples.sh which handles setup automatically -scripts/run/run-examples.sh -t 60 -v 1 -e 1 host +scripts/run/run-examples.sh -t 60 -n 1 host ``` **Fix (manual):** ```bash # Fetch circuits -scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits +scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits -# Copy to expected location -mkdir -p testing-framework/assets/stack/kzgrs_test_params -cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ - -# Verify (should be ~120MB) -ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params +# Set the environment variable +export LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits ``` --- @@ -138,37 +132,37 @@ ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params ### 4. 
Node Binaries Not Found **Symptoms:** -- Error about missing `nomos-node` binary +- Error about missing `logos-blockchain-node` binary - "file not found" or "no such file or directory" -- Environment variables `NOMOS_NODE_BIN` not set +- Environment variables `LOGOS_BLOCKCHAIN_NODE_BIN` not set **What you'll see:** ```text $ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner -[INFO testing_framework_runner_local] Spawning validator 0 +[INFO testing_framework_runner_local] Spawning node 0 Error: Os { code: 2, kind: NotFound, message: "No such file or directory" } -thread 'main' panicked at 'failed to spawn nomos-node process' +thread 'main' panicked at 'failed to spawn logos-blockchain-node process' ``` -**Root Cause:** The local runner needs compiled `nomos-node` binaries, but doesn't know where they are. +**Root Cause:** The local runner needs compiled `logos-blockchain-node` binaries, but doesn't know where they are. **Fix (recommended):** ```bash # Use run-examples.sh which builds binaries automatically -scripts/run/run-examples.sh -t 60 -v 1 -e 1 host +scripts/run/run-examples.sh -t 60 -n 1 host ``` **Fix (manual - set paths explicitly):** ```bash # Build binaries first -cd ../nomos-node # or wherever your nomos-node checkout is -cargo build --release --bin nomos-node +cd ../logos-blockchain-node # or wherever your logos-blockchain-node checkout is +cargo build --release --bin logos-blockchain-node # Set environment variables -export NOMOS_NODE_BIN=$PWD/target/release/nomos-node +export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/target/release/logos-blockchain-node # Return to testing framework cd ../nomos-testing @@ -187,7 +181,7 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner **What you'll see:** ```text -$ scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose +$ scripts/run/run-examples.sh -t 60 -n 1 compose [INFO runner_examples::compose_runner] Starting compose deployment Error: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? thread 'main' panicked at 'compose deployment failed' @@ -236,7 +230,7 @@ thread 'main' panicked at 'compose deployment failed' ```bash # Use run-examples.sh which builds the image automatically -scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose +scripts/run/run-examples.sh -t 60 -n 1 compose ``` **Fix (manual):** @@ -246,7 +240,7 @@ scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose scripts/build/build-bundle.sh --platform linux # 2. Set bundle path -export NOMOS_BINARIES_TAR=$(ls -t .tmp/nomos-binaries-linux-*.tar.gz | head -1) +export LOGOS_BLOCKCHAIN_BINARIES_TAR=$(ls -t .tmp/nomos-binaries-linux-*.tar.gz | head -1) # 3. 
Build Docker image scripts/build/build_test_image.sh @@ -272,7 +266,7 @@ kind load docker-image logos-blockchain-testing:local ```text $ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner -[INFO testing_framework_runner_local] Launching validator 0 on port 18080 +[INFO testing_framework_runner_local] Launching node 0 on port 18080 Error: Os { code: 48, kind: AddrInUse, message: "Address already in use" } thread 'main' panicked at 'failed to bind port 18080' ``` @@ -287,7 +281,7 @@ lsof -i :18080 # macOS/Linux netstat -ano | findstr :18080 # Windows # Kill orphaned nomos processes -pkill nomos-node +pkill logos-blockchain-node # For compose: ensure containers are stopped docker compose down @@ -335,7 +329,7 @@ thread 'main' panicked at 'workload init failed: insufficient wallets' use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; -let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(20) // ← Increase wallet count .transactions_with(|tx| { tx.users(10) // ← Must be ≤ wallets(20) @@ -362,7 +356,7 @@ CONTAINER ID STATUS abc123def456 Restarting (137) 30 seconds ago # 137 = OOM killed $ docker logs abc123def456 -[INFO nomos_node] Starting validator +[INFO nomos_node] Starting node [INFO consensus] Processing block Killed # ← OOM killer terminated the process ``` @@ -414,15 +408,15 @@ $ ls .tmp/ ```bash # Persist logs to a specific directory -NOMOS_LOG_DIR=/tmp/test-logs \ -NOMOS_TESTS_KEEP_LOGS=1 \ +LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/test-logs \ +LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1 \ POL_PROOF_DEV_MODE=true \ cargo run -p runner-examples --bin local_runner # Logs persist after run ls /tmp/test-logs/ -# nomos-node-0.2024-12-18T14-30-00.log -# nomos-node-1.2024-12-18T14-30-00.log +# logos-blockchain-node-0.2024-12-18T14-30-00.log +# logos-blockchain-node-1.2024-12-18T14-30-00.log # ... ``` @@ -457,7 +451,7 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::ScenarioBuilderExt; // Increase run duration to allow more blocks. -let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(120)) // ← Give more time .build(); @@ -481,15 +475,15 @@ When a test fails, check these in order: 1. **`POL_PROOF_DEV_MODE=true` is set** (REQUIRED for all runners) 2. **`versions.env` exists at repo root** -3. **KZG circuit assets present** (for DA workloads): `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` -4. **Node binaries available** (`NOMOS_NODE_BIN` set, or using `run-examples.sh`) +3. **Circuit assets present** (`LOGOS_BLOCKCHAIN_CIRCUITS` points to a valid directory) +4. **Node binaries available** (`LOGOS_BLOCKCHAIN_NODE_BIN` set, or using `run-examples.sh`) 5. **Docker daemon running** (for compose/k8s) 6. **Docker image built** (`logos-blockchain-testing:local` exists for compose/k8s) 7. **No port conflicts** (`lsof -i :18080`, kill orphaned processes) 8. **Sufficient wallets** (`.wallets(N)` ≥ `.users(M)`) 9. **Enough resources** (Docker memory 8GB+, ulimit -n 4096) 10. **Run duration appropriate** (long enough for consensus timing) -11. **Logs persisted** (`NOMOS_LOG_DIR` + `NOMOS_TESTS_KEEP_LOGS=1` if needed) +11. 
**Logs persisted** (`LOGOS_BLOCKCHAIN_LOG_DIR` + `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` if needed) **Still stuck?** Check node logs (see [Where to Find Logs](#where-to-find-logs)) for the actual error. @@ -497,17 +491,17 @@ When a test fails, check these in order: ### Log Location Quick Reference -| Runner | Default Output | With `NOMOS_LOG_DIR` + Flags | Access Command | +| Runner | Default Output | With `LOGOS_BLOCKCHAIN_LOG_DIR` + Flags | Access Command | |--------|---------------|------------------------------|----------------| -| **Host** (local) | Per-run temporary directories under the current working directory (removed unless `NOMOS_TESTS_KEEP_LOGS=1`) | Per-node files with prefix `nomos-node-{index}` (set `NOMOS_LOG_DIR`) | `cat $NOMOS_LOG_DIR/nomos-node-0*` | +| **Host** (local) | Per-run temporary directories under the current working directory (removed unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`) | Per-node files with prefix `logos-blockchain-node-{index}` (set `LOGOS_BLOCKCHAIN_LOG_DIR`) | `cat $LOGOS_BLOCKCHAIN_LOG_DIR/logos-blockchain-node-0*` | | **Compose** | Docker container stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `docker ps` then `docker logs ` | -| **K8s** | Pod stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `kubectl logs -l nomos/logical-role=validator` | +| **K8s** | Pod stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `kubectl logs -l nomos/logical-role=node` | **Important Notes:** -- **Host runner** (local processes): Per-run temporary directories are created under the current working directory and removed after the run unless `NOMOS_TESTS_KEEP_LOGS=1`. To write per-node log files to a stable location, set `NOMOS_LOG_DIR=/path/to/logs`. +- **Host runner** (local processes): Per-run temporary directories are created under the current working directory and removed after the run unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`. To write per-node log files to a stable location, set `LOGOS_BLOCKCHAIN_LOG_DIR=/path/to/logs`. - **Compose/K8s**: Node log destination is controlled by `testing-framework/assets/stack/cfgsync.yaml` (`tracing_settings.logger`). By default, rely on `docker logs` or `kubectl logs`. -- **File naming**: Log files use prefix `nomos-node-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix). -- **Container names**: Compose containers include project UUID, e.g., `nomos-compose--validator-0-1` where `` is randomly generated per run +- **File naming**: Log files use prefix `logos-blockchain-node-{index}*` with timestamps, e.g., `logos-blockchain-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix). 
### Accessing Node Logs by Runner

@@ -520,15 +514,15 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner 2>&1 | t

**Persistent file output:**
```bash
-NOMOS_LOG_DIR=/tmp/debug-logs \
-NOMOS_LOG_LEVEL=debug \
+LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/debug-logs \
+LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner

# Inspect logs (note: filenames include timestamps):
ls /tmp/debug-logs/
-# Example: nomos-node-0.2024-12-01T10-30-45.log
-tail -f /tmp/debug-logs/nomos-node-0* # Use wildcard to match timestamp
+# Example: logos-blockchain-node-0.2024-12-01T10-30-45.log
+tail -f /tmp/debug-logs/logos-blockchain-node-0* # Use wildcard to match timestamp
```

#### Compose Runner

@@ -542,7 +536,7 @@ docker ps --filter "name=nomos-compose-"
docker logs -f <container-id>

# Or filter by name pattern:
-docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
+docker logs -f $(docker ps --filter "name=nomos-compose-.*-node-0" -q | head -1)

# Show last 100 lines
docker logs --tail 100 <container-id>
@@ -551,12 +545,12 @@ docker logs --tail 100

**Keep containers for post-mortem debugging:**
```bash
COMPOSE_RUNNER_PRESERVE=1 \
-NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
+LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner

# OR: Use run-examples.sh (handles setup automatically)
-COMPOSE_RUNNER_PRESERVE=1 scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
+COMPOSE_RUNNER_PRESERVE=1 scripts/run/run-examples.sh -t 60 -n 1 compose

# After test failure, containers remain running:
docker ps --filter "name=nomos-compose-"
docker exec -it <container-id> /bin/sh
docker logs <container-id> > debug.log
```

-**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1`, where `{uuid}` is randomly generated per run.
+**Note:** Container names follow the pattern `nomos-compose-{uuid}-node-{index}-1`, where `{uuid}` is randomly generated per run.
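+
+With preserved containers, a quick sweep can snapshot every node's output before you clean up (a minimal sketch; the `name=nomos-compose-` filter follows the container naming pattern noted above):
+
+```bash
+# Hypothetical post-mortem sweep: dump logs from each preserved container.
+for c in $(docker ps --filter "name=nomos-compose-" --format '{{.Names}}'); do
+  docker logs "$c" > "$c.log" 2>&1
+done
+```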
#### K8s Runner

@@ -576,26 +570,26 @@ docker logs > debug.log

# Check your namespace first
kubectl config view --minify | grep namespace

-# All validator pods (add -n if not using default)
-kubectl logs -l nomos/logical-role=validator -f
+# All node pods (add -n if not using default)
+kubectl logs -l nomos/logical-role=node -f

# Specific pod by name (find exact name first)
-kubectl get pods -l nomos/logical-role=validator # Find the exact pod name
+kubectl get pods -l nomos/logical-role=node # Find the exact pod name
kubectl logs -f <pod-name> # Then use it

# With explicit namespace
-kubectl logs -n my-namespace -l nomos/logical-role=validator -f
+kubectl logs -n my-namespace -l nomos/logical-role=node -f
```

**Download logs from crashed pods:**
```bash
# Previous logs from crashed pod
-kubectl get pods -l nomos/logical-role=validator # Find crashed pod name first
-kubectl logs <pod-name> --previous > crashed-validator.log
+kubectl get pods -l nomos/logical-role=node # Find crashed pod name first
+kubectl logs <pod-name> --previous > crashed-node.log

-# Or use label selector for all crashed validators
-for pod in $(kubectl get pods -l nomos/logical-role=validator -o name); do
+# Or use label selector for all crashed nodes
+for pod in $(kubectl get pods -l nomos/logical-role=node -o name); do
kubectl logs --previous $pod > $(basename $pod)-previous.log 2>&1
done
```

@@ -610,10 +604,10 @@ for pod in $(kubectl get pods -o name); do
done > all-logs.txt

# Or use label selectors (recommended)
-kubectl logs -l nomos/logical-role=validator --tail=500 > validators.log
+kubectl logs -l nomos/logical-role=node --tail=500 > nodes.log

# With explicit namespace
-kubectl logs -n my-namespace -l nomos/logical-role=validator --tail=500 > validators.log
+kubectl logs -n my-namespace -l nomos/logical-role=node --tail=500 > nodes.log
```

## Debugging Workflow

@@ -644,7 +638,7 @@ ps aux | grep nomos
docker ps -a --filter "name=nomos-compose-"

# K8s: check pod status (use label selectors, add -n if needed)
-kubectl get pods -l nomos/logical-role=validator
+kubectl get pods -l nomos/logical-role=node
kubectl describe pod <pod-name> # Get name from above first
```

@@ -658,7 +652,7 @@ Focus on the first node that exhibited problems or the node with the highest ind
- "Failed to bind address" → port conflict
- "Connection refused" → peer not ready or network issue
- "Proof verification failed" or "Proof generation timeout" → missing `POL_PROOF_DEV_MODE=true` (REQUIRED for all runners)
-- "Failed to load KZG parameters" or "Circuit file not found" → missing KZG circuit assets at `testing-framework/assets/stack/kzgrs_test_params/`
+- "Circuit file not found" → missing circuit assets at the path in `LOGOS_BLOCKCHAIN_CIRCUITS`
- "Insufficient funds" → wallet seeding issue (increase `.wallets(N)` or reduce `.users(M)`)

### 4. Check Log Levels

@@ -666,12 +660,12 @@ If logs are too sparse, increase verbosity:

```bash
-NOMOS_LOG_LEVEL=debug \
-NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug" \
+LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
+LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \
cargo run -p runner-examples --bin local_runner
```
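+
+The same comma-separated `target=level` filter syntax can focus verbosity instead of raising it globally (a minimal sketch; `cryptarchia` and `nomos_metrics` are the tracing targets mentioned in this guide):
+
+```bash
+# Hypothetical focused filter: consensus at debug, metric events muted.
+LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
+LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=debug,nomos_metrics=off" \
+POL_PROOF_DEV_MODE=true \
+cargo run -p runner-examples --bin local_runner
+```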
-If metric updates are polluting your logs (fields like `counter.*` / `gauge.*`), move those events to a dedicated `tracing` target (e.g. `target: "nomos_metrics"`) and set `NOMOS_LOG_FILTER="nomos_metrics=off,..."` so they don’t get formatted into log output.
+If metric updates are polluting your logs (fields like `counter.*` / `gauge.*`), move those events to a dedicated `tracing` target (e.g. `target: "nomos_metrics"`) and set `LOGOS_BLOCKCHAIN_LOG_FILTER="nomos_metrics=off,..."` so they don’t get formatted into log output.

### 5. Verify Observability Endpoints

@@ -689,22 +683,22 @@ curl http://localhost:18080/consensus/info # Adjust port per node

### 6. Compare with Known-Good Scenario

-Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it passes, the issue is in your workload or topology configuration.
+Run a minimal baseline test (e.g., 2 nodes, consensus liveness only). If it passes, the issue is in your workload or topology configuration.

## Common Error Messages

### "Consensus liveness expectation failed"

- **Cause**: Not enough blocks produced during the run window, missing
-  `POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing KZG
-  assets for DA workloads.
+  `POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing circuit
+  assets.
- **Fix**:
  1. Verify `POL_PROOF_DEV_MODE=true` is set (REQUIRED for all runners).
-  2. Verify KZG assets exist at
-     `testing-framework/assets/stack/kzgrs_test_params/` (for DA workloads).
+  2. Verify circuit assets exist at the path referenced by
+     `LOGOS_BLOCKCHAIN_CIRCUITS`.
  3. Extend `with_run_duration()` to allow more blocks.
-  4. Check node logs for proof generation or DA errors.
-  5. Reduce transaction/DA rate if nodes are overwhelmed.
+  4. Check node logs for proof generation or circuit asset errors.
+  5. Reduce transaction rate if nodes are overwhelmed.

### "Wallet seeding failed"

@@ -730,50 +724,50 @@ Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it
     it, proof generation is too slow).
  2. Check node logs for startup errors (port conflicts, missing assets).
  3. Verify network connectivity between nodes.
-  4. For DA workloads, ensure KZG circuit assets are present.
+  4. Ensure circuit assets are present and `LOGOS_BLOCKCHAIN_CIRCUITS` points to them.

### "ERROR: versions.env missing"

-- **Cause**: Helper scripts (`run-examples.sh`, `build-bundle.sh`, `setup-circuits-stack.sh`) require `versions.env` file at repository root.
+- **Cause**: Helper scripts (`run-examples.sh`, `build-bundle.sh`, `setup-logos-blockchain-circuits.sh`) require `versions.env` file at repository root.
- **Fix**: Ensure you're running from the repository root directory. The `versions.env` file should already exist and contains:

  ```text
  VERSION=
-  NOMOS_NODE_REV=
-  NOMOS_BUNDLE_VERSION=
+  LOGOS_BLOCKCHAIN_NODE_REV=
+  LOGOS_BLOCKCHAIN_BUNDLE_VERSION=
  ```

  Use the checked-in `versions.env` at the repository root as the source of truth.

### "Port already in use"

- **Cause**: Previous test didn't clean up, or another process holds the port.
-- **Fix**: Kill orphaned processes (`pkill nomos-node`), wait for Docker cleanup
+- **Fix**: Kill orphaned processes (`pkill logos-blockchain-node`), wait for Docker cleanup
  (`docker compose down`), or restart Docker.

### "Image not found: logos-blockchain-testing:local"

-- **Cause**: Docker image not built for Compose/K8s runners, or KZG assets not
+- **Cause**: Docker image not built for Compose/K8s runners, or circuit assets not
  baked into the image.
- **Fix (recommended)**: Use run-examples.sh which handles everything:

  ```bash
-  scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
+  scripts/run/run-examples.sh -t 60 -n 1 compose
  ```

- **Fix (manual)**:
  1. Build bundle: `scripts/build/build-bundle.sh --platform linux`
-  2. Set bundle path: `export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz`
+  2. Set bundle path: `export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz`
  3. Build image: `scripts/build/build_test_image.sh`
-  4. **kind/minikube:** load the image into the cluster nodes (e.g. `kind load docker-image logos-blockchain-testing:local`, or `minikube image load ...`), or push to a registry and set `NOMOS_TESTNET_IMAGE` accordingly.
+  4. **kind/minikube:** load the image into the cluster nodes (e.g. `kind load docker-image logos-blockchain-testing:local`, or `minikube image load ...`), or push to a registry and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` accordingly.
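+
+Before rebuilding, it can help to confirm the image is actually missing (a minimal sketch; `docker image inspect` exits non-zero when the tag is absent):
+
+```bash
+docker image inspect logos-blockchain-testing:local >/dev/null 2>&1 \
+  && echo "image present" \
+  || echo "image missing; build it as above" >&2
+```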
-### "Failed to load KZG parameters" or "Circuit file not found"
+### "Circuit file not found"

-- **Cause**: DA workload requires KZG circuit assets. The file `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note repeated filename) must exist. Inside containers, it's at `/kzgrs_test_params/kzgrs_test_params`.
+- **Cause**: Circuit assets are missing or `LOGOS_BLOCKCHAIN_CIRCUITS` points to a non-existent directory. Inside containers, assets are expected at `/opt/circuits`.
- **Fix (recommended)**: Use run-examples.sh which handles setup:

  ```bash
-  scripts/run/run-examples.sh -t 60 -v 1 -e 1
+  scripts/run/run-examples.sh -t 60 -n 1
  ```

- **Fix (manual)**:
-  1. Fetch assets: `scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits`
-  2. Copy to expected path: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/`
-  3. Verify file exists: `ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
+  1. Fetch assets: `scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits`
+  2. Set `LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits`
+  3. Verify directory exists: `ls -lh $LOGOS_BLOCKCHAIN_CIRCUITS`
  4. For Compose/K8s: rebuild image with assets baked in

For detailed logging configuration and observability setup, see [Logging & Observability](logging-observability.md).
diff --git a/book/src/what-you-will-learn.md b/book/src/what-you-will-learn.md
index a3d2a33..044c82e 100644
--- a/book/src/what-you-will-learn.md
+++ b/book/src/what-you-will-learn.md
@@ -14,8 +14,8 @@ without changing the plan.
- Understand when to use each runner (Host, Compose, Kubernetes)

**Author and Run Scenarios**
-- Define multi-node topologies with validators
-- Configure transaction and DA workloads with appropriate rates
+- Define multi-node topologies
+- Configure transaction workloads with appropriate rates
- Add consensus liveness and inclusion expectations
- Run scenarios across all three deployment modes
- Use BlockFeed to monitor block production in real-time
@@ -56,8 +56,8 @@ without changing the plan.

## What This Book Does NOT Cover

-- **Logos node internals** — This book focuses on testing infrastructure, not the blockchain protocol implementation. See the Logos node repository (`nomos-node`) for protocol documentation.
-- **Consensus algorithm theory** — We assume familiarity with basic blockchain concepts (validators, blocks, transactions, data availability).
+- **Logos node internals** — This book focuses on testing infrastructure, not the blockchain protocol implementation. See the Logos node repository (`logos-blockchain-node`) for protocol documentation.
+- **Consensus algorithm theory** — We assume familiarity with basic blockchain concepts (nodes, blocks, transactions).
- **Rust language basics** — Examples use Rust, but we don't teach the language. See [The Rust Book](https://doc.rust-lang.org/book/) if you're new to Rust.
- **Kubernetes administration** — We show how to use the K8s runner, but don't cover cluster setup, networking, or operations.
- **Docker fundamentals** — We assume basic Docker/Compose knowledge for the Compose runner.
diff --git a/book/src/workloads.md b/book/src/workloads.md
index 8cda5ff..a412b59 100644
--- a/book/src/workloads.md
+++ b/book/src/workloads.md
@@ -45,7 +45,7 @@ use testing_framework_workflows::workloads::transaction::Workload;

```rust,ignore
use testing_framework_workflows::ScenarioBuilderExt;

-ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
+ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .wallets(20) // Seed 20 wallet accounts
    .transactions_with(|tx| {
        tx.rate(10) // 10 transactions per block
@@ -63,7 +63,7 @@ use testing_framework_workflows::workloads::transaction;
let tx_workload = transaction::Workload::with_rate(10)
    .expect("transaction rate must be non-zero");

-ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
+ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .wallets(20)
    .with_workload(tx_workload)
    .with_run_duration(Duration::from_secs(60))
@@ -86,7 +86,7 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
3. **Circuit artifacts must be available:**
   - Automatically staged by `scripts/run/run-examples.sh`
-   - Or manually via `scripts/setup/setup-circuits-stack.sh` (recommended) / `scripts/setup/setup-nomos-circuits.sh`
+   - Or manually via `scripts/setup/setup-logos-blockchain-circuits.sh`

#### Attached Expectation

@@ -117,7 +117,7 @@ Error: Expectation failed: TxInclusionExpectation
**How to debug:**
1. Check logs for proof generation timing:
   ```bash
-   grep "proof generation" $NOMOS_LOG_DIR/*/*.log
+   grep "proof generation" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log
   ```
2. Verify `POL_PROOF_DEV_MODE=true` was set
3. Increase duration: `.with_run_duration(Duration::from_secs(120))`
@@ -125,97 +125,7 @@ Error: Expectation failed: TxInclusionExpectation

---

-### 2. Data Availability (DA) Workload
-
-Drives blob and channel activity to exercise data availability paths and storage.
- -**Import:** -```rust,ignore -use testing_framework_workflows::workloads::da::Workload; -``` - -#### Configuration - -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `blob_rate_per_block` | `NonZeroU64` | **Required** | Blobs to publish per block | -| `channel_rate_per_block` | `NonZeroU64` | **Required** | Channels to create per block | -| `headroom_percent` | `u64` | `20` | Extra capacity for channel planning (avoids saturation) | - -#### DSL Usage - -```rust,ignore -use testing_framework_workflows::ScenarioBuilderExt; - -ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) - .da_with(|da| { - da.channel_rate(2) // 2 channels per block - .blob_rate(4) // 4 blobs per block - }) - .with_run_duration(Duration::from_secs(120)) - .build(); -``` - -#### Direct Instantiation - -```rust,ignore -use std::num::NonZeroU64; -use testing_framework_workflows::workloads::da; - -let da_workload = da::Workload::with_rate( - NonZeroU64::new(4).unwrap(), // blob_rate_per_block - NonZeroU64::new(2).unwrap(), // channel_rate_per_block - 20, // headroom_percent -); - -ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) - .with_workload(da_workload) - .with_run_duration(Duration::from_secs(120)) - .build(); -``` - -#### Prerequisites - -1. **Sufficient duration:** - Channel creation and blob publishing are slower than transaction submission. Allow 120+ seconds. - -2. **Circuit artifacts:** - Same as transaction workload (POL_PROOF_DEV_MODE, circuits staged). - -#### Attached Expectation - -**DaWorkloadExpectation** — Verifies blobs and channels were created and published. - -**What it checks:** -- At least `N` channels were created (where N = channel_rate × expected blocks) -- At least `M` blobs were published (where M = blob_rate × expected blocks × headroom) -- Uses BlockFeed API to verify - -**Failure modes:** -- "Expected >= X channels, observed Y" (Y < X) -- "Expected >= X blobs, observed Y" (Y < X) -- Common causes: insufficient duration, DA saturation - -#### What Failure Looks Like - -```text -Error: Expectation failed: DaWorkloadExpectation - Expected: >= 60 channels (2 channels/block × 30 blocks) - Observed: 23 channels - - Possible causes: - - Duration too short (channels still being created) - - Blob publishing failed (check API errors) - - Network issues (check validator connectivity) -``` - -**How to debug:** -1. Increase duration: `.with_run_duration(Duration::from_secs(180))` -2. Reduce rates: `.channel_rate(1).blob_rate(2)` - ---- - -### 3. Chaos Workload (Random Restart) +### 2. Chaos Workload (Random Restart) Triggers controlled node restarts to test resilience and recovery behaviors. 
@@ -231,7 +141,7 @@ use testing_framework_workflows::workloads::chaos::RandomRestartWorkload; | `min_delay` | `Duration` | **Required** | Minimum time between restart attempts | | `max_delay` | `Duration` | **Required** | Maximum time between restart attempts | | `target_cooldown` | `Duration` | **Required** | Minimum time before restarting same node again | -| `include_validators` | `bool` | **Required** | Whether to restart validators | +| `include_nodes` | `bool` | **Required** | Whether to restart nodes | #### Usage @@ -242,14 +152,14 @@ use testing_framework_core::scenario::ScenarioBuilder; use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRestartWorkload}; let scenario = ScenarioBuilder::topology_with(|t| { - t.network_star().validators(3) + t.network_star().nodes(3) }) .enable_node_control() // REQUIRED for chaos .with_workload(RandomRestartWorkload::new( Duration::from_secs(45), // min_delay Duration::from_secs(75), // max_delay Duration::from_secs(120), // target_cooldown - true, // include_validators + true, // include_nodes )) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(180)) @@ -270,7 +180,7 @@ let scenario = ScenarioBuilder::topology_with(|t| { - **K8s runner:** Not yet implemented 3. **Sufficient topology:** - - For validators: Need >1 validator (workload skips if only 1) + - For nodes: Need >1 node (workload skips if only 1) 4. **Realistic timing:** - Total duration should be 2-3× the max_delay + cooldown @@ -306,18 +216,18 @@ Error: Expectation failed: ConsensusLiveness Possible causes: - Restart frequency too high (nodes can't recover) - Consensus timing too slow (increase duration) - - Too many validators restarted simultaneously + - Too many nodes restarted simultaneously - Nodes crashed after restart (check logs) ``` **How to debug:** 1. Check restart events in logs: ```bash - grep "restarting\|restart complete" $NOMOS_LOG_DIR/*/*.log + grep "restarting\|restart complete" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log ``` 2. Verify node control is enabled: ```bash - grep "NodeControlHandle" $NOMOS_LOG_DIR/*/*.log + grep "NodeControlHandle" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log ``` 3. Increase cooldown: `Duration::from_secs(180)` 4. Increase duration: `.with_run_duration(Duration::from_secs(300))` @@ -338,7 +248,7 @@ use testing_framework_workflows::ScenarioBuilderExt; #### DSL Usage ```rust,ignore -ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(60)) .build(); @@ -360,7 +270,7 @@ Error: Expectation failed: ConsensusLiveness Possible causes: - Nodes crashed or never started (check logs) - Consensus timing misconfigured (CONSENSUS_SLOT_TIME too high) - - Insufficient validators (need >= 2 for BFT consensus) + - Insufficient nodes (need >= 2 for BFT consensus) - Duration too short (nodes still syncing) ``` @@ -368,15 +278,15 @@ Error: Expectation failed: ConsensusLiveness 1. Check if nodes started: ```bash - grep "node started\|listening on" $NOMOS_LOG_DIR/*/*.log + grep "node started\|listening on" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log ``` 2. Check block production: ```bash - grep "block.*height" $NOMOS_LOG_DIR/validator-*/*.log + grep "block.*height" $LOGOS_BLOCKCHAIN_LOG_DIR/node-*/*.log ``` 3. Check consensus participation: ```bash - grep "consensus.*slot\|proposal" $NOMOS_LOG_DIR/validator-*/*.log + grep "consensus.*slot\|proposal" $LOGOS_BLOCKCHAIN_LOG_DIR/node-*/*.log ``` 4. 
Increase duration: `.with_run_duration(Duration::from_secs(120))` 5. Check env vars: `echo $CONSENSUS_SLOT_TIME $CONSENSUS_ACTIVE_SLOT_COEFF` @@ -390,10 +300,9 @@ Each workload automatically attaches its own expectation: | Workload | Expectation | What It Checks | |----------|-------------|----------------| | Transaction | `TxInclusionExpectation` | Transactions were included in blocks | -| DA | `DaWorkloadExpectation` | Blobs and channels were created/published | | Chaos | (None) | Add `.expect_consensus_liveness()` explicitly | -These expectations are added automatically when using the DSL (`.transactions_with()`, `.da_with()`). +These expectations are added automatically when using the DSL (`.transactions_with()`). --- @@ -412,18 +321,6 @@ These expectations are added automatically when using the DSL (`.transactions_wi | Users | 5 | wallet accounts | | Wallets | 20 | total seeded | -### DA Workload - -```rust,ignore -.da_with(|da| da.channel_rate(2).blob_rate(4)) -``` - -| What | Value | Unit | -|------|-------|------| -| Channel rate | 2 | channels/block | -| Blob rate | 4 | blobs/block | -| Headroom | 20 | percent | - ### Chaos Workload ```rust,ignore @@ -432,7 +329,7 @@ These expectations are added automatically when using the DSL (`.transactions_wi Duration::from_secs(45), // min Duration::from_secs(75), // max Duration::from_secs(120), // cooldown - true, // validators + true, // nodes )) ``` @@ -443,10 +340,9 @@ These expectations are added automatically when using the DSL (`.transactions_wi ### Pattern 1: Multiple Workloads ```rust,ignore -ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(20) .transactions_with(|tx| tx.rate(5).users(10)) - .da_with(|da| da.channel_rate(2).blob_rate(2)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(120)) .build(); @@ -473,7 +369,7 @@ impl Expectation for MyCustomExpectation { } } -ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) +ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .with_expectation(MyCustomExpectation) .with_run_duration(Duration::from_secs(60)) .build(); @@ -485,12 +381,12 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) When a workload or expectation fails: -1. Check logs: `$NOMOS_LOG_DIR/*/` or `docker compose logs` or `kubectl logs` -2. Verify environment variables: `POL_PROOF_DEV_MODE`, `NOMOS_NODE_BIN`, etc. +1. Check logs: `$LOGOS_BLOCKCHAIN_LOG_DIR/*/` or `docker compose logs` or `kubectl logs` +2. Verify environment variables: `POL_PROOF_DEV_MODE`, `LOGOS_BLOCKCHAIN_NODE_BIN`, etc. 3. Check prerequisites: wallets, node control, circuits 4. Increase duration: Double the run duration and retry 5. Reduce rates: Half the traffic rates and retry -6. Check metrics: Prometheus queries for block height, tx count, DA stats +6. Check metrics: Prometheus queries for block height and tx count 7. Reproduce locally: Use local runner for faster iteration --- diff --git a/book/src/workspace-layout.md b/book/src/workspace-layout.md index b7af4ab..e18fd35 100644 --- a/book/src/workspace-layout.md +++ b/book/src/workspace-layout.md @@ -1,7 +1,7 @@ # Workspace Layout The workspace focuses on multi-node integration testing and sits alongside a -`nomos-node` checkout. Its crates separate concerns to keep scenarios +`logos-blockchain-node` checkout. 
Its crates separate concerns to keep scenarios repeatable and portable: - **Configs**: prepares high-level node, network, tracing, and wallet settings diff --git a/examples/doc-snippets/src/architecture_overview_builder_api.rs b/examples/doc-snippets/src/architecture_overview_builder_api.rs index 85fa3b5..cb1c407 100644 --- a/examples/doc-snippets/src/architecture_overview_builder_api.rs +++ b/examples/doc-snippets/src/architecture_overview_builder_api.rs @@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt; use crate::SnippetResult; pub fn scenario_plan() -> SnippetResult> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(50) .transactions_with(|txs| txs.rate(5).users(20)) .expect_consensus_liveness() diff --git a/examples/doc-snippets/src/chaos_workloads_random_restart.rs b/examples/doc-snippets/src/chaos_workloads_random_restart.rs index aaae08c..d351c20 100644 --- a/examples/doc-snippets/src/chaos_workloads_random_restart.rs +++ b/examples/doc-snippets/src/chaos_workloads_random_restart.rs @@ -6,13 +6,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe use crate::SnippetResult; pub fn random_restart_plan() -> SnippetResult> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .enable_node_control() .with_workload(RandomRestartWorkload::new( Duration::from_secs(45), // min delay Duration::from_secs(75), // max delay Duration::from_secs(120), // target cooldown - true, // include validators + true, // include nodes )) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(150)) diff --git a/examples/doc-snippets/src/custom_workload_example_expectation.rs b/examples/doc-snippets/src/custom_workload_example_expectation.rs index 0278438..12f253f 100644 --- a/examples/doc-snippets/src/custom_workload_example_expectation.rs +++ b/examples/doc-snippets/src/custom_workload_example_expectation.rs @@ -18,8 +18,8 @@ impl Expectation for ReachabilityExpectation { } async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { - let validators = ctx.node_clients().validator_clients(); - let client = validators.get(self.target_idx).ok_or_else(|| { + let nodes = ctx.node_clients().node_clients(); + let client = nodes.get(self.target_idx).ok_or_else(|| { Box::new(std::io::Error::new( std::io::ErrorKind::Other, "missing target client", diff --git a/examples/doc-snippets/src/custom_workload_example_workload.rs b/examples/doc-snippets/src/custom_workload_example_workload.rs index 4b545a4..d9bffe8 100644 --- a/examples/doc-snippets/src/custom_workload_example_workload.rs +++ b/examples/doc-snippets/src/custom_workload_example_workload.rs @@ -33,18 +33,18 @@ impl Workload for ReachabilityWorkload { topology: &GeneratedTopology, _run_metrics: &RunMetrics, ) -> Result<(), DynError> { - if topology.validators().get(self.target_idx).is_none() { + if topology.nodes().get(self.target_idx).is_none() { return Err(Box::new(std::io::Error::new( std::io::ErrorKind::Other, - "no validator at requested index", + "no node at requested index", ))); } Ok(()) } async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { - let validators = ctx.node_clients().validator_clients(); - let client = validators.get(self.target_idx).ok_or_else(|| { + let nodes = ctx.node_clients().node_clients(); + let client = nodes.get(self.target_idx).ok_or_else(|| { 
Box::new(std::io::Error::new(
                std::io::ErrorKind::Other,
                "missing target client",
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_build.rs b/examples/doc-snippets/src/dsl_cheat_sheet_build.rs
index 6c7a0ed..f8c9c50 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_build.rs
+++ b/examples/doc-snippets/src/dsl_cheat_sheet_build.rs
@@ -4,5 +4,5 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn build_plan() -> SnippetResult> {
-    ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario
+    ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)).build() // Construct the final Scenario
}
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_build_complete_example.rs b/examples/doc-snippets/src/dsl_cheat_sheet_build_complete_example.rs
index a8ac266..7d02bc8 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_build_complete_example.rs
+++ b/examples/doc-snippets/src/dsl_cheat_sheet_build_complete_example.rs
@@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn run_test() -> Result<()> {
-    let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
+    let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
        .wallets(50)
        .transactions_with(|txs| {
            txs.rate(5) // 5 transactions per block
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_expectations.rs b/examples/doc-snippets/src/dsl_cheat_sheet_expectations.rs
index 72f9bfc..7d224da 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_expectations.rs
+++ b/examples/doc-snippets/src/dsl_cheat_sheet_expectations.rs
@@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn expectations_plan() -> SnippetResult> {
-    ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
+    ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
        .expect_consensus_liveness() // Assert blocks are produced continuously
        .build()
}
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_run_duration.rs b/examples/doc-snippets/src/dsl_cheat_sheet_run_duration.rs
index 57bce11..d22ae46 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_run_duration.rs
+++ b/examples/doc-snippets/src/dsl_cheat_sheet_run_duration.rs
@@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn run_duration_plan() -> SnippetResult> {
-    ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
+    ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
        .with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
        .build()
}
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_topology.rs b/examples/doc-snippets/src/dsl_cheat_sheet_topology.rs
index ed1b805..00df6a3 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_topology.rs
+++ b/examples/doc-snippets/src/dsl_cheat_sheet_topology.rs
@@ -3,6 +3,6 @@ use testing_framework_core::scenario::{Builder, ScenarioBuilder};
pub fn topology() -> Builder<()> {
    ScenarioBuilder::topology_with(|t| {
        t.network_star() // Star topology (all connect to seed node)
-            .validators(3) // Number of validator nodes
+            .nodes(3) // Number of nodes
    })
}
diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_transactions_workload.rs b/examples/doc-snippets/src/dsl_cheat_sheet_transactions_workload.rs
index 30daa35..c0427e4 100644
--- a/examples/doc-snippets/src/dsl_cheat_sheet_transactions_workload.rs
+++ 
b/examples/doc-snippets/src/dsl_cheat_sheet_transactions_workload.rs @@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt; use crate::SnippetResult; pub fn transactions_plan() -> SnippetResult> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .wallets(50) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_wallets.rs b/examples/doc-snippets/src/dsl_cheat_sheet_wallets.rs index 650f888..4e1fe38 100644 --- a/examples/doc-snippets/src/dsl_cheat_sheet_wallets.rs +++ b/examples/doc-snippets/src/dsl_cheat_sheet_wallets.rs @@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt; use crate::SnippetResult; pub fn wallets_plan() -> SnippetResult> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .wallets(50) // Seed 50 funded wallet accounts .build() } diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_workload_chaos.rs b/examples/doc-snippets/src/dsl_cheat_sheet_workload_chaos.rs index 5a4b1d8..fa3d963 100644 --- a/examples/doc-snippets/src/dsl_cheat_sheet_workload_chaos.rs +++ b/examples/doc-snippets/src/dsl_cheat_sheet_workload_chaos.rs @@ -7,7 +7,7 @@ use crate::SnippetResult; pub fn chaos_plan() -> SnippetResult> { - ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .enable_node_control() // Enable node control capability .chaos_with(|c| { c.restart() // Random restart chaos diff --git a/examples/doc-snippets/src/dsl_cheat_sheet_workload_execution.rs b/examples/doc-snippets/src/dsl_cheat_sheet_workload_execution.rs index 75e146c..27c0b10 100644 --- a/examples/doc-snippets/src/dsl_cheat_sheet_workload_execution.rs +++ b/examples/doc-snippets/src/dsl_cheat_sheet_workload_execution.rs @@ -4,7 +4,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn execution() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)) .expect_consensus_liveness() .build()?; diff --git a/examples/doc-snippets/src/examples_advanced_aggressive_chaos_test.rs b/examples/doc-snippets/src/examples_advanced_aggressive_chaos_test.rs index 6df7c3f..bb9e927 100644 --- a/examples/doc-snippets/src/examples_advanced_aggressive_chaos_test.rs +++ b/examples/doc-snippets/src/examples_advanced_aggressive_chaos_test.rs @@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt}; pub async fn aggressive_chaos_test() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .enable_node_control() .wallets(50) .transactions_with(|txs| txs.rate(10).users(20)) diff --git a/examples/doc-snippets/src/examples_advanced_load_progression_test.rs b/examples/doc-snippets/src/examples_advanced_load_progression_test.rs index 5213007..9c217e8 100644 --- a/examples/doc-snippets/src/examples_advanced_load_progression_test.rs +++ b/examples/doc-snippets/src/examples_advanced_load_progression_test.rs @@ -9,7 +9,7 @@ pub async fn load_progression_test() -> Result<()> { for rate in [5, 10, 20, 30] { println!("Testing with 
rate: {}", rate); - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(50) .transactions_with(|txs| txs.rate(rate).users(20)) .expect_consensus_liveness() diff --git a/examples/doc-snippets/src/examples_advanced_sustained_load_test.rs b/examples/doc-snippets/src/examples_advanced_sustained_load_test.rs index f354679..21ffe8a 100644 --- a/examples/doc-snippets/src/examples_advanced_sustained_load_test.rs +++ b/examples/doc-snippets/src/examples_advanced_sustained_load_test.rs @@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn sustained_load_test() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .wallets(100) .transactions_with(|txs| txs.rate(15).users(50)) .expect_consensus_liveness() diff --git a/examples/doc-snippets/src/examples_chaos_resilience.rs b/examples/doc-snippets/src/examples_chaos_resilience.rs index 17f5d7e..7545260 100644 --- a/examples/doc-snippets/src/examples_chaos_resilience.rs +++ b/examples/doc-snippets/src/examples_chaos_resilience.rs @@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer; use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt}; pub async fn chaos_resilience() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4)) .enable_node_control() .wallets(20) .transactions_with(|txs| txs.rate(3).users(10)) diff --git a/examples/doc-snippets/src/examples_da_and_transactions.rs b/examples/doc-snippets/src/examples_da_and_transactions.rs index c7a1a8c..93301de 100644 --- a/examples/doc-snippets/src/examples_da_and_transactions.rs +++ b/examples/doc-snippets/src/examples_da_and_transactions.rs @@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn transactions_multi_node() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .wallets(30) .transactions_with(|txs| txs.rate(5).users(15)) .expect_consensus_liveness() diff --git a/examples/doc-snippets/src/examples_simple_consensus.rs b/examples/doc-snippets/src/examples_simple_consensus.rs index e9071b3..1978747 100644 --- a/examples/doc-snippets/src/examples_simple_consensus.rs +++ b/examples/doc-snippets/src/examples_simple_consensus.rs @@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer; use testing_framework_workflows::ScenarioBuilderExt; pub async fn simple_consensus() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(30)) .build()?; diff --git a/examples/doc-snippets/src/examples_transaction_workload.rs b/examples/doc-snippets/src/examples_transaction_workload.rs index 9a078df..4c0a013 100644 --- a/examples/doc-snippets/src/examples_transaction_workload.rs +++ b/examples/doc-snippets/src/examples_transaction_workload.rs @@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer; use 
testing_framework_workflows::ScenarioBuilderExt; pub async fn transaction_workload() -> Result<()> { - let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .wallets(20) .transactions_with(|txs| txs.rate(5).users(10)) .expect_consensus_liveness() diff --git a/examples/doc-snippets/src/internal_crate_reference_add_expectation_builder_ext.rs b/examples/doc-snippets/src/internal_crate_reference_add_expectation_builder_ext.rs index f619dce..e61b2f2 100644 --- a/examples/doc-snippets/src/internal_crate_reference_add_expectation_builder_ext.rs +++ b/examples/doc-snippets/src/internal_crate_reference_add_expectation_builder_ext.rs @@ -13,7 +13,7 @@ impl YourExpectationDslExt for testing_framework_core::scenario::Builder SnippetResult<()> { - let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .expect_your_condition() .build()?; Ok(()) diff --git a/examples/doc-snippets/src/internal_crate_reference_add_workload_use_in_examples.rs b/examples/doc-snippets/src/internal_crate_reference_add_workload_use_in_examples.rs index 4c415b2..d61d169 100644 --- a/examples/doc-snippets/src/internal_crate_reference_add_workload_use_in_examples.rs +++ b/examples/doc-snippets/src/internal_crate_reference_add_workload_use_in_examples.rs @@ -27,7 +27,7 @@ impl YourWorkloadDslExt for testing_framework_core::scenario::Builder SnippetResult<()> { - let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .your_workload_with(|w| w.some_config()) .build()?; Ok(()) diff --git a/examples/doc-snippets/src/manual_cluster_external_driver_example.rs b/examples/doc-snippets/src/manual_cluster_external_driver_example.rs new file mode 100644 index 0000000..2010f23 --- /dev/null +++ b/examples/doc-snippets/src/manual_cluster_external_driver_example.rs @@ -0,0 +1,59 @@ +use std::time::Duration; +use anyhow::Result; +use testing_framework_core::{ + scenario::{PeerSelection, StartNodeOptions}, + topology::config::TopologyConfig, +}; +use testing_framework_runner_local::LocalDeployer; +use tokio::time::sleep; + +#[allow(dead_code)] +async fn external_driver_example() -> Result<()> { + // Step 1: Create cluster with capacity for 3 nodes + let config = TopologyConfig::with_node_numbers(3); + let deployer = LocalDeployer::new(); + let cluster = deployer.manual_cluster(config)?; + + // Step 2: External driver decides to start 2 nodes initially + println!("Starting initial topology..."); + let node_a = cluster.start_node("a").await?.api; + let node_b = cluster + .start_node_with( + "b", + StartNodeOptions { + peers: PeerSelection::Named(vec!["node-a".to_owned()]), + }, + ) + .await? + .api; + + cluster.wait_network_ready().await?; + + // Step 3: External driver runs some protocol operations + let info = node_a.consensus_info().await?; + println!("Initial cluster height: {}", info.height); + + // Step 4: Later, external driver decides to add third node + println!("External driver adding third node..."); + let node_c = cluster + .start_node_with( + "c", + StartNodeOptions { + peers: PeerSelection::Named(vec!["node-a".to_owned()]), + }, + ) + .await? 
+        .api;
+
+    cluster.wait_network_ready().await?;
+
+    // Step 5: External driver validates final state
+    let heights = vec![
+        node_a.consensus_info().await?.height,
+        node_b.consensus_info().await?.height,
+        node_c.consensus_info().await?.height,
+    ];
+    println!("Final heights: {:?}", heights);
+
+    Ok(())
+}
diff --git a/examples/doc-snippets/src/manual_cluster_validation_patterns.rs b/examples/doc-snippets/src/manual_cluster_validation_patterns.rs
new file mode 100644
index 0000000..076e6bb
--- /dev/null
+++ b/examples/doc-snippets/src/manual_cluster_validation_patterns.rs
@@ -0,0 +1,60 @@
+use std::time::Duration;
+use testing_framework_core::nodes::ApiClient;
+use tokio::time::sleep;
+
+#[allow(dead_code)]
+async fn height_convergence(
+    node_a: &ApiClient,
+    node_b: &ApiClient,
+    node_c: &ApiClient,
+) -> anyhow::Result<()> {
+    let start = tokio::time::Instant::now();
+    loop {
+        let heights: Vec<u64> = vec![
+            node_a.consensus_info().await?.height,
+            node_b.consensus_info().await?.height,
+            node_c.consensus_info().await?.height,
+        ];
+
+        let max_diff = heights.iter().max().unwrap() - heights.iter().min().unwrap();
+        if max_diff <= 5 {
+            println!("Converged: heights={:?}", heights);
+            break;
+        }
+
+        if start.elapsed() > Duration::from_secs(60) {
+            return Err(anyhow::anyhow!("Convergence timeout: heights={:?}", heights));
+        }
+
+        sleep(Duration::from_secs(2)).await;
+    }
+    Ok(())
+}
+
+#[allow(dead_code)]
+async fn peer_count_verification(node: &ApiClient) -> anyhow::Result<()> {
+    let info = node.network_info().await?;
+    assert_eq!(
+        info.n_peers, 3,
+        "Expected 3 peers, found {}",
+        info.n_peers
+    );
+    Ok(())
+}
+
+#[allow(dead_code)]
+async fn block_production(node_a: &ApiClient) -> anyhow::Result<()> {
+    // Verify node is producing blocks
+    let initial_height = node_a.consensus_info().await?.height;
+
+    sleep(Duration::from_secs(10)).await;
+
+    let current_height = node_a.consensus_info().await?.height;
+    assert!(
+        current_height > initial_height,
+        "Node should have produced blocks: initial={}, current={}",
+        initial_height,
+        current_height
+    );
+    Ok(())
+}
diff --git a/examples/doc-snippets/src/node_control_accessing_control.rs b/examples/doc-snippets/src/node_control_accessing_control.rs
index 08f3af8..a7cc27d 100644
--- a/examples/doc-snippets/src/node_control_accessing_control.rs
+++ b/examples/doc-snippets/src/node_control_accessing_control.rs
@@ -11,8 +11,8 @@ impl Workload for RestartWorkload {

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        if let Some(control) = ctx.node_control() {
-            // Restart the first validator (index 0) if supported.
-            control.restart_validator(0).await?;
+            // Restart the first node (index 0) if supported.
+            control.restart_node(0).await?;
        }
        Ok(())
    }
diff --git a/examples/doc-snippets/src/node_control_trait.rs b/examples/doc-snippets/src/node_control_trait.rs
index 025e85e..98976ca 100644
--- a/examples/doc-snippets/src/node_control_trait.rs
+++ b/examples/doc-snippets/src/node_control_trait.rs
@@ -3,5 +3,5 @@ use testing_framework_core::scenario::DynError;

#[async_trait]
pub trait NodeControlHandle: Send + Sync {
-    async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
+    async fn restart_node(&self, index: usize) -> Result<(), DynError>;
}
diff --git a/examples/doc-snippets/src/quickstart_adjust_topology.rs b/examples/doc-snippets/src/quickstart_adjust_topology.rs
index 05e6974..7002a04 100644
--- a/examples/doc-snippets/src/quickstart_adjust_topology.rs
+++ b/examples/doc-snippets/src/quickstart_adjust_topology.rs
@@ -3,7 +3,8 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;

pub async fn run_with_env_overrides() -> Result<()> {
-    // Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars)
+    // Uses LOGOS_BLOCKCHAIN_DEMO_* env vars (for example
+    // LOGOS_BLOCKCHAIN_DEMO_NODES)
    let mut plan = ScenarioBuilder::with_node_counts(3)
        .with_run_duration(std::time::Duration::from_secs(120))
        .build()?;
diff --git a/examples/doc-snippets/src/quickstart_core_api_pattern.rs b/examples/doc-snippets/src/quickstart_core_api_pattern.rs
index 428668e..eded1f3 100644
--- a/examples/doc-snippets/src/quickstart_core_api_pattern.rs
+++ b/examples/doc-snippets/src/quickstart_core_api_pattern.rs
@@ -6,8 +6,8 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn run_local_demo() -> Result<()> {
-    // Define the scenario (2 validator, tx workload)
-    let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
+    // Define the scenario (2 nodes, tx workload)
+    let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .wallets(1_000)
        .transactions_with(|txs| {
            txs.rate(5) // 5 transactions per block
diff --git a/examples/doc-snippets/src/quickstart_step_1_topology.rs b/examples/doc-snippets/src/quickstart_step_1_topology.rs
index 1979edc..2a2bbf7 100644
--- a/examples/doc-snippets/src/quickstart_step_1_topology.rs
+++ b/examples/doc-snippets/src/quickstart_step_1_topology.rs
@@ -3,6 +3,6 @@ use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> {
    ScenarioBuilder::topology_with(|t| {
        t.network_star() // Star topology: all nodes connect to seed
-        .validators(2) // 2 validator nodes
+        .nodes(2) // 2 nodes
    })
}
diff --git a/examples/doc-snippets/src/testing_philosophy_declarative_over_imperative.rs b/examples/doc-snippets/src/testing_philosophy_declarative_over_imperative.rs
index c4f2521..1827017 100644
--- a/examples/doc-snippets/src/testing_philosophy_declarative_over_imperative.rs
+++ b/examples/doc-snippets/src/testing_philosophy_declarative_over_imperative.rs
@@ -5,7 +5,7 @@ use crate::SnippetResult;

pub fn declarative_over_imperative() -> SnippetResult<()> {
    // Good: declarative
-    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
+    let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .transactions_with(|txs| {
            txs.rate(5) // 5 transactions per block
        })
@@ -13,7 +13,7 @@ pub fn declarative_over_imperative() -> SnippetResult<()> {
    // Bad: imperative (framework doesn't work this way)
-    //
spawn_validator(); + // spawn_node(); // loop { submit_tx(); check_block(); } Ok(()) diff --git a/examples/doc-snippets/src/testing_philosophy_determinism_first.rs b/examples/doc-snippets/src/testing_philosophy_determinism_first.rs index 8ed1a86..160db8c 100644 --- a/examples/doc-snippets/src/testing_philosophy_determinism_first.rs +++ b/examples/doc-snippets/src/testing_philosophy_determinism_first.rs @@ -7,7 +7,7 @@ use crate::SnippetResult; pub fn determinism_first() -> SnippetResult<()> { // Separate: functional test (deterministic) - let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block }) @@ -15,7 +15,7 @@ pub fn determinism_first() -> SnippetResult<()> { .build()?; // Separate: chaos test (introduces randomness) - let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3)) + let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3)) .enable_node_control() .chaos_with(|c| { c.restart() diff --git a/examples/doc-snippets/src/testing_philosophy_protocol_time_not_wall_time.rs b/examples/doc-snippets/src/testing_philosophy_protocol_time_not_wall_time.rs index 66440e5..cc66dc1 100644 --- a/examples/doc-snippets/src/testing_philosophy_protocol_time_not_wall_time.rs +++ b/examples/doc-snippets/src/testing_philosophy_protocol_time_not_wall_time.rs @@ -7,7 +7,7 @@ use crate::SnippetResult; pub fn protocol_time_not_wall_time() -> SnippetResult<()> { // Good: protocol-oriented thinking - let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .transactions_with(|txs| { txs.rate(5) // 5 transactions per block }) diff --git a/examples/src/bin/compose_runner.rs b/examples/src/bin/compose_runner.rs index e9467cf..70f2308 100644 --- a/examples/src/bin/compose_runner.rs +++ b/examples/src/bin/compose_runner.rs @@ -24,27 +24,27 @@ async fn main() { tracing_subscriber::fmt::init(); - let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS); + let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES); - let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); + let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); - info!(validators, run_secs, "starting compose runner demo"); + info!(nodes, run_secs, "starting compose runner demo"); - if let Err(err) = run_compose_case(validators, Duration::from_secs(run_secs)).await { + if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await { warn!("compose runner demo failed: {err:#}"); process::exit(1); } } -async fn run_compose_case(validators: usize, run_duration: Duration) -> Result<()> { +async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> { info!( - validators, + nodes, duration_secs = run_duration.as_secs(), "building scenario plan" ); - let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators)) - .enable_node_control(); + let scenario = + ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)).enable_node_control(); let scenario = if let Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown)) = chaos_timings(run_duration) @@ -80,7 +80,9 @@ async fn run_compose_case(validators: usize, run_duration: Duration) -> Result<( }; if 
!runner.context().telemetry().is_configured() { - warn!("metrics querying is disabled; set NOMOS_METRICS_QUERY_URL to enable PromQL queries"); + warn!( + "metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries" + ); } info!("running scenario"); diff --git a/examples/src/bin/k8s_runner.rs b/examples/src/bin/k8s_runner.rs index 22df4e6..d1d6439 100644 --- a/examples/src/bin/k8s_runner.rs +++ b/examples/src/bin/k8s_runner.rs @@ -17,37 +17,37 @@ const TRANSACTION_WALLETS: usize = 50; async fn main() { tracing_subscriber::fmt::init(); - let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS); - let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); - info!(validators, run_secs, "starting k8s runner demo"); + let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES); + let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); + info!(nodes, run_secs, "starting k8s runner demo"); - if let Err(err) = run_k8s_case(validators, Duration::from_secs(run_secs)).await { + if let Err(err) = run_k8s_case(nodes, Duration::from_secs(run_secs)).await { warn!("k8s runner demo failed: {err:#}"); process::exit(1); } } -async fn run_k8s_case(validators: usize, run_duration: Duration) -> Result<()> { +async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> { info!( - validators, + nodes, duration_secs = run_duration.as_secs(), "building scenario plan" ); - let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators)) + let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)) .with_capabilities(ObservabilityCapability::default()) .wallets(TOTAL_WALLETS) .transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS)) .with_run_duration(run_duration) .expect_consensus_liveness(); - if let Ok(url) = env::var("NOMOS_METRICS_QUERY_URL") { + if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_QUERY_URL") { if !url.trim().is_empty() { scenario = scenario.with_metrics_query_url_str(url.trim()); } } - if let Ok(url) = env::var("NOMOS_METRICS_OTLP_INGEST_URL") { + if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL") { if !url.trim().is_empty() { scenario = scenario.with_metrics_otlp_ingest_url_str(url.trim()); } @@ -68,7 +68,9 @@ async fn run_k8s_case(validators: usize, run_duration: Duration) -> Result<()> { }; if !runner.context().telemetry().is_configured() { - warn!("metrics querying is disabled; set NOMOS_METRICS_QUERY_URL to enable PromQL queries"); + warn!( + "metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries" + ); } info!("running scenario"); diff --git a/examples/src/bin/local_runner.rs b/examples/src/bin/local_runner.rs index 34eef24..e21c6ff 100644 --- a/examples/src/bin/local_runner.rs +++ b/examples/src/bin/local_runner.rs @@ -22,25 +22,25 @@ async fn main() { process::exit(1); } - let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS); - let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); + let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES); + let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS); - info!(validators, run_secs, "starting local runner demo"); + info!(nodes, run_secs, "starting local runner demo"); - if let Err(err) = run_local_case(validators, Duration::from_secs(run_secs)).await 
{ + if let Err(err) = run_local_case(nodes, Duration::from_secs(run_secs)).await { warn!("local runner demo failed: {err:#}"); process::exit(1); } } -async fn run_local_case(validators: usize, run_duration: Duration) -> Result<()> { +async fn run_local_case(nodes: usize, run_duration: Duration) -> Result<()> { info!( - validators, + nodes, duration_secs = run_duration.as_secs(), "building scenario plan" ); - let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators)) + let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)) .wallets(TOTAL_WALLETS) .with_run_duration(run_duration); diff --git a/examples/src/defaults.rs b/examples/src/defaults.rs index 75b5a41..0520321 100644 --- a/examples/src/defaults.rs +++ b/examples/src/defaults.rs @@ -22,13 +22,13 @@ fn set_default_env(key: &str, value: &str) { pub fn init_logging_defaults() { set_default_env("POL_PROOF_DEV_MODE", "true"); - set_default_env("NOMOS_TESTS_KEEP_LOGS", "1"); - set_default_env("NOMOS_LOG_LEVEL", "info"); + set_default_env("LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS", "1"); + set_default_env("LOGOS_BLOCKCHAIN_LOG_LEVEL", "info"); set_default_env("RUST_LOG", "info"); } pub fn init_node_log_dir_defaults(deployer: DeployerKind) { - if env::var_os("NOMOS_LOG_DIR").is_some() { + if env::var_os("LOGOS_BLOCKCHAIN_LOG_DIR").is_some() { return; } @@ -36,8 +36,12 @@ pub fn init_node_log_dir_defaults(deployer: DeployerKind) { let _ = fs::create_dir_all(&host_dir); match deployer { - DeployerKind::Local => set_default_env("NOMOS_LOG_DIR", &host_dir.display().to_string()), - DeployerKind::Compose => set_default_env("NOMOS_LOG_DIR", DEFAULT_CONTAINER_NODE_LOG_DIR), + DeployerKind::Local => { + set_default_env("LOGOS_BLOCKCHAIN_LOG_DIR", &host_dir.display().to_string()) + } + DeployerKind::Compose => { + set_default_env("LOGOS_BLOCKCHAIN_LOG_DIR", DEFAULT_CONTAINER_NODE_LOG_DIR) + } } } diff --git a/examples/src/demo.rs b/examples/src/demo.rs index 3efad6f..cf56a9b 100644 --- a/examples/src/demo.rs +++ b/examples/src/demo.rs @@ -1,2 +1,2 @@ -pub const DEFAULT_VALIDATORS: usize = 2; +pub const DEFAULT_NODES: usize = 2; pub const DEFAULT_RUN_SECS: u64 = 60; diff --git a/examples/tests/dynamic_join.rs b/examples/tests/dynamic_join.rs index 77f4e8a..d892b5a 100644 --- a/examples/tests/dynamic_join.rs +++ b/examples/tests/dynamic_join.rs @@ -37,7 +37,7 @@ impl Workload for JoinNodeWorkload { sleep(START_DELAY).await; - let node = handle.start_validator(&self.name).await?; + let node = handle.start_node(&self.name).await?; let client = node.api; timeout(READY_TIMEOUT, async { @@ -86,7 +86,7 @@ impl Workload for JoinNodeWithPeersWorkload { let options = StartNodeOptions { peers: PeerSelection::Named(self.peers.clone()), }; - let node = handle.start_validator_with(&self.name, options).await?; + let node = handle.start_node_with(&self.name, options).await?; let client = node.api; timeout(READY_TIMEOUT, async { @@ -110,7 +110,7 @@ impl Workload for JoinNodeWithPeersWorkload { async fn dynamic_join_reaches_consensus_liveness() -> Result<()> { let _ = try_init(); - let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .enable_node_control() .with_workload(JoinNodeWorkload::new("joiner")) .expect_consensus_liveness() @@ -127,11 +127,11 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> { #[tokio::test] #[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"] 
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> { - let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2)) + let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2)) .enable_node_control() .with_workload(JoinNodeWithPeersWorkload::new( "joiner", - vec!["validator-0".to_string()], + vec!["node-0".to_string()], )) .expect_consensus_liveness() .with_run_duration(Duration::from_secs(60)) diff --git a/examples/tests/manual_cluster.rs b/examples/tests/manual_cluster.rs index ef0d385..4af827e 100644 --- a/examples/tests/manual_cluster.rs +++ b/examples/tests/manual_cluster.rs @@ -10,6 +10,8 @@ use tokio::time::sleep; use tracing_subscriber::fmt::try_init; const MAX_HEIGHT_DIFF: u64 = 5; +const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60); +const CONVERGENCE_POLL: Duration = Duration::from_secs(2); #[tokio::test] #[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"] @@ -23,10 +25,10 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> { let cluster = deployer.manual_cluster(config)?; // Nodes are stopped automatically when the cluster is dropped. - println!("starting validator a"); + println!("starting node a"); - let validator_a = cluster - .start_validator_with( + let node_a = cluster + .start_node_with( "a", StartNodeOptions { peers: PeerSelection::None, @@ -38,12 +40,12 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> { println!("waiting briefly before starting c"); sleep(Duration::from_secs(30)).await; - println!("starting validator c -> a"); - let validator_c = cluster - .start_validator_with( + println!("starting node c -> a"); + let node_c = cluster + .start_node_with( "c", StartNodeOptions { - peers: PeerSelection::Named(vec!["validator-a".to_owned()]), + peers: PeerSelection::Named(vec!["node-a".to_owned()]), }, ) .await? @@ -52,21 +54,29 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> { println!("waiting for network readiness: cluster a,c"); cluster.wait_network_ready().await?; - sleep(Duration::from_secs(5)).await; + let start = tokio::time::Instant::now(); - let a_info = validator_a.consensus_info().await?; - let c_info = validator_c.consensus_info().await?; - let height_diff = a_info.height.abs_diff(c_info.height); + loop { + let a_info = node_a.consensus_info().await?; + let c_info = node_c.consensus_info().await?; + let a_height = a_info.height; + let c_height = c_info.height; + let diff = a_height.abs_diff(c_height); - println!( - "final heights: validator-a={}, validator-c={}, diff={}", - a_info.height, c_info.height, height_diff - ); + if diff <= MAX_HEIGHT_DIFF { + println!( + "final heights: node-a={}, node-c={}, diff={}", + a_height, c_height, diff + ); + return Ok(()); + } - if height_diff > MAX_HEIGHT_DIFF { - return Err(anyhow::anyhow!( - "height diff too large: {height_diff} > {MAX_HEIGHT_DIFF}" - )); + if start.elapsed() >= CONVERGENCE_TIMEOUT { + return Err(anyhow::anyhow!( + "height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})" + )); + } + + sleep(CONVERGENCE_POLL).await; } - Ok(()) } diff --git a/paths.env b/paths.env index 37a6408..7710de1 100644 --- a/paths.env +++ b/paths.env @@ -1,15 +1,6 @@ # Paths used by demo scripts and runners. # Relative paths are resolved from the repo root. -# Directory containing the KZG test parameters on the host. 
-NOMOS_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params"
-
-# The KZG parameters filename (repeated inside the directory).
-NOMOS_KZG_FILE="kzgrs_test_params"
-
-# Path to the KZG params inside containers.
-NOMOS_KZG_CONTAINER_PATH="/kzgrs_test_params/kzgrs_test_params"
-
 # Host-side circuit bundle locations used by helper scripts.
-NOMOS_CIRCUITS_HOST_DIR_REL=".tmp/logos-blockchain-circuits-host"
-NOMOS_CIRCUITS_LINUX_DIR_REL=".tmp/logos-blockchain-circuits-linux"
+LOGOS_BLOCKCHAIN_CIRCUITS_HOST_DIR_REL=".tmp/logos-blockchain-circuits-host"
+LOGOS_BLOCKCHAIN_CIRCUITS_LINUX_DIR_REL=".tmp/logos-blockchain-circuits-linux"
diff --git a/scripts/build/build-bundle.sh b/scripts/build/build-bundle.sh
index 16622f0..4a32ede 100755
--- a/scripts/build/build-bundle.sh
+++ b/scripts/build/build-bundle.sh
@@ -30,7 +30,7 @@ Usage: scripts/build/build-bundle.sh [--platform host|linux] [--output PATH]
 Options:
   --platform          Target platform for binaries (default: host)
   --output            Output path for the tarball (default: .tmp/nomos-binaries-<platform>-<version>.tar.gz)
-  --rev               logos-blockchain-node git revision to build (overrides NOMOS_NODE_REV)
+  --rev               logos-blockchain-node git revision to build (overrides LOGOS_BLOCKCHAIN_NODE_REV)
   --path              Use local logos-blockchain-node checkout at DIR (skip fetch/checkout)
   --features          Extra cargo features to enable (comma-separated); base always includes "testing"
   --docker-platform   Docker platform for Linux bundle when running on non-Linux host (default: auto; linux/arm64 on Apple silicon Docker Desktop, else linux/amd64)
@@ -40,7 +40,7 @@ Notes:
   run inside a Linux Docker container to produce Linux binaries.
   - On Apple silicon, Docker defaults to linux/arm64; for compose/k8s you likely want
     linux/amd64 (the default here). Override with --docker-platform.
-  - VERSION, NOMOS_NODE_REV, and optional NOMOS_NODE_PATH env vars are honored (defaults align with run-examples.sh).
+  - VERSION, LOGOS_BLOCKCHAIN_NODE_REV, and optional LOGOS_BLOCKCHAIN_NODE_PATH env vars are honored (defaults align with run-examples.sh).
 USAGE
 }
@@ -52,17 +52,17 @@ build_bundle::fail() {
 
 build_bundle::apply_nomos_node_patches() {
     local node_src="$1"
-    local apply="${NOMOS_NODE_APPLY_PATCHES:-1}"
+    local apply="${LOGOS_BLOCKCHAIN_NODE_APPLY_PATCHES:-1}"
     if [ "${apply}" = "0" ]; then
         return 0
     fi
-    local patch_dir="${NOMOS_NODE_PATCH_DIR:-${ROOT_DIR}/patches/logos-blockchain-node}"
+    local patch_dir="${LOGOS_BLOCKCHAIN_NODE_PATCH_DIR:-${ROOT_DIR}/patches/logos-blockchain-node}"
    if [ ! -d "${patch_dir}" ]; then
         return 0
     fi
-    local level="${NOMOS_NODE_PATCH_LEVEL:-}"
+    local level="${LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL:-}"
     if [ -z "${level}" ]; then
         level="all"
     fi
@@ -84,7 +84,7 @@ build_bundle::apply_nomos_node_patches() {
     fi
     if [ "${level}" != "all" ] && [ "${level}" != "ALL" ]; then
         if ! [[ "${level}" =~ ^[0-9]+$ ]]; then
-            build_bundle::fail "Invalid NOMOS_NODE_PATCH_LEVEL: ${level} (expected integer or 'all')"
+            build_bundle::fail "Invalid LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL: ${level} (expected integer or 'all')"
         fi
         if [ -n "${phase}" ] && [ "${phase}" -gt "${level}" ]; then
             continue
@@ -104,11 +104,11 @@ build_bundle::load_env() {
    . 
"${ROOT_DIR}/versions.env" DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}" - DEFAULT_NODE_REV="${NOMOS_NODE_REV:-}" - DEFAULT_NODE_PATH="${NOMOS_NODE_PATH:-}" + DEFAULT_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:-}" + DEFAULT_NODE_PATH="${LOGOS_BLOCKCHAIN_NODE_PATH:-}" - NOMOS_EXTRA_FEATURES="${NOMOS_EXTRA_FEATURES:-}" - DOCKER_PLATFORM="${NOMOS_BUNDLE_DOCKER_PLATFORM:-${NOMOS_BIN_PLATFORM:-}}" + LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}" + DOCKER_PLATFORM="${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-${LOGOS_BLOCKCHAIN_BIN_PLATFORM:-}}" BUNDLE_RUSTUP_TOOLCHAIN="${BUNDLE_RUSTUP_TOOLCHAIN:-}" if [ -z "${BUNDLE_RUSTUP_TOOLCHAIN}" ] && command -v rustup >/dev/null 2>&1 && [ -f "${ROOT_DIR}/rust-toolchain.toml" ]; then @@ -153,8 +153,8 @@ build_bundle::parse_args() { --rev) REV_OVERRIDE="${2:-}"; shift 2 ;; --path=*) PATH_OVERRIDE="${1#*=}"; shift ;; --path) PATH_OVERRIDE="${2:-}"; shift 2 ;; - --features=*) NOMOS_EXTRA_FEATURES="${1#*=}"; shift ;; - --features) NOMOS_EXTRA_FEATURES="${2:-}"; shift 2 ;; + --features=*) LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${1#*=}"; shift ;; + --features) LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${2:-}"; shift 2 ;; --docker-platform=*) DOCKER_PLATFORM="${1#*=}"; shift ;; --docker-platform) DOCKER_PLATFORM="${2:-}"; shift 2 ;; *) build_bundle::fail "Unknown argument: $1" ;; @@ -174,11 +174,11 @@ build_bundle::validate_and_finalize() { build_bundle::fail "Use either --rev or --path, not both" fi if [ -z "${REV_OVERRIDE}" ] && [ -z "${PATH_OVERRIDE}" ] && [ -z "${DEFAULT_NODE_REV}" ] && [ -z "${DEFAULT_NODE_PATH}" ]; then - build_bundle::fail "Provide --rev, --path, or set NOMOS_NODE_REV/NOMOS_NODE_PATH in versions.env" + build_bundle::fail "Provide --rev, --path, or set LOGOS_BLOCKCHAIN_NODE_REV/LOGOS_BLOCKCHAIN_NODE_PATH in versions.env" fi - NOMOS_NODE_REV="${REV_OVERRIDE:-${DEFAULT_NODE_REV}}" - NOMOS_NODE_PATH="${PATH_OVERRIDE:-${DEFAULT_NODE_PATH}}" - export NOMOS_NODE_REV NOMOS_NODE_PATH + LOGOS_BLOCKCHAIN_NODE_REV="${REV_OVERRIDE:-${DEFAULT_NODE_REV}}" + LOGOS_BLOCKCHAIN_NODE_PATH="${PATH_OVERRIDE:-${DEFAULT_NODE_PATH}}" + export LOGOS_BLOCKCHAIN_NODE_REV LOGOS_BLOCKCHAIN_NODE_PATH build_bundle::default_docker_platform DOCKER_PLATFORM="${DOCKER_PLATFORM:-linux/amd64}" @@ -223,16 +223,16 @@ build_bundle::maybe_run_linux_build_in_docker() { command -v docker >/dev/null 2>&1 || build_bundle::fail "Docker is required to build a Linux bundle from non-Linux host" [ -n "${DOCKER_PLATFORM}" ] || build_bundle::fail "--docker-platform must not be empty" - local node_path_env="${NOMOS_NODE_PATH}" + local node_path_env="${LOGOS_BLOCKCHAIN_NODE_PATH}" local -a extra_mounts=() - if [ -n "${NOMOS_NODE_PATH}" ]; then - case "${NOMOS_NODE_PATH}" in + if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then + case "${LOGOS_BLOCKCHAIN_NODE_PATH}" in "${ROOT_DIR}"/*) - node_path_env="/workspace${NOMOS_NODE_PATH#"${ROOT_DIR}"}" + node_path_env="/workspace${LOGOS_BLOCKCHAIN_NODE_PATH#"${ROOT_DIR}"}" ;; /*) node_path_env="/external/logos-blockchain-node" - extra_mounts+=("-v" "${NOMOS_NODE_PATH}:${node_path_env}") + extra_mounts+=("-v" "${LOGOS_BLOCKCHAIN_NODE_PATH}:${node_path_env}") ;; *) build_bundle::fail "--path must be absolute when cross-building in Docker" @@ -248,27 +248,23 @@ build_bundle::maybe_run_linux_build_in_docker() { mkdir -p "${ROOT_DIR}/.tmp/cargo-linux" "${host_target_dir}" local -a features_args=() - if [ -n "${NOMOS_EXTRA_FEATURES:-}" ]; then - features_args+=(--features "${NOMOS_EXTRA_FEATURES}") + if [ -n 
"${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}" ]; then + features_args+=(--features "${LOGOS_BLOCKCHAIN_EXTRA_FEATURES}") fi local -a src_args=() if [ -n "${node_path_env}" ]; then src_args+=(--path "${node_path_env}") else - src_args+=(--rev "${NOMOS_NODE_REV}") + src_args+=(--rev "${LOGOS_BLOCKCHAIN_NODE_REV}") fi docker run --rm --platform "${DOCKER_PLATFORM}" \ -e VERSION="${VERSION}" \ - -e NOMOS_NODE_REV="${NOMOS_NODE_REV}" \ - -e NOMOS_NODE_PATH="${node_path_env}" \ - -e NOMOS_BUNDLE_DOCKER_PLATFORM="${DOCKER_PLATFORM}" \ - -e NOMOS_CIRCUITS="/workspace/.tmp/logos-blockchain-circuits-linux" \ - -e LOGOS_BLOCKCHAIN_CIRCUITS="/workspace/.tmp/logos-blockchain-circuits-linux" \ - -e STACK_DIR="/workspace/.tmp/logos-blockchain-circuits-linux" \ - -e HOST_DIR="/workspace/.tmp/logos-blockchain-circuits-linux" \ - -e NOMOS_EXTRA_FEATURES="${NOMOS_EXTRA_FEATURES:-}" \ + -e LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV}" \ + -e LOGOS_BLOCKCHAIN_NODE_PATH="${node_path_env}" \ + -e LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM="${DOCKER_PLATFORM}" \ + -e LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}" \ -e BUNDLE_IN_CONTAINER=1 \ -e CARGO_HOME=/workspace/.tmp/cargo-linux \ -e CARGO_TARGET_DIR="/workspace/.tmp/logos-blockchain-node-linux-target${target_suffix}" \ @@ -284,26 +280,24 @@ build_bundle::maybe_run_linux_build_in_docker() { } build_bundle::prepare_circuits() { - echo "==> Preparing circuits (version ${VERSION})" + echo "==> Preparing build workspace (version ${VERSION})" if [ "${PLATFORM}" = "host" ]; then - CIRCUITS_DIR="${ROOT_DIR}/.tmp/logos-blockchain-circuits-host" NODE_TARGET="${ROOT_DIR}/.tmp/logos-blockchain-node-host-target" else - CIRCUITS_DIR="${ROOT_DIR}/.tmp/logos-blockchain-circuits-linux" # When building Linux bundles in Docker, avoid reusing the same target dir # across different container architectures (e.g. linux/arm64 vs linux/amd64), # as the native-host `target/debug` layout would otherwise get mixed. 
local target_suffix="" if [ -n "${BUNDLE_IN_CONTAINER:-}" ]; then - target_suffix="$(build_bundle::docker_platform_suffix "${NOMOS_BUNDLE_DOCKER_PLATFORM:-}")" + target_suffix="$(build_bundle::docker_platform_suffix "${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-}")" fi NODE_TARGET="${ROOT_DIR}/.tmp/logos-blockchain-node-linux-target${target_suffix}" fi NODE_SRC_DEFAULT="${ROOT_DIR}/.tmp/logos-blockchain-node-${PLATFORM}-src" - NODE_SRC="${NOMOS_NODE_PATH:-${NODE_SRC_DEFAULT}}" - if [ -n "${NOMOS_NODE_PATH}" ]; then - [ -d "${NODE_SRC}" ] || build_bundle::fail "NOMOS_NODE_PATH does not exist: ${NODE_SRC}" + NODE_SRC="${LOGOS_BLOCKCHAIN_NODE_PATH:-${NODE_SRC_DEFAULT}}" + if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then + [ -d "${NODE_SRC}" ] || build_bundle::fail "LOGOS_BLOCKCHAIN_NODE_PATH does not exist: ${NODE_SRC}" rm -rf "${NODE_SRC_DEFAULT}" if [ -d "${NODE_TARGET}" ]; then find "${NODE_TARGET}" -mindepth 1 -maxdepth 1 -exec rm -rf {} + @@ -311,18 +305,7 @@ build_bundle::prepare_circuits() { NODE_TARGET="${NODE_TARGET}-local" fi - export NOMOS_CIRCUITS="${CIRCUITS_DIR}" - export LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}" - mkdir -p "${ROOT_DIR}/.tmp" "${CIRCUITS_DIR}" - if [ -f "${CIRCUITS_DIR}/${KZG_FILE:-kzgrs_test_params}" ]; then - echo "Circuits already present at ${CIRCUITS_DIR}; skipping download" - else - STACK_DIR="${CIRCUITS_DIR}" HOST_DIR="${CIRCUITS_DIR}" \ - "${ROOT_DIR}/scripts/setup/setup-circuits-stack.sh" "${VERSION}" Packaging bundle" local bundle_dir="${ROOT_DIR}/.tmp/nomos-bundle" rm -rf "${bundle_dir}" - mkdir -p "${bundle_dir}/artifacts/circuits" - cp -a "${CIRCUITS_DIR}/." "${bundle_dir}/artifacts/circuits/" mkdir -p "${bundle_dir}/artifacts" cp "${NODE_BIN}" "${bundle_dir}/artifacts/logos-blockchain-node" - cp "${CLI_BIN}" "${bundle_dir}/artifacts/logos-blockchain-cli" { - echo "nomos_node_path=${NOMOS_NODE_PATH:-}" - echo "nomos_node_rev=${NOMOS_NODE_REV:-}" + echo "nomos_node_path=${LOGOS_BLOCKCHAIN_NODE_PATH:-}" + echo "nomos_node_rev=${LOGOS_BLOCKCHAIN_NODE_REV:-}" if [ -d "${NODE_SRC}/.git" ] && command -v git >/dev/null 2>&1; then echo "nomos_node_git_head=$(git -C "${NODE_SRC}" rev-parse HEAD 2>/dev/null || true)" fi diff --git a/scripts/build/build-linux-binaries.sh b/scripts/build/build-linux-binaries.sh index 298613f..3d97948 100755 --- a/scripts/build/build-linux-binaries.sh +++ b/scripts/build/build-linux-binaries.sh @@ -14,10 +14,9 @@ Usage: scripts/build/build-linux-binaries.sh [options] Builds a Linux bundle via scripts/build/build-bundle.sh, then stages artifacts into: - testing-framework/assets/stack/bin - - testing-framework/assets/stack/kzgrs_test_params (or NOMOS_KZG_DIR_REL) Options: - --rev REV logos-blockchain-node git revision to build (overrides NOMOS_NODE_REV) + --rev REV logos-blockchain-node git revision to build (overrides LOGOS_BLOCKCHAIN_NODE_REV) --path DIR use local logos-blockchain-node checkout (skip fetch/checkout) --features LIST extra cargo features (comma-separated); base includes "testing" --docker-platform PLAT docker platform for the Linux build (e.g. 
linux/amd64, linux/arm64) @@ -26,10 +25,9 @@ Options: -h, --help show help Environment: - VERSION circuits version (default from versions.env) - NOMOS_CIRCUITS_VERSION legacy alias for VERSION (supported) - NOMOS_NODE_REV default logos-blockchain-node revision (from versions.env) - NOMOS_KZG_DIR_REL host path for staged circuits dir (default: testing-framework/assets/stack/kzgrs_test_params) + VERSION bundle version (default from versions.env) + LOGOS_BLOCKCHAIN_CIRCUITS_VERSION legacy alias for VERSION (supported) + LOGOS_BLOCKCHAIN_NODE_REV default logos-blockchain-node revision (from versions.env) EOF } @@ -50,8 +48,8 @@ build_linux_binaries::load_env() { DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}" VERSION="${VERSION:-${DEFAULT_VERSION}}" - if [ -n "${NOMOS_CIRCUITS_VERSION:-}" ]; then - VERSION="${NOMOS_CIRCUITS_VERSION}" + if [ -n "${LOGOS_BLOCKCHAIN_CIRCUITS_VERSION:-}" ]; then + VERSION="${LOGOS_BLOCKCHAIN_CIRCUITS_VERSION}" fi } @@ -134,28 +132,13 @@ build_linux_binaries::stage_from_bundle() { local artifacts="${extract_dir}/artifacts" [ -f "${artifacts}/logos-blockchain-node" ] || common::die "Missing logos-blockchain-node in bundle: ${tar_path}" - [ -f "${artifacts}/logos-blockchain-cli" ] || common::die "Missing logos-blockchain-cli in bundle: ${tar_path}" - [ -d "${artifacts}/circuits" ] || common::die "Missing circuits/ in bundle: ${tar_path}" - local bin_out="${ROOT_DIR}/testing-framework/assets/stack/bin" - local kzg_dir_rel="${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}" - local circuits_out="${ROOT_DIR}/${kzg_dir_rel}" echo "==> Staging binaries to ${bin_out}" mkdir -p "${bin_out}" - cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${bin_out}/" - - echo "==> Staging circuits to ${circuits_out}" - rm -rf "${circuits_out}" - mkdir -p "${circuits_out}" - if command -v rsync >/dev/null 2>&1; then - rsync -a --delete "${artifacts}/circuits/" "${circuits_out}/" - else - cp -a "${artifacts}/circuits/." "${circuits_out}/" - fi - + cp "${artifacts}/logos-blockchain-node" "${bin_out}/" # If the tarball was produced inside Docker, it might be root-owned on the host. - chown -R "$(id -u)":"$(id -g)" "${bin_out}" "${circuits_out}" 2>/dev/null || true + chown -R "$(id -u)":"$(id -g)" "${bin_out}" 2>/dev/null || true } build_linux_binaries::main() { @@ -166,7 +149,6 @@ build_linux_binaries::main() { echo echo "Binaries staged in ${ROOT_DIR}/testing-framework/assets/stack/bin" - echo "Circuits staged in ${ROOT_DIR}/${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}" echo "Bundle tarball: ${BUNDLE_TAR}" } diff --git a/scripts/build/build_test_image.sh b/scripts/build/build_test_image.sh index 7ed017f..4e07960 100755 --- a/scripts/build/build_test_image.sh +++ b/scripts/build/build_test_image.sh @@ -12,23 +12,20 @@ build_test_image::usage() { cat <<'USAGE' Usage: scripts/build/build_test_image.sh [options] -Builds the compose/k8s test image (bakes in binaries + circuit assets). +Builds the compose/k8s test image (bakes in binaries). 
Options:
  --tag TAG                 Docker image tag (default: logos-blockchain-testing:local; or env IMAGE_TAG)
-  --version VERSION         Circuits release tag (default: versions.env VERSION)
+  --version VERSION         Bundle version tag (default: versions.env VERSION)
  --dockerfile PATH         Dockerfile path (default: testing-framework/assets/stack/Dockerfile.runtime)
  --base-tag TAG            Base image tag (default: logos-blockchain-testing:base)
-  --circuits-override PATH  Relative path (within repo) to circuits dir/file to bake (default: testing-framework/assets/stack/kzgrs_test_params)
-  --circuits-platform NAME  Circuits platform identifier for downloads (default: auto; linux-x86_64 or linux-aarch64)
-  --bundle-tar PATH         Bundle tar containing artifacts/{nomos-*,circuits} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env NOMOS_BINARIES_TAR)
-  --no-restore              Do not restore binaries/circuits from bundle tar (forces Dockerfile to build/download as needed)
+  --bundle-tar PATH         Bundle tar containing artifacts/{nomos-*} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env LOGOS_BLOCKCHAIN_BINARIES_TAR)
+  --no-restore              Do not restore binaries from bundle tar (forces Dockerfile to build/download as needed)
  --print-config            Print resolved configuration and exit
  -h, --help                Show this help and exit

Env (legacy/compatible):
-  IMAGE_TAG, VERSION, CIRCUITS_OVERRIDE, CIRCUITS_PLATFORM, COMPOSE_CIRCUITS_PLATFORM,
-  NOMOS_BINARIES_TAR, NOMOS_KZG_DIR_REL
+  IMAGE_TAG, VERSION, LOGOS_BLOCKCHAIN_BINARIES_TAR
USAGE
}
@@ -55,15 +52,7 @@ build_test_image::load_env() {
    BASE_IMAGE_TAG_DEFAULT="logos-blockchain-testing:base"
    VERSION_DEFAULT="${VERSION:?Missing VERSION in versions.env}"
-    NOMOS_NODE_REV="${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV in versions.env}"
-}
-
-build_test_image::detect_circuits_platform() {
-    case "$(uname -m)" in
-        x86_64) echo "linux-x86_64" ;;
-        arm64|aarch64) echo "linux-aarch64" ;;
-        *) echo "linux-x86_64" ;;
-    esac
+    LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV in versions.env}"
}

build_test_image::parse_args() {
@@ -72,10 +61,7 @@ build_test_image::parse_args() {
    DOCKERFILE_PATH="${DOCKERFILE_PATH_DEFAULT}"
    BASE_DOCKERFILE_PATH="${BASE_DOCKERFILE_PATH_DEFAULT}"
    BASE_IMAGE_TAG="${BASE_IMAGE_TAG:-${BASE_IMAGE_TAG_DEFAULT}}"
-    KZG_DIR_REL_DEFAULT="${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}"
-    CIRCUITS_OVERRIDE="${CIRCUITS_OVERRIDE:-${KZG_DIR_REL_DEFAULT}}"
-    CIRCUITS_PLATFORM="${CIRCUITS_PLATFORM:-${COMPOSE_CIRCUITS_PLATFORM:-}}"
-    BUNDLE_TAR_PATH="${NOMOS_BINARIES_TAR:-}"
+    BUNDLE_TAR_PATH="${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}"

    NO_RESTORE=0
    PRINT_CONFIG=0
@@ -90,10 +76,6 @@ build_test_image::parse_args() {
        --dockerfile) DOCKERFILE_PATH="${2:-}"; shift 2 ;;
        --base-tag=*) BASE_IMAGE_TAG="${1#*=}"; shift ;;
        --base-tag) BASE_IMAGE_TAG="${2:-}"; shift 2 ;;
-        --circuits-override=*) CIRCUITS_OVERRIDE="${1#*=}"; shift ;;
-        --circuits-override) CIRCUITS_OVERRIDE="${2:-}"; shift 2 ;;
-        --circuits-platform=*) CIRCUITS_PLATFORM="${1#*=}"; shift ;;
-        --circuits-platform) CIRCUITS_PLATFORM="${2:-}"; shift 2 ;;
        --bundle-tar=*) BUNDLE_TAR_PATH="${1#*=}"; shift ;;
        --bundle-tar) BUNDLE_TAR_PATH="${2:-}"; shift 2 ;;
        --no-restore) NO_RESTORE=1; shift ;;
@@ -108,13 +90,7 @@ build_test_image::parse_args() {
        VERSION="${VERSION_DEFAULT}"
    fi

-    if [ -z "${CIRCUITS_PLATFORM}" ]; then
-        CIRCUITS_PLATFORM="$(build_test_image::detect_circuits_platform)"
-    fi
-
    BIN_DST="${ROOT_DIR}/testing-framework/assets/stack/bin"
-    KZG_DIR_REL="${KZG_DIR_REL_DEFAULT}"
-    
CIRCUITS_DIR_HOST="${ROOT_DIR}/${KZG_DIR_REL}" DEFAULT_LINUX_TAR="${ROOT_DIR}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz" TAR_PATH="${BUNDLE_TAR_PATH:-${DEFAULT_LINUX_TAR}}" @@ -126,11 +102,7 @@ build_test_image::print_config() { echo "Dockerfile: ${DOCKERFILE_PATH}" echo "Base image tag: ${BASE_IMAGE_TAG}" echo "Base Dockerfile: ${BASE_DOCKERFILE_PATH}" - echo "Logos node rev: ${NOMOS_NODE_REV}" - echo "Circuits override: ${CIRCUITS_OVERRIDE:-}" - echo "Circuits version (download fallback): ${VERSION}" - echo "Circuits platform: ${CIRCUITS_PLATFORM}" - echo "Host circuits dir: ${CIRCUITS_DIR_HOST}" + echo "Logos node rev: ${LOGOS_BLOCKCHAIN_NODE_REV}" echo "Binaries dir: ${BIN_DST}" echo "Bundle tar (if used): ${TAR_PATH}" echo "Restore from tar: $([ "${NO_RESTORE}" -eq 1 ] && echo "disabled" || echo "enabled")" @@ -138,14 +110,13 @@ build_test_image::print_config() { build_test_image::have_host_binaries() { # Preserve existing behavior: only require node on the host. - # If logos-blockchain-cli is missing, the Dockerfile can still build it from source. [ -x "${BIN_DST}/logos-blockchain-node" ] } build_test_image::restore_from_bundle() { [ -f "${TAR_PATH}" ] || build_test_image::fail "Prebuilt binaries missing and bundle tar not found at ${TAR_PATH}" - echo "==> Restoring binaries/circuits from ${TAR_PATH}" + echo "==> Restoring binaries from ${TAR_PATH}" local tmp_extract tmp_extract="$(common::tmpdir nomos-bundle-extract.XXXXXX)" trap "rm -rf -- '${tmp_extract}'" RETURN @@ -153,22 +124,13 @@ build_test_image::restore_from_bundle() { tar -xzf "${TAR_PATH}" -C "${tmp_extract}" local artifacts="${tmp_extract}/artifacts" - for bin in logos-blockchain-node logos-blockchain-cli; do - [ -f "${artifacts}/${bin}" ] || build_test_image::fail "Bundle ${TAR_PATH} missing artifacts/${bin}" - done + [ -f "${artifacts}/logos-blockchain-node" ] || build_test_image::fail \ + "Bundle ${TAR_PATH} missing artifacts/logos-blockchain-node" mkdir -p "${BIN_DST}" - cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${BIN_DST}/" - chmod +x "${BIN_DST}/logos-blockchain-node" "${BIN_DST}/logos-blockchain-cli" || true + cp "${artifacts}/logos-blockchain-node" "${BIN_DST}/" + chmod +x "${BIN_DST}/logos-blockchain-node" || true - if [ -d "${artifacts}/circuits" ]; then - mkdir -p "${CIRCUITS_DIR_HOST}" - if command -v rsync >/dev/null 2>&1; then - rsync -a --delete "${artifacts}/circuits/" "${CIRCUITS_DIR_HOST}/" - else - cp -a "${artifacts}/circuits/." 
"${CIRCUITS_DIR_HOST}/" - fi - fi } build_test_image::maybe_restore_assets() { @@ -193,26 +155,25 @@ build_test_image::docker_build() { x86_64) host_platform="linux/amd64" ;; arm64|aarch64) host_platform="linux/arm64" ;; esac - case "${CIRCUITS_PLATFORM}" in - linux-x86_64) target_platform="linux/amd64" ;; - linux-aarch64) target_platform="linux/arm64" ;; - esac + + if [ -n "${DOCKER_PLATFORM:-}" ]; then + target_platform="${DOCKER_PLATFORM}" + elif [ -n "${COMPOSE_CIRCUITS_PLATFORM:-}" ] || [ -n "${CIRCUITS_PLATFORM:-}" ]; then + case "${COMPOSE_CIRCUITS_PLATFORM:-${CIRCUITS_PLATFORM}}" in + linux-x86_64) target_platform="linux/amd64" ;; + linux-aarch64) target_platform="linux/arm64" ;; + esac + fi local -a base_build_args=( -f "${BASE_DOCKERFILE_PATH}" -t "${BASE_IMAGE_TAG}" - --build-arg "NOMOS_NODE_REV=${NOMOS_NODE_REV}" - --build-arg "CIRCUITS_PLATFORM=${CIRCUITS_PLATFORM}" + --build-arg "LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV}" --build-arg "VERSION=${VERSION}" "${ROOT_DIR}" ) - - if [ -n "${CIRCUITS_OVERRIDE}" ]; then - base_build_args+=(--build-arg "CIRCUITS_OVERRIDE=${CIRCUITS_OVERRIDE}") - fi if [ -n "${host_platform}" ] && [ -n "${target_platform}" ] && [ "${host_platform}" != "${target_platform}" ]; then base_build_args+=(--platform "${target_platform}") - base_build_args+=(--build-arg "RAPIDSNARK_FORCE_REBUILD=1") fi printf "Running:" @@ -229,7 +190,6 @@ build_test_image::docker_build() { if [ -n "${host_platform}" ] && [ -n "${target_platform}" ] && [ "${host_platform}" != "${target_platform}" ]; then final_build_args+=(--platform "${target_platform}") fi - printf "Running:" printf " %q" docker build "${final_build_args[@]}" echo @@ -252,8 +212,7 @@ build_test_image::main() { cat <}" - checks::say "NOMOS_NODE_REV=${NOMOS_NODE_REV:-}" - if [ -n "${NOMOS_NODE_PATH:-}" ]; then - checks::say "NOMOS_NODE_PATH=${NOMOS_NODE_PATH}" + checks::say "LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV:-}" + if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH:-}" ]; then + checks::say "LOGOS_BLOCKCHAIN_NODE_PATH=${LOGOS_BLOCKCHAIN_NODE_PATH}" fi else checks::warn "versions.env missing (scripts depend on it)" @@ -80,26 +80,6 @@ checks::print_disk_space() { fi } -checks::print_kzg_params() { - checks::section "KZG Params" - - local default_kzg_dir_rel="testing-framework/assets/stack/kzgrs_test_params" - local default_kzg_file="kzgrs_test_params" - local default_kzg_container_path="/kzgrs_test_params/kzgrs_test_params" - - local kzg_dir_rel="${NOMOS_KZG_DIR_REL:-${default_kzg_dir_rel}}" - local kzg_file="${NOMOS_KZG_FILE:-${default_kzg_file}}" - local kzg_container_path="${NOMOS_KZG_CONTAINER_PATH:-${default_kzg_container_path}}" - local host_kzg_path="${ROOT_DIR}/${kzg_dir_rel}/${kzg_file}" - - checks::say "host: ${host_kzg_path}" - checks::say "container: ${kzg_container_path}" - if [ -f "${host_kzg_path}" ]; then - checks::ok "KZG params file exists" - else - checks::warn "KZG params file missing (DA workloads will fail); run: scripts/run/run-examples.sh (auto) or scripts/setup/setup-logos-blockchain-circuits.sh" - fi -} checks::print_rust_toolchain() { checks::section "Rust Toolchain" @@ -138,9 +118,9 @@ checks::print_docker() { checks::warn "could not query docker engine arch (is Docker running?)" fi - local bundle_platform="${NOMOS_BUNDLE_DOCKER_PLATFORM:-${NOMOS_BIN_PLATFORM:-}}" + local bundle_platform="${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-${LOGOS_BLOCKCHAIN_BIN_PLATFORM:-}}" if [ -z "${bundle_platform}" ]; then - checks::say "NOMOS_BUNDLE_DOCKER_PLATFORM=" + checks::say 
"LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=" if [[ "${server_arch}" == *"linux/arm64"* ]]; then checks::say "bundle docker platform (auto): ${default_bundle_platform_arm64}" else @@ -148,19 +128,19 @@ checks::print_docker() { fi bundle_platform="auto" else - checks::say "NOMOS_BUNDLE_DOCKER_PLATFORM=${bundle_platform}" + checks::say "LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=${bundle_platform}" fi if [[ "${server_arch}" == *"linux/arm64"* ]] && [ "${bundle_platform}" = "${default_bundle_platform_amd64}" ]; then - checks::warn "Docker engine is linux/arm64 but bundle platform is ${default_bundle_platform_amd64} (emulation). If builds are slow/flaky, set: NOMOS_BUNDLE_DOCKER_PLATFORM=${default_bundle_platform_arm64}" + checks::warn "Docker engine is linux/arm64 but bundle platform is ${default_bundle_platform_amd64} (emulation). If builds are slow/flaky, set: LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=${default_bundle_platform_arm64}" fi - local image="${NOMOS_TESTNET_IMAGE:-${default_local_image}}" - checks::say "NOMOS_TESTNET_IMAGE=${image}" + local image="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${default_local_image}}" + checks::say "LOGOS_BLOCKCHAIN_TESTNET_IMAGE=${image}" if docker image inspect "${image}" >/dev/null 2>&1; then checks::ok "testnet image present locally" else - checks::warn "testnet image not present locally (compose/k8s runs will rebuild or fail if NOMOS_SKIP_IMAGE_BUILD=1)" + checks::warn "testnet image not present locally (compose/k8s runs will rebuild or fail if LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)" fi } @@ -206,7 +186,7 @@ checks::print_k8s_image_visibility() { checks::section "K8s Image Visibility" local default_local_image="logos-blockchain-testing:local" - local image="${NOMOS_TESTNET_IMAGE:-${default_local_image}}" + local image="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${default_local_image}}" if [ -z "${KUBE_CONTEXT:-}" ]; then return 0 @@ -231,7 +211,7 @@ checks::print_k8s_image_visibility() { *) if [[ "${image}" == *":local" ]]; then checks::warn "current context is ${KUBE_CONTEXT}; a :local image tag may not be reachable by cluster nodes" - checks::say "Suggested: push to a registry and set NOMOS_TESTNET_IMAGE, or load into the cluster if supported" + checks::say "Suggested: push to a registry and set LOGOS_BLOCKCHAIN_TESTNET_IMAGE, or load into the cluster if supported" fi ;; esac @@ -268,7 +248,7 @@ checks::print_docker_desktop_kubernetes_health() { checks::print_debug_flags() { checks::section "Runner Debug Flags (optional)" checks::say "SLOW_TEST_ENV=${SLOW_TEST_ENV:-} (if true: doubles readiness timeouts)" - checks::say "NOMOS_SKIP_IMAGE_BUILD=${NOMOS_SKIP_IMAGE_BUILD:-} (compose/k8s)" + checks::say "LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-} (compose/k8s)" checks::say "COMPOSE_RUNNER_PRESERVE=${COMPOSE_RUNNER_PRESERVE:-} (compose)" checks::say "K8S_RUNNER_PRESERVE=${K8S_RUNNER_PRESERVE:-} (k8s)" checks::say "K8S_RUNNER_DEBUG=${K8S_RUNNER_DEBUG:-} (k8s helm debug)" @@ -285,7 +265,6 @@ checks::main() { checks::load_env checks::print_workspace checks::print_disk_space - checks::print_kzg_params checks::print_rust_toolchain checks::print_docker checks::print_docker_compose @@ -295,7 +274,7 @@ checks::main() { checks::print_debug_flags checks::section "Done" - checks::say "If something looks off, start with: scripts/run/run-examples.sh -t 60 -v 1 -e 1" + checks::say "If something looks off, start with: scripts/run/run-examples.sh -t 60 -n 1" } if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then diff --git a/scripts/run/run-examples.sh 
b/scripts/run/run-examples.sh
index a74ded3..e4af1c0 100755
--- a/scripts/run/run-examples.sh
+++ b/scripts/run/run-examples.sh
@@ -8,11 +8,6 @@ fi
 # shellcheck disable=SC1091
 . "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../lib/common.sh"
 
-readonly DEFAULT_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params"
-readonly DEFAULT_KZG_FILE="kzgrs_test_params"
-readonly DEFAULT_KZG_CONTAINER_PATH="/kzgrs_test_params/kzgrs_test_params"
-readonly DEFAULT_KZG_IN_IMAGE_PARAMS_PATH="/opt/nomos/kzg-params/kzgrs_test_params"
-
 readonly DEFAULT_LOCAL_IMAGE="logos-blockchain-testing:local"
 readonly DEFAULT_PUBLIC_ECR_REGISTRY="public.ecr.aws/r4s5t9y4"
 readonly DEFAULT_PUBLIC_ECR_REPO="logos/logos-blockchain"
@@ -44,37 +39,38 @@ Modes:
 Options:
   -t, --run-seconds N     Duration to run the demo (required)
-  -v, --validators N      Number of validators (required)
-  --bundle PATH           Convenience alias for setting NOMOS_BINARIES_TAR=PATH
+  -n, --nodes N           Number of nodes (required)
+  --bundle PATH           Convenience alias for setting LOGOS_BLOCKCHAIN_BINARIES_TAR=PATH
   --metrics-query-url URL           PromQL base URL the runner process can query (optional)
   --metrics-otlp-ingest-url URL     Full OTLP HTTP ingest URL for node metrics export (optional)
   --external-prometheus URL         Alias for --metrics-query-url
   --external-otlp-metrics-endpoint URL   Alias for --metrics-otlp-ingest-url
   --local                 Use a local Docker image tag (default for docker-desktop k8s)
-  --no-image-build        Skip rebuilding the compose/k8s image (sets NOMOS_SKIP_IMAGE_BUILD=1)
+  --no-image-build        Skip rebuilding the compose/k8s image (sets LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)
 
 Environment:
-  VERSION                 Circuits version (default from versions.env)
+  VERSION                 Bundle version (default from versions.env)
   CONSENSUS_SLOT_TIME     Consensus slot duration in seconds (default 2)
   CONSENSUS_ACTIVE_SLOT_COEFF   Probability a slot is active (default 0.9); expected block interval ≈ slot_time / coeff
-  NOMOS_TESTNET_IMAGE     Image reference (overridden by --local/--ecr selection)
+  LOGOS_BLOCKCHAIN_TESTNET_IMAGE   Image reference (overridden by --local/--ecr selection)
   ECR_IMAGE               Full image reference for --ecr (overrides ECR_REGISTRY/ECR_REPO/TAG)
   ECR_REGISTRY            Registry hostname for --ecr (default ${DEFAULT_PUBLIC_ECR_REGISTRY})
   ECR_REPO                Repository path for --ecr (default ${DEFAULT_PUBLIC_ECR_REPO})
   TAG                     Tag for --ecr (default ${DEFAULT_ECR_TAG})
-  NOMOS_TESTNET_IMAGE_PULL_POLICY   K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
-  NOMOS_BINARIES_TAR      Path to prebuilt binaries/circuits tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
-  NOMOS_SKIP_IMAGE_BUILD  Set to 1 to skip rebuilding the compose/k8s image
-  NOMOS_FORCE_IMAGE_BUILD Set to 1 to force image rebuild even for k8s ECR mode
-  NOMOS_METRICS_QUERY_URL PromQL base URL for the runner process (optional)
-  NOMOS_METRICS_OTLP_INGEST_URL   Full OTLP HTTP ingest URL for node metrics export (optional)
-  NOMOS_GRAFANA_URL       Grafana base URL for printing/logging (optional)
+  LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY   K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
+  LOGOS_BLOCKCHAIN_BINARIES_TAR   Path to prebuilt binaries tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
+  LOGOS_BLOCKCHAIN_CIRCUITS   Directory containing circuits assets (defaults to ~/.logos-blockchain-circuits)
+  LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD   Set to 1 to skip rebuilding the compose/k8s image
+  LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD   Set to 1 to force image rebuild even for k8s ECR mode
+  LOGOS_BLOCKCHAIN_METRICS_QUERY_URL   PromQL base URL for the runner process (optional)
+  LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL   Full OTLP HTTP ingest URL for node metrics export (optional)
+  LOGOS_BLOCKCHAIN_GRAFANA_URL   Grafana base URL for printing/logging (optional)
 
 Notes:
   - For k8s runs on non-docker-desktop clusters (e.g. EKS), a locally built Docker image is
     not visible to the cluster. By default, this script skips local image rebuilds in that
     case. If you need a custom image, run scripts/build/build_test_image.sh and push it to a registry the
-    cluster can pull from, then set NOMOS_TESTNET_IMAGE accordingly.
+    cluster can pull from, then set LOGOS_BLOCKCHAIN_TESTNET_IMAGE accordingly.
 EOF
 }
@@ -96,11 +92,6 @@ run_examples::load_env() {
     DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}"
     VERSION="${VERSION:-${DEFAULT_VERSION}}"
-
-    KZG_DIR_REL="${NOMOS_KZG_DIR_REL:-${DEFAULT_KZG_DIR_REL}}"
-    KZG_FILE="${NOMOS_KZG_FILE:-${DEFAULT_KZG_FILE}}"
-    KZG_CONTAINER_PATH="${NOMOS_KZG_CONTAINER_PATH:-${DEFAULT_KZG_CONTAINER_PATH}}"
-    HOST_KZG_DIR="${ROOT_DIR}/${KZG_DIR_REL}"
-    HOST_KZG_FILE="${HOST_KZG_DIR}/${KZG_FILE}"
 }
 
 run_examples::select_bin() {
@@ -115,7 +106,7 @@ run_examples::parse_args() {
     MODE="compose"
     RUN_SECS_RAW=""
-    DEMO_VALIDATORS=""
+    DEMO_NODES=""
     IMAGE_SELECTION_MODE="auto"
     METRICS_QUERY_URL=""
     METRICS_OTLP_INGEST_URL=""
@@ -138,22 +129,22 @@
                 RUN_SECS_RAW="${1#*=}"
                 shift
                 ;;
-            -v|--validators)
-                DEMO_VALIDATORS="${2:-}"
+            -n|--nodes)
+                DEMO_NODES="${2:-}"
                 shift 2
                 ;;
-            --validators=*)
-                DEMO_VALIDATORS="${1#*=}"
+            --nodes=*)
+                DEMO_NODES="${1#*=}"
                 shift
                 ;;
             --bundle)
-                NOMOS_BINARIES_TAR="${2:-}"
-                export NOMOS_BINARIES_TAR
+                LOGOS_BLOCKCHAIN_BINARIES_TAR="${2:-}"
+                export LOGOS_BLOCKCHAIN_BINARIES_TAR
                 shift 2
                 ;;
             --bundle=*)
-                NOMOS_BINARIES_TAR="${1#*=}"
-                export NOMOS_BINARIES_TAR
+                LOGOS_BLOCKCHAIN_BINARIES_TAR="${1#*=}"
+                export LOGOS_BLOCKCHAIN_BINARIES_TAR
                 shift
                 ;;
             --metrics-query-url)
@@ -193,8 +184,8 @@ run_examples::parse_args() {
                 shift
                 ;;
             --no-image-build)
-                NOMOS_SKIP_IMAGE_BUILD=1
-                export NOMOS_SKIP_IMAGE_BUILD
+                LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1
+                export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD
                 shift
                 ;;
             compose|host|k8s)
@@ -213,8 +204,8 @@ run_examples::parse_args() {
         esac
     done
 
-    if [ -n "${NOMOS_BINARIES_TAR:-}" ] && [ ! -f "${NOMOS_BINARIES_TAR}" ]; then
-        run_examples::fail_with_usage "NOMOS_BINARIES_TAR is set but missing: ${NOMOS_BINARIES_TAR}"
+    if [ -n "${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}" ] && [ ! -f "${LOGOS_BLOCKCHAIN_BINARIES_TAR}" ]; then
+        run_examples::fail_with_usage "LOGOS_BLOCKCHAIN_BINARIES_TAR is set but missing: ${LOGOS_BLOCKCHAIN_BINARIES_TAR}"
     fi
 
     if ! common::is_uint "${RUN_SECS_RAW}" || [ "${RUN_SECS_RAW}" -le 0 ]; then
@@ -222,11 +213,11 @@ run_examples::parse_args() {
     fi
     RUN_SECS="${RUN_SECS_RAW}"
 
-    if [ -z "${DEMO_VALIDATORS}" ]; then
-        run_examples::fail_with_usage "validators must be provided via -v/--validators"
+    if [ -z "${DEMO_NODES}" ]; then
+        run_examples::fail_with_usage "nodes must be provided via -n/--nodes"
     fi
-    if ! common::is_uint "${DEMO_VALIDATORS}" ; then
-        run_examples::fail_with_usage "validators must be a non-negative integer (pass -v/--validators)"
+    if ! 
common::is_uint "${DEMO_NODES}" ; then + run_examples::fail_with_usage "nodes must be a non-negative integer (pass -n/--nodes)" fi } @@ -248,8 +239,8 @@ run_examples::select_image() { fi if [ "${selection}" = "local" ]; then - IMAGE="${NOMOS_TESTNET_IMAGE:-${DEFAULT_LOCAL_IMAGE}}" - export NOMOS_TESTNET_IMAGE_PULL_POLICY="${NOMOS_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_LOCAL}}" + IMAGE="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${DEFAULT_LOCAL_IMAGE}}" + export LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_LOCAL}}" elif [ "${selection}" = "ecr" ]; then local tag="${TAG:-${DEFAULT_ECR_TAG}}" if [ -n "${ECR_IMAGE:-}" ]; then @@ -268,40 +259,35 @@ run_examples::select_image() { local repo="${ECR_REPO:-${DEFAULT_PUBLIC_ECR_REPO}}" IMAGE="${registry}/${repo}:${tag}" fi - export NOMOS_TESTNET_IMAGE_PULL_POLICY="${NOMOS_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_ECR}}" + export LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_ECR}}" else run_examples::fail_with_usage "Unknown image selection mode: ${selection}" fi - export NOMOS_IMAGE_SELECTION="${selection}" + export LOGOS_BLOCKCHAIN_IMAGE_SELECTION="${selection}" export IMAGE_TAG="${IMAGE}" - export NOMOS_TESTNET_IMAGE="${IMAGE}" + export LOGOS_BLOCKCHAIN_TESTNET_IMAGE="${IMAGE}" - if [ "${MODE}" = "k8s" ]; then - if [ "${selection}" = "ecr" ]; then - export NOMOS_KZG_MODE="${NOMOS_KZG_MODE:-inImage}" - # A locally built Docker image isn't visible to remote clusters (e.g. EKS). Default to - # skipping the local rebuild, unless the user explicitly set NOMOS_SKIP_IMAGE_BUILD or - # overrides via NOMOS_FORCE_IMAGE_BUILD=1. - if [ "${NOMOS_FORCE_IMAGE_BUILD:-0}" != "1" ]; then - NOMOS_SKIP_IMAGE_BUILD="${NOMOS_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}" - export NOMOS_SKIP_IMAGE_BUILD - fi - else - export NOMOS_KZG_MODE="${NOMOS_KZG_MODE:-hostPath}" + if [ "${MODE}" = "k8s" ] && [ "${selection}" = "ecr" ]; then + # A locally built Docker image isn't visible to remote clusters (e.g. EKS). Default to + # skipping the local rebuild, unless the user explicitly set LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD or + # overrides via LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD=1. 
+ if [ "${LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD:-0}" != "1" ]; then + LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD="${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}" + export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD fi fi } run_examples::default_tar_path() { - if [ -n "${NOMOS_BINARIES_TAR:-}" ]; then - echo "${NOMOS_BINARIES_TAR}" + if [ -n "${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}" ]; then + echo "${LOGOS_BLOCKCHAIN_BINARIES_TAR}" return fi case "${MODE}" in host) echo "${ROOT_DIR}/.tmp/nomos-binaries-host-${VERSION}.tar.gz" ;; compose|k8s) - if [ "${NOMOS_SKIP_IMAGE_BUILD:-}" = "1" ]; then + if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-}" = "1" ]; then echo "${ROOT_DIR}/.tmp/nomos-binaries-host-${VERSION}.tar.gz" else echo "${ROOT_DIR}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz" @@ -314,7 +300,7 @@ run_examples::default_tar_path() { run_examples::bundle_matches_expected() { local tar_path="$1" [ -f "${tar_path}" ] || return 1 - [ -z "${NOMOS_NODE_REV:-}" ] && return 0 + [ -z "${LOGOS_BLOCKCHAIN_NODE_REV:-}" ] && return 0 local meta tar_rev tar_head meta="$(tar -xOzf "${tar_path}" artifacts/nomos-bundle-meta.env 2>/dev/null || true)" @@ -324,13 +310,13 @@ run_examples::bundle_matches_expected() { fi tar_rev="$(echo "${meta}" | sed -n 's/^nomos_node_rev=//p' | head -n 1)" tar_head="$(echo "${meta}" | sed -n 's/^nomos_node_git_head=//p' | head -n 1)" - if [ -n "${tar_rev}" ] && [ "${tar_rev}" != "${NOMOS_NODE_REV}" ]; then - echo "Bundle ${tar_path} is for logos-blockchain-node rev ${tar_rev}, expected ${NOMOS_NODE_REV}; rebuilding." >&2 + if [ -n "${tar_rev}" ] && [ "${tar_rev}" != "${LOGOS_BLOCKCHAIN_NODE_REV}" ]; then + echo "Bundle ${tar_path} is for logos-blockchain-node rev ${tar_rev}, expected ${LOGOS_BLOCKCHAIN_NODE_REV}; rebuilding." >&2 return 1 fi - if [ -n "${tar_head}" ] && echo "${NOMOS_NODE_REV}" | grep -Eq '^[0-9a-f]{7,40}$'; then - if [ "${tar_head}" != "${NOMOS_NODE_REV}" ]; then - echo "Bundle ${tar_path} is for logos-blockchain-node git head ${tar_head}, expected ${NOMOS_NODE_REV}; rebuilding." >&2 + if [ -n "${tar_head}" ] && echo "${LOGOS_BLOCKCHAIN_NODE_REV}" | grep -Eq '^[0-9a-f]{7,40}$'; then + if [ "${tar_head}" != "${LOGOS_BLOCKCHAIN_NODE_REV}" ]; then + echo "Bundle ${tar_path} is for logos-blockchain-node git head ${tar_head}, expected ${LOGOS_BLOCKCHAIN_NODE_REV}; rebuilding." >&2 return 1 fi fi @@ -368,13 +354,10 @@ run_examples::restore_binaries_from_tar() { local src="${extract_dir}/artifacts" local bin_dst="${ROOT_DIR}/testing-framework/assets/stack/bin" - local circuits_src="${src}/circuits" - local circuits_dst="${HOST_KZG_DIR}" - RESTORED_BIN_DIR="${src}" export RESTORED_BIN_DIR - if [ ! -f "${src}/logos-blockchain-node" ] || [ ! -f "${src}/logos-blockchain-cli" ]; then + if [ ! -f "${src}/logos-blockchain-node" ]; then echo "Binaries missing in ${tar_path}; provide a prebuilt binaries tarball." >&2 return 1 fi @@ -383,25 +366,11 @@ run_examples::restore_binaries_from_tar() { if [ "${MODE}" != "host" ] && ! run_examples::host_bin_matches_arch "${src}/logos-blockchain-node"; then echo "Bundled binaries do not match host arch; skipping copy so containers rebuild from source." 
copy_bins=0 - rm -f "${bin_dst}/logos-blockchain-node" "${bin_dst}/logos-blockchain-cli" + rm -f "${bin_dst}/logos-blockchain-node" fi if [ "${copy_bins}" -eq 1 ]; then mkdir -p "${bin_dst}" - cp "${src}/logos-blockchain-node" "${src}/logos-blockchain-cli" "${bin_dst}/" - fi - - if [ -d "${circuits_src}" ] && [ -f "${circuits_src}/${KZG_FILE}" ]; then - rm -rf "${circuits_dst}" - mkdir -p "${circuits_dst}" - if command -v rsync >/dev/null 2>&1; then - rsync -a --delete "${circuits_src}/" "${circuits_dst}/" - else - rm -rf "${circuits_dst:?}/"* - cp -a "${circuits_src}/." "${circuits_dst}/" - fi - else - echo "Circuits missing in ${tar_path}; provide a prebuilt binaries/circuits tarball." >&2 - return 1 + cp "${src}/logos-blockchain-node" "${bin_dst}/" fi RESTORED_BINARIES=1 @@ -412,7 +381,7 @@ run_examples::ensure_binaries_tar() { local platform="$1" local tar_path="$2" echo "==> Building fresh binaries bundle (${platform}) at ${tar_path}" - "${ROOT_DIR}/scripts/build/build-bundle.sh" --platform "${platform}" --output "${tar_path}" --rev "${NOMOS_NODE_REV}" + "${ROOT_DIR}/scripts/build/build-bundle.sh" --platform "${platform}" --output "${tar_path}" --rev "${LOGOS_BLOCKCHAIN_NODE_REV}" } run_examples::prepare_bundles() { @@ -428,7 +397,7 @@ run_examples::prepare_bundles() { fi # On non-Linux compose/k8s runs, use the Linux bundle for image build, then restore host bundle for the runner. - if [ "${MODE}" != "host" ] && [ "$(uname -s)" != "Linux" ] && [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "0" ] && [ -f "${LINUX_TAR}" ]; then + if [ "${MODE}" != "host" ] && [ "$(uname -s)" != "Linux" ] && [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "0" ] && [ -f "${LINUX_TAR}" ]; then NEED_HOST_RESTORE_AFTER_IMAGE=1 run_examples::restore_binaries_from_tar "${LINUX_TAR}" || { run_examples::ensure_binaries_tar linux "${LINUX_TAR}" @@ -442,7 +411,7 @@ run_examples::prepare_bundles() { case "${MODE}" in host) run_examples::ensure_binaries_tar host "${tar_path}" ;; compose|k8s) - if [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "1" ]; then + if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "1" ]; then run_examples::ensure_binaries_tar host "${tar_path}" else run_examples::ensure_binaries_tar linux "${tar_path}" @@ -452,7 +421,7 @@ run_examples::prepare_bundles() { esac run_examples::restore_binaries_from_tar "${tar_path}" || common::die \ - "Missing or invalid binaries tarball. Provide it via --bundle/NOMOS_BINARIES_TAR or place it at $(run_examples::default_tar_path)." + "Missing or invalid binaries tarball. Provide it via --bundle/LOGOS_BLOCKCHAIN_BINARIES_TAR or place it at $(run_examples::default_tar_path)." fi } @@ -461,14 +430,13 @@ run_examples::maybe_rebuild_image() { return 0 fi - if [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "1" ]; then - echo "==> Skipping testnet image rebuild (NOMOS_SKIP_IMAGE_BUILD=1)" + if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "1" ]; then + echo "==> Skipping testnet image rebuild (LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)" return 0 fi echo "==> Rebuilding testnet image (${IMAGE})" - IMAGE_TAG="${IMAGE}" COMPOSE_CIRCUITS_PLATFORM="${COMPOSE_CIRCUITS_PLATFORM:-}" \ - bash "${ROOT_DIR}/scripts/build/build_test_image.sh" + IMAGE_TAG="${IMAGE}" bash "${ROOT_DIR}/scripts/build/build_test_image.sh" } run_examples::maybe_restore_host_after_image() { @@ -484,21 +452,11 @@ run_examples::maybe_restore_host_after_image() { } run_examples::validate_restored_bundle() { - HOST_BUNDLE_PATH="${HOST_KZG_DIR}" - KZG_HOST_PATH="${HOST_BUNDLE_PATH}/${KZG_FILE}" - - if [ ! 
-x "${HOST_BUNDLE_PATH}/zksign/witness_generator" ]; then - common::die "Missing zksign/witness_generator in restored bundle; ensure the tarball contains host-compatible circuits." - fi - if [ ! -f "${KZG_HOST_PATH}" ]; then - common::die "KZG params missing at ${KZG_HOST_PATH}; ensure the tarball contains circuits." - fi - if [ "${MODE}" = "host" ] && ! { [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ]; }; then - local tar_node tar_exec + local tar_node tar_node="${RESTORED_BIN_DIR:-${ROOT_DIR}/testing-framework/assets/stack/bin}/logos-blockchain-node" - [ -x "${tar_node}" ] && [ -x "${tar_exec}" ] || common::die \ + [ -x "${tar_node}" ] || common::die \ "Restored tarball missing host executables; provide a host-compatible binaries tarball." run_examples::host_bin_matches_arch "${tar_node}" || common::die \ "Restored executables do not match host architecture; provide a host-compatible binaries tarball." @@ -509,59 +467,40 @@ run_examples::validate_restored_bundle() { fi } -run_examples::kzg_path_for_mode() { - if [ "${MODE}" = "compose" ] || [ "${MODE}" = "k8s" ]; then - if [ "${MODE}" = "k8s" ] && [ "${NOMOS_KZG_MODE:-hostPath}" = "inImage" ]; then - echo "${NOMOS_KZG_IN_IMAGE_PARAMS_PATH:-${DEFAULT_KZG_IN_IMAGE_PARAMS_PATH}}" - else - echo "${KZG_CONTAINER_PATH}" +run_examples::ensure_circuits() { + if [ -n "${LOGOS_BLOCKCHAIN_CIRCUITS:-}" ]; then + if [ -d "${LOGOS_BLOCKCHAIN_CIRCUITS}" ]; then + return 0 fi - else - echo "${KZG_HOST_PATH}" + common::die "LOGOS_BLOCKCHAIN_CIRCUITS is set to '${LOGOS_BLOCKCHAIN_CIRCUITS}', but the directory does not exist" fi -} -run_examples::ensure_compose_circuits_platform_default() { - if [ "${MODE}" != "compose" ] || [ -n "${COMPOSE_CIRCUITS_PLATFORM:-}" ]; then + local default_dir="${HOME}/.logos-blockchain-circuits" + if [ -d "${default_dir}" ]; then + LOGOS_BLOCKCHAIN_CIRCUITS="${default_dir}" + export LOGOS_BLOCKCHAIN_CIRCUITS return 0 fi - local arch - arch="$(uname -m)" - case "${arch}" in - x86_64) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;; - arm64|aarch64) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;; - *) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;; - esac - export COMPOSE_CIRCUITS_PLATFORM -} - -run_examples::maybe_set_docker_platform() { - if [ "${MODE}" != "compose" ] || [ -n "${DOCKER_DEFAULT_PLATFORM:-}" ]; then - return 0 - fi - - case "${COMPOSE_CIRCUITS_PLATFORM:-}" in - linux-x86_64) DOCKER_DEFAULT_PLATFORM="linux/amd64" ;; - linux-aarch64) DOCKER_DEFAULT_PLATFORM="linux/arm64" ;; - *) return 0 ;; - esac - - export DOCKER_DEFAULT_PLATFORM + echo "==> Circuits not found; installing to ${default_dir}" + bash "${ROOT_DIR}/scripts/setup/setup-logos-blockchain-circuits.sh" "${VERSION}" "${default_dir}" + LOGOS_BLOCKCHAIN_CIRCUITS="${default_dir}" + export LOGOS_BLOCKCHAIN_CIRCUITS } run_examples::run() { - local kzg_path - kzg_path="$(run_examples::kzg_path_for_mode)" - - export NOMOS_DEMO_RUN_SECS="${RUN_SECS}" - export NOMOS_DEMO_VALIDATORS="${DEMO_VALIDATORS}" + export LOGOS_BLOCKCHAIN_DEMO_RUN_SECS="${RUN_SECS}" + export LOGOS_BLOCKCHAIN_DEMO_NODES="${DEMO_NODES}" if [ -n "${METRICS_QUERY_URL}" ]; then - export NOMOS_METRICS_QUERY_URL="${METRICS_QUERY_URL}" + export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL="${METRICS_QUERY_URL}" fi if [ -n "${METRICS_OTLP_INGEST_URL}" ]; then - export NOMOS_METRICS_OTLP_INGEST_URL="${METRICS_OTLP_INGEST_URL}" + export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL="${METRICS_OTLP_INGEST_URL}" + fi + + if [ "${MODE}" = "host" ]; then + run_examples::ensure_circuits fi echo "==> 
Running ${BIN} for ${RUN_SECS}s (mode=${MODE}, image=${IMAGE})" @@ -569,12 +508,8 @@ run_examples::run() { POL_PROOF_DEV_MODE=true \ TESTNET_PRINT_ENDPOINTS=1 \ - NOMOS_TESTNET_IMAGE="${IMAGE}" \ - NOMOS_CIRCUITS="${HOST_BUNDLE_PATH}" \ - LOGOS_BLOCKCHAIN_CIRCUITS="${HOST_BUNDLE_PATH}" \ - LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH="${kzg_path}" \ + LOGOS_BLOCKCHAIN_TESTNET_IMAGE="${IMAGE}" \ LOGOS_BLOCKCHAIN_NODE_BIN="${LOGOS_BLOCKCHAIN_NODE_BIN:-}" \ - COMPOSE_CIRCUITS_PLATFORM="${COMPOSE_CIRCUITS_PLATFORM:-}" \ cargo run -p runner-examples --bin "${BIN}" } @@ -585,12 +520,10 @@ run_examples::main() { run_examples::select_image run_examples::prepare_bundles - echo "==> Using restored circuits/binaries bundle" + echo "==> Using restored binaries bundle" SETUP_OUT="$(common::tmpfile nomos-setup-output.XXXXXX)" - run_examples::ensure_compose_circuits_platform_default - run_examples::maybe_set_docker_platform run_examples::maybe_rebuild_image run_examples::maybe_restore_host_after_image run_examples::validate_restored_bundle diff --git a/scripts/run/run-test-matrix.sh b/scripts/run/run-test-matrix.sh index eea3e43..bea5ce2 100755 --- a/scripts/run/run-test-matrix.sh +++ b/scripts/run/run-test-matrix.sh @@ -17,7 +17,7 @@ image rebuilds (where it makes sense), after cleaning and rebuilding bundles. Options: -t, --run-seconds N Demo duration for each run (default: 120) - -v, --validators N Validators (default: 1) + -n, --nodes N Nodes (default: 1) --modes LIST Comma-separated: host,compose,k8s (default: host,compose,k8s) --no-clean Skip scripts/ops/clean.sh step --no-bundles Skip scripts/build/build-bundle.sh (uses existing .tmp tarballs) @@ -44,7 +44,7 @@ matrix::have() { command -v "$1" >/dev/null 2>&1; } matrix::parse_args() { RUN_SECS=120 - VALIDATORS=1 + NODES=1 MODES_RAW="host,compose,k8s" DO_CLEAN=1 DO_BUNDLES=1 @@ -59,8 +59,8 @@ matrix::parse_args() { -h|--help) matrix::usage; exit 0 ;; -t|--run-seconds) RUN_SECS="${2:-}"; shift 2 ;; --run-seconds=*) RUN_SECS="${1#*=}"; shift ;; - -v|--validators) VALIDATORS="${2:-}"; shift 2 ;; - --validators=*) VALIDATORS="${1#*=}"; shift ;; + -n|--nodes) NODES="${2:-}"; shift 2 ;; + --nodes=*) NODES="${1#*=}"; shift ;; --modes) MODES_RAW="${2:-}"; shift 2 ;; --modes=*) MODES_RAW="${1#*=}"; shift ;; --no-clean) DO_CLEAN=0; shift ;; @@ -78,7 +78,7 @@ matrix::parse_args() { common::is_uint "${RUN_SECS}" || matrix::die "--run-seconds must be an integer" [ "${RUN_SECS}" -gt 0 ] || matrix::die "--run-seconds must be > 0" - common::is_uint "${VALIDATORS}" || matrix::die "--validators must be an integer" + common::is_uint "${NODES}" || matrix::die "--nodes must be an integer" } matrix::split_modes() { @@ -215,7 +215,7 @@ matrix::main() { host) matrix::run_case "host" \ "${ROOT_DIR}/scripts/run/run-examples.sh" \ - -t "${RUN_SECS}" -v "${VALIDATORS}" \ + -t "${RUN_SECS}" -n "${NODES}" \ "${forward[@]}" \ host ;; @@ -223,7 +223,7 @@ matrix::main() { if [ "${SKIP_IMAGE_BUILD_VARIANTS}" -eq 0 ]; then matrix::run_case "compose.image_build" \ "${ROOT_DIR}/scripts/run/run-examples.sh" \ - -t "${RUN_SECS}" -v "${VALIDATORS}" \ + -t "${RUN_SECS}" -n "${NODES}" \ "${forward[@]}" \ compose else @@ -233,7 +233,7 @@ matrix::main() { matrix::run_case "compose.skip_image_build" \ "${ROOT_DIR}/scripts/run/run-examples.sh" \ --no-image-build \ - -t "${RUN_SECS}" -v "${VALIDATORS}" \ + -t "${RUN_SECS}" -n "${NODES}" \ "${forward[@]}" \ compose ;; @@ -250,14 +250,14 @@ matrix::main() { # On non-docker-desktop clusters, run-examples.sh defaults to skipping local image builds # since 
the cluster can't see them. Honor the matrix "force" option by overriding. if [ "${ctx}" != "docker-desktop" ] && [ "${FORCE_K8S_IMAGE_BUILD}" -eq 1 ]; then - export NOMOS_FORCE_IMAGE_BUILD=1 + export LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD=1 fi matrix::run_case "k8s.image_build" \ "${ROOT_DIR}/scripts/run/run-examples.sh" \ - -t "${RUN_SECS}" -v "${VALIDATORS}" \ + -t "${RUN_SECS}" -n "${NODES}" \ "${forward[@]}" \ k8s - unset NOMOS_FORCE_IMAGE_BUILD || true + unset LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD || true else echo "==> [k8s] Detected context '${ctx}'; skipping image-build variant (use --force-k8s-image-build to override)" fi @@ -268,7 +268,7 @@ matrix::main() { matrix::run_case "k8s.skip_image_build" \ "${ROOT_DIR}/scripts/run/run-examples.sh" \ --no-image-build \ - -t "${RUN_SECS}" -v "${VALIDATORS}" \ + -t "${RUN_SECS}" -n "${NODES}" \ "${forward[@]}" \ k8s ;; diff --git a/scripts/setup/setup-circuits-stack.sh b/scripts/setup/setup-circuits-stack.sh deleted file mode 100755 index 324af77..0000000 --- a/scripts/setup/setup-circuits-stack.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -if [ -z "${BASH_VERSION:-}" ]; then - exec bash "$0" "$@" -fi - -# shellcheck disable=SC1091 -. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../lib/common.sh" - -readonly DEFAULT_CIRCUITS_VERSION="v0.3.2" -readonly DEFAULT_LINUX_PLATFORM="linux-x86_64" - -readonly DEFAULT_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params" -readonly DEFAULT_KZG_FILE="kzgrs_test_params" -readonly DEFAULT_KZG_PARAMS_RELPATH="tests/kzgrs/kzgrs_test_params" -readonly RAW_GITHUB_BASE_URL="https://raw.githubusercontent.com" - -setup_circuits_stack::usage() { - cat <<'EOF' -Usage: scripts/setup/setup-circuits-stack.sh [VERSION] - -Prepares circuits for both the Docker image (Linux/x86_64) and the host (for -witness generators). - -Env overrides: - STACK_DIR Where to place the Linux bundle (default: testing-framework/assets/stack/kzgrs_test_params) - HOST_DIR Where to place the host bundle (default: .tmp/logos-blockchain-circuits-host) - LINUX_STAGE_DIR Optional staging dir for the Linux bundle (default: .tmp/logos-blockchain-circuits-linux) - NOMOS_CIRCUITS_PLATFORM Force host platform (e.g., macos-aarch64) - NOMOS_CIRCUITS_REBUILD_RAPIDSNARK Set to 1 to force rebuild (host bundle only) -EOF -} - -setup_circuits_stack::fail_with_usage() { - echo "$1" >&2 - setup_circuits_stack::usage - exit 1 -} - -setup_circuits_stack::realpath_py() { - python3 - "$1" <<'PY' -import os, sys -print(os.path.realpath(sys.argv[1])) -PY -} - -setup_circuits_stack::detect_platform() { - local os arch - case "$(uname -s)" in - Linux*) os="linux" ;; - Darwin*) os="macos" ;; - MINGW*|MSYS*|CYGWIN*) os="windows" ;; - *) common::die "Unsupported OS: $(uname -s)" ;; - esac - - case "$(uname -m)" in - x86_64) arch="x86_64" ;; - aarch64|arm64) arch="aarch64" ;; - *) common::die "Unsupported arch: $(uname -m)" ;; - esac - - echo "${os}-${arch}" -} - -setup_circuits_stack::fetch_bundle() { - local platform="$1" - local dest="$2" - local rebuild="${3:-0}" - - # Install into a temporary directory first and only replace `${dest}` once we - # have a complete bundle. This avoids deleting a working install if GitHub - # returns transient errors (e.g. 502/504). - local temp_dest - temp_dest="$(mktemp -d)" - - if ! 
NOMOS_CIRCUITS_PLATFORM="${platform}" \ - NOMOS_CIRCUITS_REBUILD_RAPIDSNARK="${rebuild}" \ - "${ROOT_DIR}/scripts/setup/setup-logos-blockchain-circuits.sh" "${VERSION}" "${temp_dest}"; then - rm -rf "${temp_dest}" - return 1 - fi - - rm -rf "${dest}" - mkdir -p "$(dirname "${dest}")" - mv "${temp_dest}" "${dest}" -} - -setup_circuits_stack::fetch_kzg_params() { - local dest_dir="$1" - local dest_file="${dest_dir}/${KZG_FILE}" - local url="${RAW_GITHUB_BASE_URL}/logos-co/nomos-node/${NOMOS_NODE_REV}/${DEFAULT_KZG_PARAMS_RELPATH}" - - echo "Fetching KZG parameters from ${url}" - curl -fsSL "${url}" -o "${dest_file}" -} - -setup_circuits_stack::load_env() { - ROOT_DIR="$(common::repo_root)" - export ROOT_DIR - - common::require_file "${ROOT_DIR}/versions.env" - # shellcheck disable=SC1091 - . "${ROOT_DIR}/versions.env" - common::maybe_source "${ROOT_DIR}/paths.env" - - KZG_DIR_REL="${NOMOS_KZG_DIR_REL:-${DEFAULT_KZG_DIR_REL}}" - KZG_FILE="${NOMOS_KZG_FILE:-${DEFAULT_KZG_FILE}}" - HOST_DIR_REL_DEFAULT="${NOMOS_CIRCUITS_HOST_DIR_REL:-.tmp/logos-blockchain-circuits-host}" - LINUX_DIR_REL_DEFAULT="${NOMOS_CIRCUITS_LINUX_DIR_REL:-.tmp/logos-blockchain-circuits-linux}" - - VERSION="${VERSION:-${DEFAULT_CIRCUITS_VERSION}}" - STACK_DIR="${STACK_DIR:-${ROOT_DIR}/${KZG_DIR_REL}}" - HOST_DIR="${HOST_DIR:-${ROOT_DIR}/${HOST_DIR_REL_DEFAULT}}" - LINUX_STAGE_DIR="${LINUX_STAGE_DIR:-${ROOT_DIR}/${LINUX_DIR_REL_DEFAULT}}" - - NOMOS_NODE_REV="${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV in versions.env or env}" - - # Force non-interactive installs so repeated runs do not prompt. - export NOMOS_CIRCUITS_NONINTERACTIVE=1 -} - -setup_circuits_stack::main() { - if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then - setup_circuits_stack::usage - exit 0 - fi - - setup_circuits_stack::load_env - if [ -n "${1:-}" ]; then - VERSION="$1" - fi - - echo "Preparing circuits (version ${VERSION})" - echo "Workspace: ${ROOT_DIR}" - - local linux_platform="${DEFAULT_LINUX_PLATFORM}" - - echo "Installing Linux bundle for Docker image into ${STACK_DIR}" - local stage_real stack_real - stage_real="$(setup_circuits_stack::realpath_py "${LINUX_STAGE_DIR}")" - stack_real="$(setup_circuits_stack::realpath_py "${STACK_DIR}")" - - if [ "${stage_real}" = "${stack_real}" ]; then - rm -rf "${STACK_DIR}" - setup_circuits_stack::fetch_bundle "${linux_platform}" "${STACK_DIR}" 0 - setup_circuits_stack::fetch_kzg_params "${STACK_DIR}" - else - rm -rf "${LINUX_STAGE_DIR}" - mkdir -p "${LINUX_STAGE_DIR}" - setup_circuits_stack::fetch_bundle "${linux_platform}" "${LINUX_STAGE_DIR}" 0 - rm -rf "${STACK_DIR}" - mkdir -p "${STACK_DIR}" - cp -R "${LINUX_STAGE_DIR}/." 
"${STACK_DIR}/" - setup_circuits_stack::fetch_kzg_params "${STACK_DIR}" - fi - echo "Linux bundle ready at ${STACK_DIR}" - - local host_platform - host_platform="${NOMOS_CIRCUITS_PLATFORM:-$(setup_circuits_stack::detect_platform)}" - if [[ "${host_platform}" == "${linux_platform}" ]]; then - echo "Host platform ${host_platform} matches Linux bundle; host can reuse ${STACK_DIR}" - echo "Export if you want to be explicit:" - echo " export NOMOS_CIRCUITS=\"${STACK_DIR}\"" - else - echo "Host platform detected: ${host_platform}; installing host-native bundle into ${HOST_DIR}" - setup_circuits_stack::fetch_bundle "${host_platform}" "${HOST_DIR}" "${NOMOS_CIRCUITS_REBUILD_RAPIDSNARK:-0}" - setup_circuits_stack::fetch_kzg_params "${HOST_DIR}" - echo "Host bundle ready at ${HOST_DIR}" - echo - echo "Set for host runs:" - echo " export NOMOS_CIRCUITS=\"${HOST_DIR}\"" - fi - - cat <<'EOF' - -Done. -- For Docker/compose: rebuild the image to bake the Linux bundle: - scripts/build/build_test_image.sh -- For host runs (e.g., compose_runner): ensure NOMOS_CIRCUITS points to the host bundle above. -EOF -} - -if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - setup_circuits_stack::main "$@" -fi diff --git a/scripts/setup/setup-observability.sh b/scripts/setup/setup-observability.sh index a8f9d03..539c0b7 100755 --- a/scripts/setup/setup-observability.sh +++ b/scripts/setup/setup-observability.sh @@ -17,12 +17,12 @@ Usage: Compose: - Runs Prometheus (+ OTLP receiver) and Grafana via docker compose. - - Prints NOMOS_METRICS_* / NOMOS_GRAFANA_URL exports to wire into runs. + - Prints LOGOS_BLOCKCHAIN_METRICS_* / LOGOS_BLOCKCHAIN_GRAFANA_URL exports to wire into runs. Kubernetes: - Installs prometheus-community/kube-prometheus-stack into namespace "logos-observability" and optionally loads Logos Grafana dashboards. - - Prints port-forward commands + NOMOS_METRICS_* / NOMOS_GRAFANA_URL exports. + - Prints port-forward commands + LOGOS_BLOCKCHAIN_METRICS_* / LOGOS_BLOCKCHAIN_GRAFANA_URL exports. USAGE } @@ -43,14 +43,14 @@ compose_run() { compose_env() { cat <<'EOF' -export NOMOS_METRICS_QUERY_URL=http://localhost:9090 -export NOMOS_METRICS_OTLP_INGEST_URL=http://host.docker.internal:9090/api/v1/otlp/v1/metrics -export NOMOS_GRAFANA_URL=http://localhost:3000 +export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090 +export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://host.docker.internal:9090/api/v1/otlp/v1/metrics +export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000 EOF } -k8s_namespace() { echo "${LOGOS_OBSERVABILITY_NAMESPACE:-${NOMOS_OBSERVABILITY_NAMESPACE:-logos-observability}}"; } -k8s_release() { echo "${LOGOS_OBSERVABILITY_RELEASE:-${NOMOS_OBSERVABILITY_RELEASE:-logos-observability}}"; } +k8s_namespace() { echo "${LOGOS_OBSERVABILITY_NAMESPACE:-${LOGOS_BLOCKCHAIN_OBSERVABILITY_NAMESPACE:-logos-observability}}"; } +k8s_release() { echo "${LOGOS_OBSERVABILITY_RELEASE:-${LOGOS_BLOCKCHAIN_OBSERVABILITY_RELEASE:-logos-observability}}"; } k8s_values() { echo "${ROOT}/scripts/observability/k8s/kube-prometheus-stack.values.yaml"; } k8s_install() { @@ -119,14 +119,14 @@ k8s_env() { cat </dev/null || true)" - case "${info}" in - *ELF*) : ;; - *) - echo "Circuits executable is not ELF: ${path} (${info}); forcing circuits download" - require_linux_execs=1 - return 0 - ;; - esac - - local pattern - pattern="$(expect_arch_pattern "${TARGET_ARCH}")" - if [ -n "${pattern}" ] && ! 
echo "${info}" | grep -Eqi "${pattern}"; then - echo "Circuits executable arch mismatch: ${path} (${info}); forcing circuits download" - require_linux_execs=1 - fi -} - -check_linux_exec /opt/circuits/zksign/witness_generator -check_linux_exec /opt/circuits/pol/witness_generator - -if [ "${RAPIDSNARK_FORCE_REBUILD:-0}" = "1" ]; then - echo "Forcing rapidsnark rebuild for /opt/circuits" - scripts/build/build-rapidsnark.sh /opt/circuits -elif [ -f "/opt/circuits/prover" ]; then - PROVER_INFO="$(file -b /opt/circuits/prover || true)" - case "${TARGET_ARCH}" in - x86_64) EXPECT_ARCH="x86-64" ;; - aarch64|arm64) EXPECT_ARCH="aarch64" ;; - *) EXPECT_ARCH="${TARGET_ARCH}" ;; - esac - if [ -n "${PROVER_INFO}" ] && ! echo "${PROVER_INFO}" | grep -qi "${EXPECT_ARCH}"; then - echo "Circuits prover architecture (${PROVER_INFO}) does not match target ${TARGET_ARCH}; rebuilding rapidsnark binaries" - RAPIDSNARK_FORCE_REBUILD=1 \ - scripts/build/build-rapidsnark.sh /opt/circuits - fi -fi - -if [ "${require_linux_execs}" -eq 1 ] || [ ! -f "/opt/circuits/pol/verification_key.json" ]; then - echo "Downloading ${VERSION} circuits bundle for ${CIRCUITS_PLATFORM}" - NOMOS_CIRCUITS_PLATFORM="${CIRCUITS_PLATFORM}" \ - NOMOS_CIRCUITS_REBUILD_RAPIDSNARK=1 \ - RAPIDSNARK_BUILD_GMP=1 \ - scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "/opt/circuits" -fi diff --git a/testing-framework/assets/stack/scripts/run_nomos.sh b/testing-framework/assets/stack/scripts/run_nomos.sh index 2e6f0e6..a45a202 100755 --- a/testing-framework/assets/stack/scripts/run_nomos.sh +++ b/testing-framework/assets/stack/scripts/run_nomos.sh @@ -2,11 +2,11 @@ set -e -role="${1:-validator}" +role="${1:-node}" bin_for_role() { case "$1" in - validator) echo "/usr/bin/logos-blockchain-node" ;; + node) echo "/usr/bin/logos-blockchain-node" ;; *) echo "Unknown role: $1" >&2; exit 2 ;; esac } @@ -39,16 +39,14 @@ check_binary_arch() { bin_path="$(bin_for_role "$role")" check_binary_arch "$bin_path" "logos-blockchain-${role}" -KZG_CONTAINER_PATH="${NOMOS_KZG_CONTAINER_PATH:-/kzgrs_test_params/kzgrs_test_params}" host_identifier_default="${role}-$(hostname -i)" export CFG_FILE_PATH="/config.yaml" \ - CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:${NOMOS_CFGSYNC_PORT:-4400}}" \ + CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:${LOGOS_BLOCKCHAIN_CFGSYNC_PORT:-4400}}" \ CFG_HOST_IP=$(hostname -i) \ CFG_HOST_KIND="${CFG_HOST_KIND:-$role}" \ CFG_HOST_IDENTIFIER="${CFG_HOST_IDENTIFIER:-$host_identifier_default}" \ - LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH="${LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH:-${KZG_CONTAINER_PATH}}" \ - NOMOS_TIME_BACKEND="${NOMOS_TIME_BACKEND:-monotonic}" \ + LOGOS_BLOCKCHAIN_TIME_BACKEND="${LOGOS_BLOCKCHAIN_TIME_BACKEND:-monotonic}" \ LOG_LEVEL="${LOG_LEVEL:-INFO}" \ POL_PROOF_DEV_MODE="${POL_PROOF_DEV_MODE:-true}" diff --git a/testing-framework/assets/stack/scripts/run_nomos_node.sh b/testing-framework/assets/stack/scripts/run_nomos_node.sh index 12d8bd2..94a970c 100755 --- a/testing-framework/assets/stack/scripts/run_nomos_node.sh +++ b/testing-framework/assets/stack/scripts/run_nomos_node.sh @@ -1,2 +1,2 @@ #!/bin/sh -exec /etc/nomos/scripts/run_nomos.sh validator +exec /etc/nomos/scripts/run_nomos.sh node diff --git a/testing-framework/configs/Cargo.toml b/testing-framework/configs/Cargo.toml index 1a0713c..50d4634 100644 --- a/testing-framework/configs/Cargo.toml +++ b/testing-framework/configs/Cargo.toml @@ -10,7 +10,6 @@ repository.workspace = true version = "0.1.0" [dependencies] -blst = "0.3.11" chain-leader = { 
workspace = true } chain-network = { workspace = true } chain-service = { workspace = true } diff --git a/testing-framework/configs/src/constants.rs b/testing-framework/configs/src/constants.rs index c7bb153..39dd889 100644 --- a/testing-framework/configs/src/constants.rs +++ b/testing-framework/configs/src/constants.rs @@ -32,23 +32,11 @@ pub const DEFAULT_DA_NETWORK_PORT: u16 = 3300; /// Default blend network port. pub const DEFAULT_BLEND_NETWORK_PORT: u16 = 3400; //4401; -/// Resolve cfgsync port from `NOMOS_CFGSYNC_PORT`, falling back to the default. +/// Resolve cfgsync port from `LOGOS_BLOCKCHAIN_CFGSYNC_PORT`, falling back to +/// the default. pub fn cfgsync_port() -> u16 { tf_env::nomos_cfgsync_port().unwrap_or(DEFAULT_CFGSYNC_PORT) } -/// Default KZG parameters file name. -pub const KZG_PARAMS_FILENAME: &str = "kzgrs_test_params"; -/// Default container path for KZG parameters (compose/k8s mount point). -pub const DEFAULT_KZG_CONTAINER_PATH: &str = "/kzgrs_test_params/kzgrs_test_params"; - -/// Resolve container KZG path from `NOMOS_KZG_CONTAINER_PATH`, falling back to -/// the default. -pub fn kzg_container_path() -> String { - tf_env::nomos_kzg_container_path().unwrap_or_else(|| DEFAULT_KZG_CONTAINER_PATH.to_string()) -} - /// Default stack assets directory. pub const DEFAULT_ASSETS_STACK_DIR: &str = "testing-framework/assets/stack"; -/// Default host-relative directory for KZG assets. -pub const DEFAULT_KZG_HOST_DIR: &str = "testing-framework/assets/stack/kzgrs_test_params"; diff --git a/testing-framework/configs/src/nodes/mod.rs b/testing-framework/configs/src/nodes/mod.rs index 2071991..085d7d8 100644 --- a/testing-framework/configs/src/nodes/mod.rs +++ b/testing-framework/configs/src/nodes/mod.rs @@ -1,4 +1,4 @@ pub(crate) mod blend; pub(crate) mod common; pub mod kms; -pub mod validator; +pub mod node; diff --git a/testing-framework/configs/src/nodes/validator.rs b/testing-framework/configs/src/nodes/node.rs similarity index 84% rename from testing-framework/configs/src/nodes/validator.rs rename to testing-framework/configs/src/nodes/node.rs index 91f47ad..33692ae 100644 --- a/testing-framework/configs/src/nodes/validator.rs +++ b/testing-framework/configs/src/nodes/node.rs @@ -1,5 +1,5 @@ use nomos_node::{ - Config as ValidatorConfig, RocksBackendSettings, config::deployment::DeploymentSettings, + Config as NodeConfig, RocksBackendSettings, config::deployment::DeploymentSettings, }; use nomos_sdp::SdpSettings; @@ -16,15 +16,15 @@ use crate::{ }; #[must_use] -pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig { +pub fn create_node_config(config: GeneralConfig) -> NodeConfig { let network_config = config.network_config.clone(); let (blend_user_config, blend_deployment, network_deployment) = build_blend_service_config(&config.blend_config); let deployment_settings = - build_validator_deployment_settings(&config, blend_deployment, network_deployment); + build_node_deployment_settings(&config, blend_deployment, network_deployment); - ValidatorConfig { + NodeConfig { network: network_config, blend: blend_user_config, deployment: deployment_settings, @@ -41,7 +41,7 @@ pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig { } } -fn build_validator_deployment_settings( +fn build_node_deployment_settings( config: &GeneralConfig, blend_deployment: nomos_node::config::blend::deployment::Settings, network_deployment: nomos_node::config::network::deployment::Settings, diff --git a/testing-framework/configs/src/timeouts.rs 
b/testing-framework/configs/src/timeouts.rs index 8741e04..58fd667 100644 --- a/testing-framework/configs/src/timeouts.rs +++ b/testing-framework/configs/src/timeouts.rs @@ -18,33 +18,42 @@ fn env_duration(key: &str, default: u64) -> Duration { } pub fn dispersal_timeout() -> Duration { - env_duration("NOMOS_DISPERSAL_TIMEOUT_SECS", DISPERSAL_TIMEOUT_SECS) + env_duration( + "LOGOS_BLOCKCHAIN_DISPERSAL_TIMEOUT_SECS", + DISPERSAL_TIMEOUT_SECS, + ) } pub fn retry_cooldown() -> Duration { - env_duration("NOMOS_RETRY_COOLDOWN_SECS", RETRY_COOLDOWN_SECS) + env_duration("LOGOS_BLOCKCHAIN_RETRY_COOLDOWN_SECS", RETRY_COOLDOWN_SECS) } pub fn grace_period() -> Duration { - env_duration("NOMOS_GRACE_PERIOD_SECS", GRACE_PERIOD_SECS) + env_duration("LOGOS_BLOCKCHAIN_GRACE_PERIOD_SECS", GRACE_PERIOD_SECS) } pub fn prune_duration() -> Duration { - env_duration("NOMOS_PRUNE_DURATION_SECS", PRUNE_DURATION_SECS) + env_duration("LOGOS_BLOCKCHAIN_PRUNE_DURATION_SECS", PRUNE_DURATION_SECS) } pub fn prune_interval() -> Duration { - env_duration("NOMOS_PRUNE_INTERVAL_SECS", PRUNE_INTERVAL_SECS) + env_duration("LOGOS_BLOCKCHAIN_PRUNE_INTERVAL_SECS", PRUNE_INTERVAL_SECS) } pub fn share_duration() -> Duration { - env_duration("NOMOS_SHARE_DURATION_SECS", SHARE_DURATION_SECS) + env_duration("LOGOS_BLOCKCHAIN_SHARE_DURATION_SECS", SHARE_DURATION_SECS) } pub fn commitments_wait() -> Duration { - env_duration("NOMOS_COMMITMENTS_WAIT_SECS", COMMITMENTS_WAIT_SECS) + env_duration( + "LOGOS_BLOCKCHAIN_COMMITMENTS_WAIT_SECS", + COMMITMENTS_WAIT_SECS, + ) } pub fn sdp_trigger_delay() -> Duration { - env_duration("NOMOS_SDP_TRIGGER_DELAY_SECS", SDP_TRIGGER_DELAY_SECS) + env_duration( + "LOGOS_BLOCKCHAIN_SDP_TRIGGER_DELAY_SECS", + SDP_TRIGGER_DELAY_SECS, + ) } diff --git a/testing-framework/core/src/kzg.rs b/testing-framework/core/src/kzg.rs deleted file mode 100644 index aa25f08..0000000 --- a/testing-framework/core/src/kzg.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::path::{Path, PathBuf}; - -use testing_framework_config::constants::{DEFAULT_KZG_CONTAINER_PATH, DEFAULT_KZG_HOST_DIR}; -use testing_framework_env as tf_env; - -/// Default in-image path for KZG params used by testnet images. -pub const DEFAULT_IN_IMAGE_KZG_PARAMS_PATH: &str = "/opt/nomos/kzg-params/kzgrs_test_params"; - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum KzgMode { - HostPath, - InImage, -} - -impl KzgMode { - #[must_use] - pub fn from_env_or_default() -> Self { - match tf_env::nomos_kzg_mode().as_deref() { - Some("hostPath") => Self::HostPath, - Some("inImage") => Self::InImage, - None => Self::InImage, - Some(other) => { - tracing::warn!( - value = other, - "unknown NOMOS_KZG_MODE; defaulting to inImage" - ); - Self::InImage - } - } - } -} - -/// Canonical KZG parameters model used by runners and config distribution. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct KzgParamsSpec { - pub mode: KzgMode, - /// Value written into node configs (cfgsync `global_params_path`) and, - /// where applicable, exported as `LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH` for - /// node processes. - pub node_params_path: String, - /// Host directory that must exist when running in `HostPath` mode. 
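Reviewer note, not part of the patch: the `env_duration(key, default)` helper shown as hunk context above is the single resolution point for all of the renamed timeout variables. A minimal self-contained sketch of how such a helper behaves, assuming seconds-as-integer parsing (the real body is outside this hunk):

```rust
use std::{env, time::Duration};

// Sketch only: the crate's actual parsing/fallback logic is not shown in the
// hunk above, so this body is an assumption.
fn env_duration(key: &str, default: u64) -> Duration {
    env::var(key)
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok())
        .map(Duration::from_secs)
        .unwrap_or_else(|| Duration::from_secs(default))
}

fn main() {
    // After this patch the override uses the LOGOS_BLOCKCHAIN_ prefix, e.g.
    //   LOGOS_BLOCKCHAIN_DISPERSAL_TIMEOUT_SECS=45
    // The default of 30 is illustrative, not the crate's constant.
    let timeout = env_duration("LOGOS_BLOCKCHAIN_DISPERSAL_TIMEOUT_SECS", 30);
    println!("dispersal timeout: {timeout:?}");
}
```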
- pub host_params_dir: Option<PathBuf>, -} - -impl KzgParamsSpec { - #[must_use] - pub fn for_compose(use_kzg_mount: bool) -> Self { - let node_params_path = tf_env::nomos_kzgrs_params_path().unwrap_or_else(|| { - if use_kzg_mount { - DEFAULT_KZG_CONTAINER_PATH.to_string() - } else { - DEFAULT_IN_IMAGE_KZG_PARAMS_PATH.to_string() - } - }); - Self { - mode: if use_kzg_mount { - KzgMode::HostPath - } else { - KzgMode::InImage - }, - node_params_path, - host_params_dir: None, - } - } - - #[must_use] - pub fn for_k8s(root: &Path) -> Self { - let mode = KzgMode::from_env_or_default(); - match mode { - KzgMode::HostPath => Self { - mode, - node_params_path: DEFAULT_KZG_CONTAINER_PATH.to_string(), - host_params_dir: Some(root.join( - tf_env::nomos_kzg_dir_rel().unwrap_or_else(|| DEFAULT_KZG_HOST_DIR.to_string()), - )), - }, - KzgMode::InImage => Self { - mode, - node_params_path: tf_env::nomos_kzgrs_params_path() - .unwrap_or_else(|| DEFAULT_IN_IMAGE_KZG_PARAMS_PATH.to_string()), - host_params_dir: None, - }, - } - } -} diff --git a/testing-framework/core/src/lib.rs b/testing-framework/core/src/lib.rs index c5a6175..244b161 100644 --- a/testing-framework/core/src/lib.rs +++ b/testing-framework/core/src/lib.rs @@ -1,4 +1,3 @@ -pub mod kzg; pub mod manual; pub mod nodes; pub mod scenario; diff --git a/testing-framework/core/src/manual.rs b/testing-framework/core/src/manual.rs index f609e7d..1f8618b 100644 --- a/testing-framework/core/src/manual.rs +++ b/testing-framework/core/src/manual.rs @@ -5,7 +5,7 @@ use crate::scenario::{DynError, StartNodeOptions, StartedNode}; /// Interface for imperative, deployer-backed manual clusters. #[async_trait] pub trait ManualClusterHandle: Send + Sync { - async fn start_validator_with( + async fn start_node_with( &self, name: &str, options: StartNodeOptions, diff --git a/testing-framework/core/src/nodes/common/lifecycle/spawn.rs b/testing-framework/core/src/nodes/common/lifecycle/spawn.rs index 790e862..b8409c9 100644 --- a/testing-framework/core/src/nodes/common/lifecycle/spawn.rs +++ b/testing-framework/core/src/nodes/common/lifecycle/spawn.rs @@ -12,8 +12,8 @@ use tracing::debug; use crate::nodes::common::config::injection::normalize_ed25519_sigs; -/// Configure tracing logger to write into `NOMOS_LOG_DIR` if set, else into the -/// provided base dir. +/// Configure tracing logger to write into `LOGOS_BLOCKCHAIN_LOG_DIR` if set, +/// else into the provided base dir.
pub fn configure_logging<F>(base_dir: &Path, prefix: &str, set_logger: F) where F: FnOnce(FileConfig), diff --git a/testing-framework/core/src/nodes/mod.rs b/testing-framework/core/src/nodes/mod.rs index 781202e..d6abce5 100644 --- a/testing-framework/core/src/nodes/mod.rs +++ b/testing-framework/core/src/nodes/mod.rs @@ -1,6 +1,6 @@ mod api_client; pub mod common; -pub mod validator; +pub mod node; use std::sync::LazyLock; diff --git a/testing-framework/core/src/nodes/validator.rs b/testing-framework/core/src/nodes/node.rs similarity index 81% rename from testing-framework/core/src/nodes/validator.rs rename to testing-framework/core/src/nodes/node.rs index 03ac59b..581ff14 100644 --- a/testing-framework/core/src/nodes/validator.rs +++ b/testing-framework/core/src/nodes/node.rs @@ -2,7 +2,7 @@ use std::{ops::Deref, path::PathBuf, time::Duration}; use nomos_node::Config; use nomos_tracing_service::LoggerLayer; -pub use testing_framework_config::nodes::validator::create_validator_config; +pub use testing_framework_config::nodes::node::create_node_config; use tracing::{debug, info}; use super::{persist_tempdir, should_persist_tempdir}; @@ -30,16 +30,11 @@ fn binary_path() -> PathBuf { BinaryResolver::resolve_path(&cfg) } -pub enum Pool { - Da, - Mantle, -} - -pub struct Validator { +pub struct Node { handle: NodeHandle, } -impl Deref for Validator { +impl Deref for Node { type Target = NodeHandle; fn deref(&self) -> &Self::Target { @@ -47,26 +42,26 @@ } -impl Drop for Validator { +impl Drop for Node { fn drop(&mut self) { if should_persist_tempdir() && let Err(e) = persist_tempdir(&mut self.handle.tempdir, "logos-blockchain-node") { - debug!(error = ?e, "failed to persist validator tempdir"); + debug!(error = ?e, "failed to persist node tempdir"); } - debug!("stopping validator process"); + debug!("stopping node process"); kill_child(&mut self.handle.child); } } -impl Validator { - /// Check if the validator process is still running +impl Node { + /// Check if the node process is still running pub fn is_running(&mut self) -> bool { is_running(&mut self.handle.child) } - /// Wait for the validator process to exit, with a timeout + /// Wait for the node process to exit, with a timeout /// Returns true if the process exited within the timeout, false otherwise pub async fn wait_for_exit(&mut self, timeout: Duration) -> bool { self.handle.wait_for_exit(timeout).await @@ -77,13 +72,13 @@ let handle = spawn_node( config, &log_prefix, - "validator.yaml", + "node.yaml", binary_path(), !*IS_DEBUG_TRACING, ) .await?; - info!("validator spawned and ready"); + info!("node spawned and ready"); Ok(Self { handle }) } diff --git a/testing-framework/core/src/scenario/capabilities.rs b/testing-framework/core/src/scenario/capabilities.rs index b0c52b2..5695d19 100644 --- a/testing-framework/core/src/scenario/capabilities.rs +++ b/testing-framework/core/src/scenario/capabilities.rs @@ -2,7 +2,7 @@ use async_trait::async_trait; use reqwest::Url; use super::DynError; -use crate::{nodes::ApiClient, topology::generation::NodeRole}; +use crate::nodes::ApiClient; /// Marker type used by scenario builders to request node control support. #[derive(Clone, Copy, Debug, Default)] @@ -69,18 +69,18 @@ impl RequiresNodeControl for ObservabilityCapability { /// Interface exposed by runners that can restart nodes at runtime.
#[async_trait] pub trait NodeControlHandle: Send + Sync { - async fn restart_validator(&self, index: usize) -> Result<(), DynError>; + async fn restart_node(&self, index: usize) -> Result<(), DynError>; - async fn start_validator(&self, _name: &str) -> Result { - Err("start_validator not supported by this deployer".into()) + async fn start_node(&self, _name: &str) -> Result { + Err("start_node not supported by this deployer".into()) } - async fn start_validator_with( + async fn start_node_with( &self, _name: &str, _options: StartNodeOptions, ) -> Result { - Err("start_validator_with not supported by this deployer".into()) + Err("start_node_with not supported by this deployer".into()) } fn node_client(&self, _name: &str) -> Option { @@ -91,6 +91,5 @@ pub trait NodeControlHandle: Send + Sync { #[derive(Clone)] pub struct StartedNode { pub name: String, - pub role: NodeRole, pub api: ApiClient, } diff --git a/testing-framework/core/src/scenario/cfgsync.rs b/testing-framework/core/src/scenario/cfgsync.rs index 6ebe989..1dca652 100644 --- a/testing-framework/core/src/scenario/cfgsync.rs +++ b/testing-framework/core/src/scenario/cfgsync.rs @@ -5,7 +5,6 @@ use nomos_tracing_service::TracingSettings; use nomos_utils::bounded_duration::{MinimalBoundedDuration, SECOND}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use testing_framework_config::constants::kzg_container_path; use tracing::debug; use crate::topology::{configs::wallet::WalletConfig, generation::GeneratedTopology}; @@ -32,7 +31,6 @@ pub struct CfgSyncConfig { pub old_blobs_check_interval: Duration, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] pub blobs_validity_duration: Duration, - pub global_params_path: String, pub min_dispersal_peers: usize, pub min_replication_peers: usize, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] @@ -65,16 +63,12 @@ pub fn render_cfgsync_yaml(cfg: &CfgSyncConfig) -> Result { serde_yaml::to_string(&serializable).context("rendering cfgsync yaml") } -pub fn apply_topology_overrides( - cfg: &mut CfgSyncConfig, - topology: &GeneratedTopology, - use_kzg_mount: bool, -) { +pub fn apply_topology_overrides(cfg: &mut CfgSyncConfig, topology: &GeneratedTopology) { debug!( - validators = topology.validators().len(), - use_kzg_mount, "applying topology overrides to cfgsync config" + nodes = topology.nodes().len(), + "applying topology overrides to cfgsync config" ); - let hosts = topology.validators().len(); + let hosts = topology.nodes().len(); cfg.n_hosts = hosts; let consensus = &topology.config().consensus_params; @@ -83,14 +77,14 @@ pub fn apply_topology_overrides( let config = topology.config(); cfg.wallet = config.wallet_config.clone(); - cfg.ids = Some(topology.nodes().map(|node| node.id).collect()); - cfg.blend_ports = Some(topology.nodes().map(|node| node.blend_port).collect()); - - if use_kzg_mount { - // Compose mounts the bundle at /kzgrs_test_params; the proving key lives under - // pol/. 
- cfg.global_params_path = kzg_container_path() - }; + cfg.ids = Some(topology.nodes().iter().map(|node| node.id).collect()); + cfg.blend_ports = Some( + topology + .nodes() + .iter() + .map(|node| node.blend_port) + .collect(), + ); } #[serde_as] @@ -114,7 +108,6 @@ struct SerializableCfgSyncConfig { old_blobs_check_interval: Duration, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] blobs_validity_duration: Duration, - global_params_path: String, min_dispersal_peers: usize, min_replication_peers: usize, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] @@ -143,7 +136,6 @@ impl From<&CfgSyncConfig> for SerializableCfgSyncConfig { num_subnets: cfg.num_subnets, old_blobs_check_interval: cfg.old_blobs_check_interval, blobs_validity_duration: cfg.blobs_validity_duration, - global_params_path: cfg.global_params_path.clone(), min_dispersal_peers: cfg.min_dispersal_peers, min_replication_peers: cfg.min_replication_peers, monitor_failure_time_window: cfg.monitor_failure_time_window, diff --git a/testing-framework/core/src/scenario/definition.rs b/testing-framework/core/src/scenario/definition.rs index cc13e5e..522d0ab 100644 --- a/testing-framework/core/src/scenario/definition.rs +++ b/testing-framework/core/src/scenario/definition.rs @@ -104,7 +104,7 @@ pub type ScenarioBuilder = Builder<()>; /// Builder for shaping the scenario topology. pub struct TopologyConfigurator { builder: Builder, - validators: usize, + nodes: usize, network_star: bool, } @@ -123,14 +123,14 @@ impl Builder { } #[must_use] - pub fn with_node_counts(validators: usize) -> Self { + pub fn with_node_counts(nodes: usize) -> Self { Self::new(TopologyBuilder::new(TopologyConfig::with_node_numbers( - validators, + nodes, ))) } /// Convenience constructor that immediately enters topology configuration, - /// letting callers set counts via `validators`. + /// letting callers set counts via `nodes`. pub fn topology() -> TopologyConfigurator { TopologyConfigurator::new(Self::new(TopologyBuilder::new(TopologyConfig::empty()))) } @@ -262,7 +262,7 @@ impl Builder { let workloads: Vec> = workloads.into_iter().map(Arc::from).collect(); info!( - validators = generated.validators().len(), + nodes = generated.nodes().len(), duration_secs = duration.as_secs(), workloads = workloads.len(), expectations = expectations.len(), @@ -283,15 +283,15 @@ impl TopologyConfigurator { const fn new(builder: Builder) -> Self { Self { builder, - validators: 0, + nodes: 0, network_star: false, } } - /// Set the number of validator nodes. + /// Set the number of nodes. #[must_use] - pub fn validators(mut self, count: usize) -> Self { - self.validators = count; + pub fn nodes(mut self, count: usize) -> Self { + self.nodes = count; self } @@ -305,7 +305,7 @@ impl TopologyConfigurator { /// Finalize and return the underlying scenario builder. #[must_use] pub fn apply(self) -> Builder { - let mut config = TopologyConfig::with_node_numbers(self.validators); + let mut config = TopologyConfig::with_node_numbers(self.nodes); if self.network_star { config.network_params.libp2p_network_layout = Libp2pNetworkLayout::Star; } diff --git a/testing-framework/core/src/scenario/http_probe.rs b/testing-framework/core/src/scenario/http_probe.rs index 4838397..7dfc823 100644 --- a/testing-framework/core/src/scenario/http_probe.rs +++ b/testing-framework/core/src/scenario/http_probe.rs @@ -7,33 +7,21 @@ use thiserror::Error; use tokio::time::{Instant, sleep}; use tracing::{debug, info}; -/// Role used for labelling readiness probes. 
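Reviewer note, not part of the patch: for scenario authors the builder rename above is mechanical. A hedged call-site sketch (workload and expectation wiring elided; only `topology()`, `nodes()`, and `apply()` come from the hunk above):

```rust
// `.nodes(3)` replaces the old `.validators(3)` on TopologyConfigurator.
let builder = ScenarioBuilder::topology()
    .nodes(3)
    .apply();
// ...attach workloads/expectations and build the scenario as before...
```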
-#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum NodeRole { - Validator, -} - -impl NodeRole { - #[must_use] - pub const fn label(self) -> &'static str { - match self { - Self::Validator => "validator", - } - } -} +/// Label used for readiness probes. +pub const NODE_ROLE: &str = "node"; /// Error raised when HTTP readiness checks time out. #[derive(Clone, Copy, Debug, Error)] -#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", role = role.label())] +#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")] pub struct HttpReadinessError { - role: NodeRole, + role: &'static str, port: u16, timeout: Duration, } impl HttpReadinessError { #[must_use] - pub const fn new(role: NodeRole, port: u16, timeout: Duration) -> Self { + pub const fn new(role: &'static str, port: u16, timeout: Duration) -> Self { Self { role, port, @@ -42,7 +30,7 @@ impl HttpReadinessError { } #[must_use] - pub const fn role(&self) -> NodeRole { + pub const fn role(&self) -> &'static str { self.role } @@ -60,7 +48,7 @@ impl HttpReadinessError { /// Wait for HTTP readiness on the provided ports against localhost. pub async fn wait_for_http_ports( ports: &[u16], - role: NodeRole, + role: &'static str, timeout_duration: Duration, poll_interval: Duration, ) -> Result<(), HttpReadinessError> { @@ -70,7 +58,7 @@ pub async fn wait_for_http_ports( /// Wait for HTTP readiness on the provided ports against a specific host. pub async fn wait_for_http_ports_with_host( ports: &[u16], - role: NodeRole, + role: &'static str, host: &str, timeout_duration: Duration, poll_interval: Duration, @@ -80,7 +68,7 @@ pub async fn wait_for_http_ports_with_host( } info!( - role = role.label(), + role, ?ports, host, timeout_secs = timeout_duration.as_secs_f32(), @@ -106,13 +94,13 @@ pub async fn wait_for_http_ports_with_host( async fn wait_for_single_port( client: ReqwestClient, port: u16, - role: NodeRole, + role: &'static str, host: &str, timeout_duration: Duration, poll_interval: Duration, ) -> Result<(), HttpReadinessError> { let url = format!("http://{host}:{port}{}", paths::CRYPTARCHIA_INFO); - debug!(role = role.label(), %url, "probing HTTP endpoint"); + debug!(role, %url, "probing HTTP endpoint"); let start = Instant::now(); let deadline = start + timeout_duration; let mut attempts: u64 = 0; @@ -123,7 +111,7 @@ async fn wait_for_single_port( let last_failure: Option = match client.get(&url).send().await { Ok(response) if response.status().is_success() => { info!( - role = role.label(), + role, port, host, %url, @@ -142,7 +130,7 @@ async fn wait_for_single_port( if attempts == 1 || attempts % 10 == 0 { debug!( - role = role.label(), + role, port, host, %url, @@ -155,7 +143,7 @@ async fn wait_for_single_port( if Instant::now() >= deadline { info!( - role = role.label(), + role, port, host, %url, diff --git a/testing-framework/core/src/scenario/observability.rs b/testing-framework/core/src/scenario/observability.rs index 4b1371e..dd7f12e 100644 --- a/testing-framework/core/src/scenario/observability.rs +++ b/testing-framework/core/src/scenario/observability.rs @@ -60,9 +60,9 @@ impl ObservabilityInputs { /// vars are also accepted as aliases for backwards compatibility. 
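With the single-variant enum gone, readiness helpers label probes with a plain `&'static str`. A caller sketch (only `NODE_ROLE`, `wait_for_http_ports`, and `HttpReadinessError` come from the hunk above; the timeout and poll values are illustrative):

```rust
use std::time::Duration;

async fn wait_api(ports: &[u16]) -> Result<(), HttpReadinessError> {
    // NODE_ROLE replaces the old NodeRole::Validator label.
    wait_for_http_ports(
        ports,
        NODE_ROLE,
        Duration::from_secs(60),
        Duration::from_millis(500),
    )
    .await
}
```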
pub fn from_env() -> Result { Ok(Self { - metrics_query_url: read_url_var(&["NOMOS_METRICS_QUERY_URL"])?, - metrics_otlp_ingest_url: read_url_var(&["NOMOS_METRICS_OTLP_INGEST_URL"])?, - grafana_url: read_url_var(&["NOMOS_GRAFANA_URL"])?, + metrics_query_url: read_url_var(&["LOGOS_BLOCKCHAIN_METRICS_QUERY_URL"])?, + metrics_otlp_ingest_url: read_url_var(&["LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL"])?, + grafana_url: read_url_var(&["LOGOS_BLOCKCHAIN_GRAFANA_URL"])?, }) } diff --git a/testing-framework/core/src/scenario/runtime/metrics.rs b/testing-framework/core/src/scenario/runtime/metrics.rs index 4903513..7c6656f 100644 --- a/testing-framework/core/src/scenario/runtime/metrics.rs +++ b/testing-framework/core/src/scenario/runtime/metrics.rs @@ -6,8 +6,8 @@ use tracing::warn; pub const CONSENSUS_PROCESSED_BLOCKS: &str = "consensus_processed_blocks"; pub const CONSENSUS_TRANSACTIONS_TOTAL: &str = "consensus_transactions_total"; -const CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY: &str = - r#"sum(consensus_transactions_total{job=~"validator-.*"})"#; +const CONSENSUS_TRANSACTIONS_NODE_QUERY: &str = + r#"sum(consensus_transactions_total{job=~"node-.*"})"#; /// Telemetry handles available during a run. #[derive(Clone, Default)] @@ -71,21 +71,21 @@ impl Metrics { .prometheus() .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; - match handle.instant_samples(CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY) { + match handle.instant_samples(CONSENSUS_TRANSACTIONS_NODE_QUERY) { Ok(samples) if !samples.is_empty() => { return Ok(samples.into_iter().map(|sample| sample.value).sum()); } Ok(_) => { warn!( - query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, - "validator-specific consensus transaction metric returned no samples; falling back to aggregate counter" + query = CONSENSUS_TRANSACTIONS_NODE_QUERY, + "node-specific consensus transaction metric returned no samples; falling back to aggregate counter" ); } Err(err) => { warn!( - query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, + query = CONSENSUS_TRANSACTIONS_NODE_QUERY, error = %err, - "failed to query validator-specific consensus transaction metric; falling back to aggregate counter" + "failed to query node-specific consensus transaction metric; falling back to aggregate counter" ); } } diff --git a/testing-framework/core/src/scenario/runtime/node_clients.rs b/testing-framework/core/src/scenario/runtime/node_clients.rs index f118061..c5cbc56 100644 --- a/testing-framework/core/src/scenario/runtime/node_clients.rs +++ b/testing-framework/core/src/scenario/runtime/node_clients.rs @@ -11,7 +11,7 @@ use crate::{ topology::{deployment::Topology, generation::GeneratedTopology}, }; -/// Collection of API clients for the validatorset. +/// Collection of API clients for the node set. #[derive(Clone, Default)] pub struct NodeClients { inner: Arc>, @@ -19,49 +19,49 @@ pub struct NodeClients { #[derive(Default)] struct NodeClientsInner { - validators: Vec, + nodes: Vec, } impl NodeClients { #[must_use] /// Build clients from preconstructed vectors. - pub fn new(validators: Vec) -> Self { + pub fn new(nodes: Vec) -> Self { Self { - inner: Arc::new(RwLock::new(NodeClientsInner { validators })), + inner: Arc::new(RwLock::new(NodeClientsInner { nodes })), } } #[must_use] /// Derive clients from a spawned topology. 
pub fn from_topology(_descriptors: &GeneratedTopology, topology: &Topology) -> Self { - let validator_clients = topology.validators().iter().map(|node| { + let node_clients = topology.nodes().iter().map(|node| { let testing = node.testing_url(); ApiClient::from_urls(node.url(), testing) }); - Self::new(validator_clients.collect()) + Self::new(node_clients.collect()) } #[must_use] - /// Validator API clients. - pub fn validator_clients(&self) -> Vec { + /// Node API clients. + pub fn node_clients(&self) -> Vec { self.inner .read() .unwrap_or_else(|poisoned| poisoned.into_inner()) - .validators + .nodes .clone() } #[must_use] - /// Choose a random validator client if present. - pub fn random_validator(&self) -> Option { - let validators = self.validator_clients(); - if validators.is_empty() { + /// Choose a random node client if present. + pub fn random_node(&self) -> Option { + let nodes = self.node_clients(); + if nodes.is_empty() { return None; } let mut rng = thread_rng(); - let idx = rng.gen_range(0..validators.len()); - validators.get(idx).cloned() + let idx = rng.gen_range(0..nodes.len()); + nodes.get(idx).cloned() } /// Iterator over all clients. @@ -71,25 +71,24 @@ impl NodeClients { .read() .unwrap_or_else(|poisoned| poisoned.into_inner()); - guard.validators.iter().cloned().collect() + guard.nodes.iter().cloned().collect() } #[must_use] - /// Choose any random client from validators. + /// Choose any random client from nodes. pub fn any_client(&self) -> Option { let guard = self .inner .read() .unwrap_or_else(|poisoned| poisoned.into_inner()); - let validator_count = guard.validators.len(); - let total = validator_count; + let total = guard.nodes.len(); if total == 0 { return None; } let mut rng = thread_rng(); let choice = rng.gen_range(0..total); - guard.validators.get(choice).cloned() + guard.nodes.get(choice).cloned() } #[must_use] @@ -98,13 +97,13 @@ impl NodeClients { ClusterClient::new(self) } - pub fn add_validator(&self, client: ApiClient) { + pub fn add_node(&self, client: ApiClient) { let mut guard = self .inner .write() .unwrap_or_else(|poisoned| poisoned.into_inner()); - guard.validators.push(client); + guard.nodes.push(client); } pub fn clear(&self) { @@ -113,7 +112,7 @@ impl NodeClients { .write() .unwrap_or_else(|poisoned| poisoned.into_inner()); - guard.validators.clear(); + guard.nodes.clear(); } } diff --git a/testing-framework/core/src/topology/config.rs b/testing-framework/core/src/topology/config.rs index 656e813..ebcd3e1 100644 --- a/testing-framework/core/src/topology/config.rs +++ b/testing-framework/core/src/topology/config.rs @@ -20,7 +20,7 @@ use thiserror::Error; use crate::topology::{ configs::{GeneralConfig, time::default_time_config}, - generation::{GeneratedNodeConfig, GeneratedTopology, NodeRole}, + generation::{GeneratedNodeConfig, GeneratedTopology}, utils::{TopologyResolveError, create_kms_configs, resolve_ids, resolve_ports}, }; @@ -51,7 +51,7 @@ pub enum TopologyBuildError { /// High-level topology settings used to generate node configs for a scenario. 
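The `NodeClients` rename is API-visible: `validator_clients`, `random_validator`, and `add_validator` become `node_clients`, `random_node`, and `add_node`. A consumer sketch (`api_client` is a stand-in for any `ApiClient` value):

```rust
let clients = NodeClients::new(vec![]);
clients.add_node(api_client);            // was add_validator
if let Some(node) = clients.random_node() {
    // issue requests against the randomly selected node
    let _ = node;
}
```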
#[derive(Clone)] pub struct TopologyConfig { - pub n_validators: usize, + pub n_nodes: usize, pub consensus_params: ConsensusParams, pub network_params: NetworkParams, pub wallet_config: WalletConfig, @@ -62,7 +62,7 @@ impl TopologyConfig { #[must_use] pub fn empty() -> Self { Self { - n_validators: 0, + n_nodes: 0, consensus_params: ConsensusParams::default_for_participants(1), network_params: NetworkParams::default(), wallet_config: WalletConfig::default(), @@ -70,10 +70,10 @@ impl TopologyConfig { } #[must_use] - /// Convenience config with two validators for consensus-only scenarios. - pub fn two_validators() -> Self { + /// Convenience config with two nodes for consensus-only scenarios. + pub fn two_nodes() -> Self { Self { - n_validators: 2, + n_nodes: 2, consensus_params: ConsensusParams::default_for_participants(2), network_params: NetworkParams::default(), wallet_config: WalletConfig::default(), @@ -81,12 +81,12 @@ impl TopologyConfig { } #[must_use] - /// Build a topology with explicit validator counts. - pub fn with_node_numbers(validators: usize) -> Self { - let participants = validators; + /// Build a topology with explicit node counts. + pub fn with_node_numbers(nodes: usize) -> Self { + let participants = nodes; Self { - n_validators: validators, + n_nodes: nodes, consensus_params: ConsensusParams::default_for_participants(participants), network_params: NetworkParams::default(), wallet_config: WalletConfig::default(), @@ -133,15 +133,9 @@ impl TopologyBuilder { } #[must_use] - pub const fn with_validator_count(mut self, validators: usize) -> Self { - self.config.n_validators = validators; - self - } - - #[must_use] - /// Set validator counts. - pub const fn with_node_counts(mut self, validators: usize) -> Self { - self.config.n_validators = validators; + /// Set node counts. 
+ pub const fn with_node_count(mut self, nodes: usize) -> Self { + self.config.n_nodes = nodes; self } @@ -197,7 +191,7 @@ impl TopologyBuilder { let kms_configs = create_kms_configs(&blend_configs, &config.wallet_config.accounts); - let validators = build_node_descriptors( + let nodes = build_node_descriptors( &config, n_participants, &ids, @@ -212,7 +206,7 @@ impl TopologyBuilder { &time_config, )?; - Ok(GeneratedTopology { config, validators }) + Ok(GeneratedTopology { config, nodes }) } #[must_use] @@ -222,7 +216,7 @@ impl TopologyBuilder { } fn participant_count(config: &TopologyConfig) -> Result { - let n_participants = config.n_validators; + let n_participants = config.n_nodes; if n_participants == 0 { return Err(TopologyBuildError::EmptyParticipants); } @@ -298,7 +292,7 @@ fn build_node_descriptors( kms_configs: &[key_management_system_service::backend::preload::PreloadKMSBackendSettings], time_config: &testing_framework_config::topology::configs::time::GeneralTimeConfig, ) -> Result, TopologyBuildError> { - let mut validators = Vec::with_capacity(config.n_validators); + let mut nodes = Vec::with_capacity(config.n_nodes); for i in 0..n_participants { let consensus_config = @@ -325,21 +319,17 @@ fn build_node_descriptors( kms_config, }; - let (role, index) = (NodeRole::Validator, i); let descriptor = GeneratedNodeConfig { - role, - index, + index: i, id, general, blend_port, }; - match role { - NodeRole::Validator => validators.push(descriptor), - } + nodes.push(descriptor); } - Ok(validators) + Ok(nodes) } fn get_cloned( diff --git a/testing-framework/core/src/topology/deployment.rs b/testing-framework/core/src/topology/deployment.rs index dd78ebf..7cb2fc2 100644 --- a/testing-framework/core/src/topology/deployment.rs +++ b/testing-framework/core/src/topology/deployment.rs @@ -5,7 +5,7 @@ use thiserror::Error; use crate::{ nodes::{ common::node::SpawnNodeError, - validator::{Validator, create_validator_config}, + node::{Node, create_node_config}, }, topology::{ config::{TopologyBuildError, TopologyBuilder, TopologyConfig}, @@ -18,10 +18,10 @@ use crate::{ /// Runtime representation of a spawned topology with running nodes. 
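On the deployment side the rename flows through `Topology`. A sketch of local spawning after this patch (error handling elided; the names come from the hunks above and below):

```rust
// `two_nodes()` replaces `two_validators()`, and the spawned topology is
// inspected through `nodes()` instead of `validators()`.
let topology = Topology::spawn(TopologyConfig::two_nodes()).await?;
assert_eq!(topology.nodes().len(), 2);
topology.wait_network_ready().await?;
```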
pub struct Topology { - pub(crate) validators: Vec, + pub(crate) nodes: Vec, } -pub type DeployedNodes = Vec; +pub type DeployedNodes = Vec; #[derive(Debug, Error)] pub enum SpawnTopologyError { @@ -34,15 +34,15 @@ pub enum SpawnTopologyError { impl Topology { pub async fn spawn(config: TopologyConfig) -> Result { let generated = TopologyBuilder::new(config.clone()).build()?; - let n_validators = config.n_validators; + let n_nodes = config.n_nodes; let node_configs = generated - .nodes() + .iter() .map(|node| node.general.clone()) .collect::>(); - let validators = Self::spawn_validators(node_configs, n_validators).await?; + let nodes = Self::spawn_nodes(node_configs, n_nodes).await?; - Ok(Self { validators }) + Ok(Self { nodes }) } pub async fn spawn_with_empty_membership( @@ -56,32 +56,32 @@ impl Topology { .build()?; let node_configs = generated - .nodes() + .iter() .map(|node| node.general.clone()) .collect::>(); - let validators = Self::spawn_validators(node_configs, config.n_validators).await?; + let nodes = Self::spawn_nodes(node_configs, config.n_nodes).await?; - Ok(Self { validators }) + Ok(Self { nodes }) } - pub(crate) async fn spawn_validators( + pub(crate) async fn spawn_nodes( config: Vec, - n_validators: usize, + n_nodes: usize, ) -> Result { - let mut validators = Vec::new(); - for i in 0..n_validators { - let config = create_validator_config(config[i].clone()); - let label = format!("validator-{i}"); - validators.push(Validator::spawn(config, &label).await?); + let mut nodes = Vec::new(); + for i in 0..n_nodes { + let config = create_node_config(config[i].clone()); + let label = format!("node-{i}"); + nodes.push(Node::spawn(config, &label).await?); } - Ok(validators) + Ok(nodes) } #[must_use] - pub fn validators(&self) -> &[Validator] { - &self.validators + pub fn nodes(&self) -> &[Node] { + &self.nodes } pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> { @@ -105,14 +105,14 @@ impl Topology { } fn node_listen_ports(&self) -> Vec { - self.validators + self.nodes .iter() .map(|node| node.config().network.backend.swarm.port) .collect() } fn node_initial_peer_ports(&self) -> Vec> { - self.validators + self.nodes .iter() .map(|node| { node.config() @@ -127,15 +127,10 @@ impl Topology { } fn node_labels(&self) -> Vec { - self.validators + self.nodes .iter() .enumerate() - .map(|(idx, node)| { - format!( - "validator#{idx}@{}", - node.config().network.backend.swarm.port - ) - }) + .map(|(idx, node)| format!("node#{idx}@{}", node.config().network.backend.swarm.port)) .collect() } } diff --git a/testing-framework/core/src/topology/generation.rs b/testing-framework/core/src/topology/generation.rs index 8352bdd..e691742 100644 --- a/testing-framework/core/src/topology/generation.rs +++ b/testing-framework/core/src/topology/generation.rs @@ -9,16 +9,9 @@ use crate::topology::{ readiness::{HttpNetworkReadiness, ReadinessCheck, ReadinessError}, }; -/// Node role within the generated topology. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum NodeRole { - Validator, -} - /// Fully generated configuration for an individual node. #[derive(Clone)] pub struct GeneratedNodeConfig { - pub role: NodeRole, pub index: usize, pub id: [u8; 32], pub general: GeneralConfig, @@ -27,13 +20,7 @@ pub struct GeneratedNodeConfig { impl GeneratedNodeConfig { #[must_use] - /// Logical role of the node. - pub const fn role(&self) -> NodeRole { - self.role - } - - #[must_use] - /// Zero-based index within its role group. + /// Zero-based index within the topology. 
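With the role enum removed from generation as well, a `GeneratedNodeConfig` is identified by index alone. An iteration sketch using `iter()`, introduced just below as the replacement for the old `nodes()` iterator (`generated` is a `GeneratedTopology` built elsewhere):

```rust
for node in generated.iter() {
    // Labels derive from the index; there is no role() accessor anymore.
    println!("node-{} blend port {}", node.index(), node.blend_port);
}
```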
pub const fn index(&self) -> usize { self.index } @@ -59,7 +46,7 @@ impl GeneratedNodeConfig { #[derive(Clone)] pub struct GeneratedTopology { pub(crate) config: TopologyConfig, - pub(crate) validators: Vec, + pub(crate) nodes: Vec, } impl GeneratedTopology { @@ -70,20 +57,20 @@ impl GeneratedTopology { } #[must_use] - /// All validator configs. - pub fn validators(&self) -> &[GeneratedNodeConfig] { - &self.validators + /// All node configs. + pub fn nodes(&self) -> &[GeneratedNodeConfig] { + &self.nodes } - /// Iterator over all node configs in role order. - pub fn nodes(&self) -> impl Iterator { - self.validators.iter() + /// Iterator over all node configs in topology order. + pub fn iter(&self) -> impl Iterator { + self.nodes.iter() } #[must_use] /// Slot duration from the first node (assumes homogeneous configs). pub fn slot_duration(&self) -> Option { - self.validators + self.nodes .first() .map(|node| node.general.time_config.slot_duration) } @@ -96,21 +83,21 @@ impl GeneratedTopology { pub async fn spawn_local(&self) -> Result { let configs = self - .nodes() + .iter() .map(|node| node.general.clone()) .collect::>(); - let validators = Topology::spawn_validators(configs, self.config.n_validators).await?; + let nodes = Topology::spawn_nodes(configs, self.config.n_nodes).await?; - Ok(Topology { validators }) + Ok(Topology { nodes }) } pub async fn wait_remote_readiness( &self, // Node endpoints - validator_endpoints: &[Url], + node_endpoints: &[Url], ) -> Result<(), ReadinessError> { - let total_nodes = self.validators.len(); + let total_nodes = self.nodes.len(); if total_nodes == 0 { return Ok(()); } @@ -118,20 +105,20 @@ impl GeneratedTopology { let labels = self.labels(); let client = Client::new(); - let endpoints = collect_node_endpoints(self, validator_endpoints, total_nodes); + let endpoints = collect_node_endpoints(self, node_endpoints, total_nodes); wait_for_network_readiness(self, &client, &endpoints, &labels).await } fn listen_ports(&self) -> Vec { - self.validators + self.nodes .iter() .map(|node| node.general.network_config.backend.swarm.port) .collect() } fn initial_peer_ports(&self) -> Vec> { - self.validators + self.nodes .iter() .map(|node| { node.general @@ -146,12 +133,12 @@ impl GeneratedTopology { } fn labels(&self) -> Vec { - self.validators + self.nodes .iter() .enumerate() .map(|(idx, node)| { format!( - "validator#{idx}@{}", + "node#{idx}@{}", node.general.network_config.backend.swarm.port ) }) @@ -161,17 +148,17 @@ impl GeneratedTopology { fn collect_node_endpoints( topology: &GeneratedTopology, - validator_endpoints: &[Url], + node_endpoints: &[Url], total_nodes: usize, ) -> Vec { assert_eq!( - topology.validators.len(), - validator_endpoints.len(), - "validator endpoints must match topology" + topology.nodes.len(), + node_endpoints.len(), + "node endpoints must match topology" ); let mut endpoints = Vec::with_capacity(total_nodes); - endpoints.extend_from_slice(validator_endpoints); + endpoints.extend_from_slice(node_endpoints); endpoints } diff --git a/testing-framework/core/src/topology/readiness/network.rs b/testing-framework/core/src/topology/readiness/network.rs index d560195..eaf0af1 100644 --- a/testing-framework/core/src/topology/readiness/network.rs +++ b/testing-framework/core/src/topology/readiness/network.rs @@ -37,7 +37,7 @@ impl<'a> ReadinessCheck<'a> for NetworkReadiness<'a> { type Data = Vec; async fn collect(&'a self) -> Self::Data { - collect_validator_statuses(self).await + collect_node_statuses(self).await } fn is_ready(&self, data: 
&Self::Data) -> bool { @@ -101,10 +101,10 @@ impl<'a> ReadinessCheck<'a> for HttpNetworkReadiness<'a> { } } -async fn collect_validator_statuses(readiness: &NetworkReadiness<'_>) -> Vec { - let validator_futures = readiness +async fn collect_node_statuses(readiness: &NetworkReadiness<'_>) -> Vec { + let node_futures = readiness .topology - .validators + .nodes .iter() .enumerate() .map(|(idx, node)| { @@ -112,7 +112,7 @@ async fn collect_validator_statuses(readiness: &NetworkReadiness<'_>) -> Vec) -> Vec Vec { - topology.nodes().map(host_from_node).collect() + topology.nodes().iter().map(host_from_node).collect() } fn docker_style_hosts(topology: &GeneratedTopology) -> Vec { topology .nodes() + .iter() .map(|node| docker_host(node, 10 + node.index() as u8)) .collect() } fn host_from_node(node: &GeneratedNodeConfig) -> Host { - let identifier = identifier_for(node.role(), node.index()); + let identifier = identifier_for(node.index()); let ip = Ipv4Addr::LOCALHOST; - let mut host = make_host(node.role(), ip, identifier); + let mut host = make_host(ip, identifier); host.network_port = node.network_port(); host.blend_port = node.blend_port; host } fn docker_host(node: &GeneratedNodeConfig, octet: u8) -> Host { - let identifier = identifier_for(node.role(), node.index()); + let identifier = identifier_for(node.index()); let ip = Ipv4Addr::new(172, 23, 0, octet); - let mut host = make_host(node.role(), ip, identifier); + let mut host = make_host(ip, identifier); host.network_port = node.network_port().saturating_add(1000); host.blend_port = node.blend_port.saturating_add(1000); host @@ -320,7 +345,7 @@ mod tests { fn tracing_settings(topology: &GeneratedTopology) -> TracingSettings { topology - .validators() + .nodes() .first() .expect("topology must contain at least one node") .general @@ -329,22 +354,18 @@ mod tests { .clone() } - fn identifier_for(role: TopologyNodeRole, index: usize) -> String { - match role { - TopologyNodeRole::Validator => format!("validator-{index}"), - } + fn identifier_for(index: usize) -> String { + format!("node-{index}") } - fn make_host(role: TopologyNodeRole, ip: Ipv4Addr, identifier: String) -> Host { + fn make_host(ip: Ipv4Addr, identifier: String) -> Host { let ports = PortOverrides { network_port: None, blend_port: None, api_port: None, testing_http_port: None, }; - match role { - TopologyNodeRole::Validator => Host::validator_from_ip(ip, identifier, ports), - } + Host::node_from_ip(ip, identifier, ports) } fn declaration_fingerprint( diff --git a/testing-framework/deployers/compose/src/deployer/orchestrator.rs b/testing-framework/deployers/compose/src/deployer/orchestrator.rs index dff4a20..40c2fc3 100644 --- a/testing-framework/deployers/compose/src/deployer/orchestrator.rs +++ b/testing-framework/deployers/compose/src/deployer/orchestrator.rs @@ -50,7 +50,7 @@ impl DeploymentOrchestrator { } = setup.prepare_workspace(&observability).await?; tracing::info!( - validators = descriptors.validators().len(), + nodes = descriptors.nodes().len(), duration_secs = scenario.duration().as_secs(), readiness_checks = self.deployer.readiness_checks, metrics_query_url = observability.metrics_query_url.as_ref().map(|u| u.as_str()), @@ -62,7 +62,7 @@ impl DeploymentOrchestrator { "compose deployment starting" ); - let validator_count = descriptors.validators().len(); + let node_count = descriptors.nodes().len(); let host_ports = PortManager::prepare(&mut environment, &descriptors).await?; wait_for_readiness_or_grace_period( @@ -102,7 +102,7 @@ impl DeploymentOrchestrator { 
); info!( - validators = validator_count, + nodes = node_count, duration_secs = scenario.duration().as_secs(), readiness_checks = self.deployer.readiness_checks, host, @@ -195,22 +195,22 @@ fn maybe_print_endpoints(observability: &ObservabilityInputs, host: &str, ports: } fn log_profiling_urls(host: &str, ports: &HostPortMapping) { - for (idx, node) in ports.validators.iter().enumerate() { + for (idx, node) in ports.nodes.iter().enumerate() { tracing::info!( - validator = idx, + node = idx, profiling_url = %format!( "http://{}:{}/debug/pprof/profile?seconds=15&format=proto", host, node.api ), - "validator profiling endpoint (profiling feature required)" + "node profiling endpoint (profiling feature required)" ); } } fn print_profiling_urls(host: &str, ports: &HostPortMapping) { - for (idx, node) in ports.validators.iter().enumerate() { + for (idx, node) in ports.nodes.iter().enumerate() { println!( - "TESTNET_PPROF validator_{}=http://{}:{}/debug/pprof/profile?seconds=15&format=proto", + "TESTNET_PPROF node_{}=http://{}:{}/debug/pprof/profile?seconds=15&format=proto", idx, host, node.api ); } diff --git a/testing-framework/deployers/compose/src/deployer/ports.rs b/testing-framework/deployers/compose/src/deployer/ports.rs index 538aabc..eb076f5 100644 --- a/testing-framework/deployers/compose/src/deployer/ports.rs +++ b/testing-framework/deployers/compose/src/deployer/ports.rs @@ -17,13 +17,13 @@ impl PortManager { descriptors: &GeneratedTopology, ) -> Result { debug!( - validators = descriptors.validators().len(), + nodes = descriptors.nodes().len(), "resolving host ports for compose services" ); match discover_host_ports(environment, descriptors).await { Ok(mapping) => { info!( - validator_ports = ?mapping.validator_api_ports(), + node_ports = ?mapping.node_api_ports(), "resolved container host ports" ); Ok(mapping) diff --git a/testing-framework/deployers/compose/src/deployer/readiness.rs b/testing-framework/deployers/compose/src/deployer/readiness.rs index 19cca71..f31f1c7 100644 --- a/testing-framework/deployers/compose/src/deployer/readiness.rs +++ b/testing-framework/deployers/compose/src/deployer/readiness.rs @@ -7,7 +7,7 @@ use crate::{ environment::StackEnvironment, ports::{HostPortMapping, ensure_remote_readiness_with_ports}, }, - lifecycle::readiness::ensure_validators_ready_with_ports, + lifecycle::readiness::ensure_nodes_ready_with_ports, }; pub struct ReadinessChecker; @@ -18,13 +18,13 @@ impl ReadinessChecker { host_ports: &HostPortMapping, environment: &mut StackEnvironment, ) -> Result<(), ComposeRunnerError> { - let validator_ports = host_ports.validator_api_ports(); - info!(ports = ?validator_ports, "waiting for validator HTTP endpoints"); - if let Err(err) = ensure_validators_ready_with_ports(&validator_ports).await { + let node_ports = host_ports.node_api_ports(); + info!(ports = ?node_ports, "waiting for node HTTP endpoints"); + if let Err(err) = ensure_nodes_ready_with_ports(&node_ports).await { return fail_readiness_step( environment, - "validator readiness failed", - "validator readiness failed", + "node readiness failed", + "node readiness failed", err, ) .await; diff --git a/testing-framework/deployers/compose/src/deployer/setup.rs b/testing-framework/deployers/compose/src/deployer/setup.rs index 1dc8560..ac0e664 100644 --- a/testing-framework/deployers/compose/src/deployer/setup.rs +++ b/testing-framework/deployers/compose/src/deployer/setup.rs @@ -32,7 +32,7 @@ impl DeploymentSetup { ensure_supported_topology(&self.descriptors)?; info!( - validators = 
self.descriptors.validators().len(), + nodes = self.descriptors.nodes().len(), "starting compose deployment" ); diff --git a/testing-framework/deployers/compose/src/descriptor/mod.rs b/testing-framework/deployers/compose/src/descriptor/mod.rs index 8e10f93..305e5f7 100644 --- a/testing-framework/deployers/compose/src/descriptor/mod.rs +++ b/testing-framework/deployers/compose/src/descriptor/mod.rs @@ -4,10 +4,7 @@ use std::{ }; use serde::Serialize; -use testing_framework_core::{ - kzg::KzgParamsSpec, - topology::generation::{GeneratedNodeConfig, GeneratedTopology}, -}; +use testing_framework_core::topology::generation::{GeneratedNodeConfig, GeneratedTopology}; use testing_framework_env as tf_env; use crate::docker::platform::{host_gateway_entry, resolve_image}; @@ -20,7 +17,7 @@ use testing_framework_config::constants::DEFAULT_CFGSYNC_PORT; /// Top-level docker-compose descriptor built from a GeneratedTopology. #[derive(Clone, Debug, Serialize)] pub struct ComposeDescriptor { - validators: Vec, + nodes: Vec, } impl ComposeDescriptor { @@ -31,8 +28,8 @@ impl ComposeDescriptor { } #[cfg(test)] - pub fn validators(&self) -> &[NodeDescriptor] { - &self.validators + pub fn nodes(&self) -> &[NodeDescriptor] { + &self.nodes } } @@ -40,7 +37,6 @@ impl ComposeDescriptor { /// template. pub struct ComposeDescriptorBuilder<'a> { topology: &'a GeneratedTopology, - use_kzg_mount: bool, cfgsync_port: Option, } @@ -48,18 +44,10 @@ impl<'a> ComposeDescriptorBuilder<'a> { const fn new(topology: &'a GeneratedTopology) -> Self { Self { topology, - use_kzg_mount: false, cfgsync_port: None, } } - #[must_use] - /// Mount KZG parameters into nodes when enabled. - pub const fn with_kzg_mount(mut self, enabled: bool) -> Self { - self.use_kzg_mount = enabled; - self - } - #[must_use] /// Set cfgsync port for nodes. 
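A builder sketch for the compose descriptor after the KZG-mount toggle was removed (the `builder(...)` entry point is assumed from the elided impl block; the port setter is shown just below, and 4400 mirrors the cfgsync default used elsewhere in this patch):

```rust
let descriptor = ComposeDescriptor::builder(&topology)
    .with_cfgsync_port(4400) // only remaining knob; with_kzg_mount(...) is gone
    .build();
```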
pub const fn with_cfgsync_port(mut self, port: u16) -> Self { @@ -74,68 +62,38 @@ impl<'a> ComposeDescriptorBuilder<'a> { let (image, platform) = resolve_image(); - let validators = build_nodes( - self.topology.validators(), - ComposeNodeKind::Validator, + let nodes = build_nodes( + self.topology.nodes(), &image, platform.as_deref(), - self.use_kzg_mount, cfgsync_port, ); - ComposeDescriptor { validators } + ComposeDescriptor { nodes } } } -#[derive(Clone, Copy)] -pub(crate) enum ComposeNodeKind { - Validator, -} +const NODE_ENTRYPOINT: &str = "/etc/nomos/scripts/run_nomos_node.sh"; -impl ComposeNodeKind { - fn instance_name(self, index: usize) -> String { - match self { - Self::Validator => format!("validator-{index}"), - } - } - - const fn entrypoint(self) -> &'static str { - match self { - Self::Validator => "/etc/nomos/scripts/run_nomos_node.sh", - } - } +pub(crate) fn node_instance_name(index: usize) -> String { + format!("node-{index}") } fn build_nodes( nodes: &[GeneratedNodeConfig], - kind: ComposeNodeKind, image: &str, platform: Option<&str>, - use_kzg_mount: bool, cfgsync_port: u16, ) -> Vec { nodes .iter() .enumerate() - .map(|(index, node)| { - NodeDescriptor::from_node( - kind, - index, - node, - image, - platform, - use_kzg_mount, - cfgsync_port, - ) - }) + .map(|(index, node)| NodeDescriptor::from_node(index, node, image, platform, cfgsync_port)) .collect() } -fn base_volumes(use_kzg_mount: bool) -> Vec { +fn base_volumes() -> Vec { let mut volumes = vec!["./stack:/etc/nomos".into()]; - if use_kzg_mount { - volumes.push("./kzgrs_test_params:/kzgrs_test_params:z".into()); - } if let Some(host_log_dir) = repo_root() .map(|root| root.join("tmp").join("node-logs")) .map(|dir| dir.display().to_string()) @@ -160,18 +118,16 @@ fn default_extra_hosts() -> Vec { host_gateway_entry().into_iter().collect() } -fn base_environment(cfgsync_port: u16, use_kzg_mount: bool) -> Vec { +fn base_environment(cfgsync_port: u16) -> Vec { let pol_mode = tf_env::pol_proof_dev_mode().unwrap_or_else(|| "true".to_string()); let rust_log = tf_env::rust_log().unwrap_or_else(|| "info".to_string()); let nomos_log_level = tf_env::nomos_log_level().unwrap_or_else(|| "info".to_string()); let time_backend = tf_env::nomos_time_backend().unwrap_or_else(|| "monotonic".into()); - let kzg_path = KzgParamsSpec::for_compose(use_kzg_mount).node_params_path; vec![ EnvEntry::new("POL_PROOF_DEV_MODE", pol_mode), EnvEntry::new("RUST_LOG", rust_log), - EnvEntry::new("NOMOS_LOG_LEVEL", nomos_log_level), - EnvEntry::new("NOMOS_TIME_BACKEND", time_backend), - EnvEntry::new("LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH", kzg_path), + EnvEntry::new("LOGOS_BLOCKCHAIN_LOG_LEVEL", nomos_log_level), + EnvEntry::new("LOGOS_BLOCKCHAIN_TIME_BACKEND", time_backend), EnvEntry::new( "CFG_SERVER_ADDR", format!("http://host.docker.internal:{cfgsync_port}"), diff --git a/testing-framework/deployers/compose/src/descriptor/node.rs b/testing-framework/deployers/compose/src/descriptor/node.rs index 95c9141..15c88ed 100644 --- a/testing-framework/deployers/compose/src/descriptor/node.rs +++ b/testing-framework/deployers/compose/src/descriptor/node.rs @@ -1,9 +1,11 @@ use serde::Serialize; use testing_framework_core::topology::generation::GeneratedNodeConfig; -use super::{ComposeNodeKind, base_environment, base_volumes, default_extra_hosts}; +use super::{ + NODE_ENTRYPOINT, base_environment, base_volumes, default_extra_hosts, node_instance_name, +}; -/// Describes a validator container in the compose stack. 
+/// Describes a node container in the compose stack. #[derive(Clone, Debug, Serialize)] pub struct NodeDescriptor { name: String, @@ -45,16 +47,14 @@ impl EnvEntry { impl NodeDescriptor { pub(crate) fn from_node( - kind: ComposeNodeKind, index: usize, node: &GeneratedNodeConfig, image: &str, platform: Option<&str>, - use_kzg_mount: bool, cfgsync_port: u16, ) -> Self { - let mut environment = base_environment(cfgsync_port, use_kzg_mount); - let identifier = kind.instance_name(index); + let mut environment = base_environment(cfgsync_port); + let identifier = node_instance_name(index); let api_port = node.general.api_config.address.port(); let testing_port = node.general.api_config.testing_http_address.port(); environment.extend([ @@ -77,10 +77,10 @@ impl NodeDescriptor { ]; Self { - name: kind.instance_name(index), + name: node_instance_name(index), image: image.to_owned(), - entrypoint: kind.entrypoint().to_owned(), - volumes: base_volumes(use_kzg_mount), + entrypoint: NODE_ENTRYPOINT.to_owned(), + volumes: base_volumes(), extra_hosts: default_extra_hosts(), ports, environment, diff --git a/testing-framework/deployers/compose/src/docker/control.rs b/testing-framework/deployers/compose/src/docker/control.rs index 4849980..4a27e02 100644 --- a/testing-framework/deployers/compose/src/docker/control.rs +++ b/testing-framework/deployers/compose/src/docker/control.rs @@ -45,13 +45,13 @@ pub struct ComposeNodeControl { #[async_trait::async_trait] impl NodeControlHandle for ComposeNodeControl { - async fn restart_validator(&self, index: usize) -> Result<(), DynError> { + async fn restart_node(&self, index: usize) -> Result<(), DynError> { restart_compose_service( &self.compose_file, &self.project_name, - &format!("validator-{index}"), + &format!("node-{index}"), ) .await - .map_err(|err| format!("validator restart failed: {err}").into()) + .map_err(|err| format!("node restart failed: {err}").into()) } } diff --git a/testing-framework/deployers/compose/src/docker/platform.rs b/testing-framework/deployers/compose/src/docker/platform.rs index efb55c4..473b47b 100644 --- a/testing-framework/deployers/compose/src/docker/platform.rs +++ b/testing-framework/deployers/compose/src/docker/platform.rs @@ -4,7 +4,7 @@ use testing_framework_env as tf_env; use tracing::debug; /// Select the compose image and optional platform, honoring -/// NOMOS_TESTNET_IMAGE. +/// LOGOS_BLOCKCHAIN_TESTNET_IMAGE. pub fn resolve_image() -> (String, Option) { let image = tf_env::nomos_testnet_image() .unwrap_or_else(|| String::from("logos-blockchain-testing:local")); diff --git a/testing-framework/deployers/compose/src/docker/workspace.rs b/testing-framework/deployers/compose/src/docker/workspace.rs index 4fe7eca..65fe782 100644 --- a/testing-framework/deployers/compose/src/docker/workspace.rs +++ b/testing-framework/deployers/compose/src/docker/workspace.rs @@ -5,10 +5,7 @@ use std::{ use anyhow::{Context as _, Result}; use tempfile::TempDir; -use testing_framework_config::constants::{ - DEFAULT_ASSETS_STACK_DIR, DEFAULT_KZG_HOST_DIR, KZG_PARAMS_FILENAME, -}; -use testing_framework_env; +use testing_framework_config::constants::DEFAULT_ASSETS_STACK_DIR; use tracing::{debug, info}; /// Copy the repository stack assets into a scenario-specific temp dir. 
@@ -54,40 +51,6 @@ impl ComposeWorkspace { copy_dir_recursive(&scripts_source, &temp.path().join("stack/scripts"))?; } - let kzg_source = repo_root.join( - testing_framework_env::nomos_kzg_dir_rel() - .unwrap_or_else(|| DEFAULT_KZG_HOST_DIR.to_string()), - ); - let target = temp.path().join(KZG_PARAMS_FILENAME); - if kzg_source.exists() { - if kzg_source.is_dir() { - copy_dir_recursive(&kzg_source, &target)?; - } else { - fs::copy(&kzg_source, &target).with_context(|| { - format!("copying {} -> {}", kzg_source.display(), target.display()) - })?; - } - } - // Fail fast if the KZG bundle is missing or empty; DA verifier will panic - // otherwise. - if !target.exists() - || fs::read_dir(&target) - .ok() - .map(|mut it| it.next().is_none()) - .unwrap_or(true) - { - anyhow::bail!( - "\nKZG params missing in stack assets (expected files in {})\ - \nrepo_root: {}\ - \ntarget: {}\ - \nnomos_kzg_dir_rel(): {:?}\n", - kzg_source.display(), - repo_root.display(), - target.display(), - testing_framework_env::nomos_kzg_dir_rel(), - ); - } - info!(root = %temp.path().display(), "compose workspace created"); Ok(Self { root: temp }) } diff --git a/testing-framework/deployers/compose/src/errors.rs b/testing-framework/deployers/compose/src/errors.rs index 6bb7630..3b0e6c1 100644 --- a/testing-framework/deployers/compose/src/errors.rs +++ b/testing-framework/deployers/compose/src/errors.rs @@ -1,10 +1,7 @@ use std::path::PathBuf; use testing_framework_core::{ - scenario::{ - MetricsError, - http_probe::{HttpReadinessError, NodeRole}, - }, + scenario::{MetricsError, http_probe::HttpReadinessError}, topology::readiness::ReadinessError, }; use url::ParseError; @@ -14,8 +11,8 @@ use crate::{docker::commands::ComposeCommandError, infrastructure::template::Tem #[derive(Debug, thiserror::Error)] /// Top-level compose runner errors. pub enum ComposeRunnerError { - #[error("compose runner requires at least one validator (validators={validators})")] - MissingValidator { validators: usize }, + #[error("compose runner requires at least one node (nodes={nodes})")] + MissingNode { nodes: usize }, #[error("docker does not appear to be available on this host")] DockerUnavailable, #[error("failed to resolve host port for {service} container port {container_port}: {source}")] @@ -37,7 +34,7 @@ pub enum ComposeRunnerError { NodeClients(#[from] NodeClientError), #[error(transparent)] Telemetry(#[from] MetricsError), - #[error("block feed requires at least one validator client")] + #[error("block feed requires at least one node client")] BlockFeedMissing, #[error("failed to start block feed: {source}")] BlockFeed { @@ -45,7 +42,7 @@ pub enum ComposeRunnerError { source: anyhow::Error, }, #[error( - "docker image '{image}' is not available; set NOMOS_TESTNET_IMAGE or build the image manually" + "docker image '{image}' is not available; set LOGOS_BLOCKCHAIN_TESTNET_IMAGE or build the image manually" )] MissingImage { image: String }, #[error("failed to prepare docker image: {source}")] @@ -103,9 +100,9 @@ pub enum ConfigError { pub enum StackReadinessError { #[error(transparent)] Http(#[from] HttpReadinessError), - #[error("failed to build readiness URL for {role} port {port}: {source}", role = role.label())] + #[error("failed to build readiness URL for {role} port {port}: {source}")] Endpoint { - role: NodeRole, + role: &'static str, port: u16, #[source] source: ParseError, @@ -120,12 +117,9 @@ pub enum StackReadinessError { #[derive(Debug, thiserror::Error)] /// Node client construction failures. 
pub enum NodeClientError { - #[error( - "failed to build {endpoint} client URL for {role} port {port}: {source}", - role = role.label() - )] + #[error("failed to build {endpoint} client URL for {role} port {port}: {source}")] Endpoint { - role: NodeRole, + role: &'static str, endpoint: &'static str, port: u16, #[source] diff --git a/testing-framework/deployers/compose/src/infrastructure/cfgsync.rs b/testing-framework/deployers/compose/src/infrastructure/cfgsync.rs index ed38901..a4b05ea 100644 --- a/testing-framework/deployers/compose/src/infrastructure/cfgsync.rs +++ b/testing-framework/deployers/compose/src/infrastructure/cfgsync.rs @@ -4,7 +4,6 @@ use nomos_tracing::metrics::otlp::OtlpMetricsConfig; use nomos_tracing_service::MetricsLayer; use reqwest::Url; use testing_framework_core::{ - kzg::KzgParamsSpec, scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, write_cfgsync_template}, topology::generation::GeneratedTopology, }; @@ -62,21 +61,18 @@ impl Drop for CfgsyncServerHandle { pub fn update_cfgsync_config( path: &Path, topology: &GeneratedTopology, - use_kzg_mount: bool, port: u16, metrics_otlp_ingest_url: Option<&Url>, ) -> anyhow::Result<()> { debug!( path = %path.display(), - use_kzg_mount, port, - validators = topology.validators().len(), + nodes = topology.nodes().len(), "updating cfgsync template" ); let mut cfg = load_cfgsync_template(path)?; cfg.port = port; - apply_topology_overrides(&mut cfg, topology, use_kzg_mount); - cfg.global_params_path = KzgParamsSpec::for_compose(use_kzg_mount).node_params_path; + apply_topology_overrides(&mut cfg, topology); if let Some(endpoint) = metrics_otlp_ingest_url.cloned() { cfg.tracing_settings.metrics = MetricsLayer::Otlp(OtlpMetricsConfig { endpoint, diff --git a/testing-framework/deployers/compose/src/infrastructure/environment.rs b/testing-framework/deployers/compose/src/infrastructure/environment.rs index 8fe51ad..fbf491a 100644 --- a/testing-framework/deployers/compose/src/infrastructure/environment.rs +++ b/testing-framework/deployers/compose/src/infrastructure/environment.rs @@ -6,7 +6,6 @@ use std::{ use anyhow::anyhow; use reqwest::Url; -use testing_framework_config::constants::KZG_PARAMS_FILENAME; use testing_framework_core::{ adjust_timeout, scenario::CleanupGuard, topology::generation::GeneratedTopology, }; @@ -37,7 +36,6 @@ pub struct WorkspaceState { pub workspace: ComposeWorkspace, pub root: PathBuf, pub cfgsync_path: PathBuf, - pub use_kzg: bool, } /// Holds paths and handles for a running docker-compose stack. @@ -133,13 +131,13 @@ impl StackEnvironment { } } -/// Verifies the topology has at least one validator so compose can start. +/// Verifies the topology has at least one node so compose can start. 
pub fn ensure_supported_topology( descriptors: &GeneratedTopology, ) -> Result<(), ComposeRunnerError> { - let validators = descriptors.validators().len(); - if validators == 0 { - return Err(ComposeRunnerError::MissingValidator { validators }); + let nodes = descriptors.nodes().len(); + if nodes == 0 { + return Err(ComposeRunnerError::MissingNode { nodes }); } Ok(()) } @@ -149,19 +147,15 @@ pub fn prepare_workspace_state() -> Result { let workspace = ComposeWorkspace::create().map_err(WorkspaceError::new)?; let root = workspace.root_path().to_path_buf(); let cfgsync_path = workspace.stack_dir().join("cfgsync.yaml"); - let use_kzg = workspace.root_path().join(KZG_PARAMS_FILENAME).exists(); - let state = WorkspaceState { workspace, root, cfgsync_path, - use_kzg, }; debug!( root = %state.root.display(), cfgsync = %state.cfgsync_path.display(), - use_kzg = state.use_kzg, "prepared compose workspace state" ); @@ -215,7 +209,6 @@ pub fn configure_cfgsync( update_cfgsync_config( &workspace.cfgsync_path, descriptors, - workspace.use_kzg, cfgsync_port, metrics_otlp_ingest_url, ) @@ -315,7 +308,6 @@ pub fn write_compose_artifacts( "building compose descriptor" ); let descriptor = ComposeDescriptor::builder(descriptors) - .with_kzg_mount(workspace.use_kzg) .with_cfgsync_port(cfgsync_port) .build(); diff --git a/testing-framework/deployers/compose/src/infrastructure/ports.rs b/testing-framework/deployers/compose/src/infrastructure/ports.rs index 7222cb6..03db06c 100644 --- a/testing-framework/deployers/compose/src/infrastructure/ports.rs +++ b/testing-framework/deployers/compose/src/infrastructure/ports.rs @@ -3,9 +3,7 @@ use std::time::Duration; use anyhow::{Context as _, anyhow}; use reqwest::Url; use testing_framework_core::{ - adjust_timeout, - scenario::http_probe::NodeRole as HttpNodeRole, - topology::generation::{GeneratedTopology, NodeRole as TopologyNodeRole}, + adjust_timeout, scenario::http_probe::NODE_ROLE, topology::generation::GeneratedTopology, }; use tokio::{process::Command, time::timeout}; use tracing::{debug, info}; @@ -25,16 +23,16 @@ pub struct NodeHostPorts { pub testing: u16, } -/// All host port mappings for validators. +/// All host port mappings for nodes. #[derive(Clone, Debug)] pub struct HostPortMapping { - pub validators: Vec, + pub nodes: Vec, } impl HostPortMapping { - /// Returns API ports for all validators. - pub fn validator_api_ports(&self) -> Vec { - self.validators.iter().map(|ports| ports.api).collect() + /// Returns API ports for all nodes. 
+ pub fn node_api_ports(&self) -> Vec { + self.nodes.iter().map(|ports| ports.api).collect() } } @@ -46,21 +44,21 @@ pub async fn discover_host_ports( debug!( compose_file = %environment.compose_path().display(), project = environment.project_name(), - validators = descriptors.validators().len(), + nodes = descriptors.nodes().len(), "resolving compose host ports" ); - let mut validators = Vec::new(); - for node in descriptors.validators() { - let service = node_identifier(TopologyNodeRole::Validator, node.index()); + let mut nodes = Vec::new(); + for node in descriptors.nodes() { + let service = node_identifier(node.index()); let api = resolve_service_port(environment, &service, node.api_port()).await?; let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?; - validators.push(NodeHostPorts { api, testing }); + nodes.push(NodeHostPorts { api, testing }); } - let mapping = HostPortMapping { validators }; + let mapping = HostPortMapping { nodes }; info!( - validator_ports = ?mapping.validators, + node_ports = ?mapping.nodes, "compose host ports resolved" ); @@ -130,19 +128,19 @@ pub async fn ensure_remote_readiness_with_ports( descriptors: &GeneratedTopology, mapping: &HostPortMapping, ) -> Result<(), StackReadinessError> { - let validator_urls = mapping - .validators + let node_urls = mapping + .nodes .iter() - .map(|ports| readiness_url(HttpNodeRole::Validator, ports.api)) + .map(|ports| readiness_url(NODE_ROLE, ports.api)) .collect::, _>>()?; descriptors - .wait_remote_readiness(&validator_urls) + .wait_remote_readiness(&node_urls) .await .map_err(|source| StackReadinessError::Remote { source }) } -fn readiness_url(role: HttpNodeRole, port: u16) -> Result { +fn readiness_url(role: &'static str, port: u16) -> Result { localhost_url(port).map_err(|source| StackReadinessError::Endpoint { role, port, source }) } @@ -150,10 +148,8 @@ fn localhost_url(port: u16) -> Result { Url::parse(&format!("http://{}:{port}/", compose_runner_host())) } -fn node_identifier(role: TopologyNodeRole, index: usize) -> String { - match role { - TopologyNodeRole::Validator => format!("validator-{index}"), - } +fn node_identifier(index: usize) -> String { + format!("node-{index}") } pub(crate) fn compose_runner_host() -> String { diff --git a/testing-framework/deployers/compose/src/lifecycle/block_feed.rs b/testing-framework/deployers/compose/src/lifecycle/block_feed.rs index a664773..ec18b06 100644 --- a/testing-framework/deployers/compose/src/lifecycle/block_feed.rs +++ b/testing-framework/deployers/compose/src/lifecycle/block_feed.rs @@ -13,12 +13,12 @@ async fn spawn_block_feed_with( node_clients: &NodeClients, ) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> { debug!( - validators = node_clients.validator_clients().len(), - "selecting validator client for block feed" + nodes = node_clients.node_clients().len(), + "selecting node client for block feed" ); let block_source_client = node_clients - .random_validator() + .random_node() .ok_or(ComposeRunnerError::BlockFeedMissing)?; spawn_block_feed(block_source_client) diff --git a/testing-framework/deployers/compose/src/lifecycle/readiness.rs b/testing-framework/deployers/compose/src/lifecycle/readiness.rs index 72a1016..c8e041a 100644 --- a/testing-framework/deployers/compose/src/lifecycle/readiness.rs +++ b/testing-framework/deployers/compose/src/lifecycle/readiness.rs @@ -3,26 +3,26 @@ use std::time::Duration; use reqwest::Url; use testing_framework_core::{ nodes::ApiClient, - scenario::{NodeClients, 
http_probe::NodeRole as HttpNodeRole}, - topology::generation::{GeneratedTopology, NodeRole as TopologyNodeRole}, + scenario::{NodeClients, http_probe::NODE_ROLE}, + topology::generation::GeneratedTopology, }; use tokio::time::sleep; use crate::{ errors::{NodeClientError, StackReadinessError}, infrastructure::ports::{HostPortMapping, NodeHostPorts}, - lifecycle::wait::wait_for_validators, + lifecycle::wait::wait_for_nodes, }; const DISABLED_READINESS_SLEEP: Duration = Duration::from_secs(5); -/// Wait until all validators respond on their API ports. -pub async fn ensure_validators_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> { +/// Wait until all nodes respond on their API ports. +pub async fn ensure_nodes_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> { if ports.is_empty() { return Ok(()); } - wait_for_validators(ports).await.map_err(Into::into) + wait_for_nodes(ports).await.map_err(Into::into) } /// Allow a brief pause when readiness probes are disabled. @@ -38,18 +38,18 @@ pub fn build_node_clients_with_ports( mapping: &HostPortMapping, host: &str, ) -> Result { - let validators = descriptors - .validators() + let nodes = descriptors + .nodes() .iter() - .zip(mapping.validators.iter()) - .map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports, host)) + .zip(mapping.nodes.iter()) + .map(|(_node, ports)| api_client_from_host_ports(NODE_ROLE, ports, host)) .collect::, _>>()?; - Ok(NodeClients::new(validators)) + Ok(NodeClients::new(nodes)) } fn api_client_from_host_ports( - role: HttpNodeRole, + role: &'static str, ports: &NodeHostPorts, host: &str, ) -> Result { @@ -73,12 +73,6 @@ fn api_client_from_host_ports( Ok(ApiClient::from_urls(base_url, testing_url)) } -fn to_http_role(role: TopologyNodeRole) -> testing_framework_core::scenario::http_probe::NodeRole { - match role { - TopologyNodeRole::Validator => HttpNodeRole::Validator, - } -} - fn localhost_url(port: u16, host: &str) -> Result { Url::parse(&format!("http://{host}:{port}/")) } diff --git a/testing-framework/deployers/compose/src/lifecycle/wait.rs b/testing-framework/deployers/compose/src/lifecycle/wait.rs index 4fb58ad..dffa4a2 100644 --- a/testing-framework/deployers/compose/src/lifecycle/wait.rs +++ b/testing-framework/deployers/compose/src/lifecycle/wait.rs @@ -2,7 +2,7 @@ use std::{env, time::Duration}; use testing_framework_core::{ adjust_timeout, - scenario::http_probe::{self, HttpReadinessError, NodeRole}, + scenario::http_probe::{self, HttpReadinessError, NODE_ROLE}, }; use tracing::{debug, info}; @@ -12,15 +12,15 @@ const POLL_INTERVAL_MILLIS: u64 = 250; const DEFAULT_WAIT: Duration = Duration::from_secs(DEFAULT_WAIT_TIMEOUT_SECS); const POLL_INTERVAL: Duration = Duration::from_millis(POLL_INTERVAL_MILLIS); -pub async fn wait_for_validators(ports: &[u16]) -> Result<(), HttpReadinessError> { - wait_for_ports(ports, NodeRole::Validator).await +pub async fn wait_for_nodes(ports: &[u16]) -> Result<(), HttpReadinessError> { + wait_for_ports(ports, NODE_ROLE).await } -async fn wait_for_ports(ports: &[u16], role: NodeRole) -> Result<(), HttpReadinessError> { +async fn wait_for_ports(ports: &[u16], role: &'static str) -> Result<(), HttpReadinessError> { let host = compose_runner_host(); let timeout = compose_http_timeout(); - info!(role = ?role, ports = ?ports, host, "waiting for compose HTTP readiness"); + info!(role, ports = ?ports, host, "waiting for compose HTTP readiness"); http_probe::wait_for_http_ports_with_host( ports, diff --git 
a/testing-framework/deployers/k8s/helm/nomos-runner/templates/_helpers.tpl b/testing-framework/deployers/k8s/helm/nomos-runner/templates/_helpers.tpl index 0a28c4f..92c9987 100644 --- a/testing-framework/deployers/k8s/helm/nomos-runner/templates/_helpers.tpl +++ b/testing-framework/deployers/k8s/helm/nomos-runner/templates/_helpers.tpl @@ -20,12 +20,11 @@ app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end -}} -{{- define "nomos-runner.validatorLabels" -}} +{{- define "nomos-runner.nodeLabels" -}} {{- $root := index . "root" -}} {{- $index := index . "index" -}} app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }} app.kubernetes.io/instance: {{ $root.Release.Name }} -nomos/logical-role: validator -nomos/validator-index: "{{ $index }}" +nomos/logical-role: node +nomos/node-index: "{{ $index }}" {{- end -}} - diff --git a/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-deployments.yaml b/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-deployments.yaml index d0a6fc9..2e5c08c 100644 --- a/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-deployments.yaml +++ b/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-deployments.yaml @@ -1,25 +1,25 @@ {{- $root := . -}} -{{- $nodes := default (list) .Values.validators.nodes }} +{{- $nodes := default (list) .Values.nodes.nodes }} {{- range $i, $node := $nodes }} --- apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + name: {{ include "nomos-runner.fullname" $root }}-node-{{ $i }} labels: - {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} + {{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }} spec: replicas: 1 selector: matchLabels: - {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 6 }} + {{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 6 }} template: metadata: labels: - {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 8 }} + {{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 8 }} spec: containers: - - name: validator + - name: node image: {{ $root.Values.image }} imagePullPolicy: {{ $root.Values.imagePullPolicy }} command: ["/etc/nomos/scripts/run_nomos_node.sh"] @@ -31,10 +31,8 @@ spec: env: - name: CFG_SERVER_ADDR value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }} - - name: NOMOS_TIME_BACKEND + - name: LOGOS_BLOCKCHAIN_TIME_BACKEND value: {{ $root.Values.timeBackend | default "monotonic" | quote }} - - name: LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH - value: '{{ if eq $root.Values.kzg.mode "inImage" }}{{ $root.Values.kzg.inImageParamsPath }}{{ else }}{{ $root.Values.kzg.hostPathParamsPath }}{{ end }}' {{- range $key, $value := $node.env }} - name: {{ $key }} value: "{{ $value }}" @@ -43,11 +41,6 @@ spec: - name: assets mountPath: /etc/nomos readOnly: true - {{- if eq $root.Values.kzg.mode "hostPath" }} - - name: kzg-params - mountPath: /kzgrs_test_params - readOnly: true - {{- end }} volumes: - name: assets configMap: @@ -62,10 +55,4 @@ spec: path: scripts/run_nomos.sh - key: run_nomos_node.sh path: scripts/run_nomos_node.sh - {{- if eq $root.Values.kzg.mode "hostPath" }} - - name: kzg-params - persistentVolumeClaim: - claimName: {{ include "nomos-runner.fullname" $root }}-kzg - readOnly: 
true - {{- end }} {{- end }} diff --git a/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-services.yaml b/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-services.yaml index ff94e2e..62a0f30 100644 --- a/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-services.yaml +++ b/testing-framework/deployers/k8s/helm/nomos-runner/templates/validator-services.yaml @@ -1,17 +1,17 @@ {{- $root := . -}} -{{- $nodes := default (list) .Values.validators.nodes }} +{{- $nodes := default (list) .Values.nodes.nodes }} {{- range $i, $node := $nodes }} --- apiVersion: v1 kind: Service metadata: - name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + name: {{ include "nomos-runner.fullname" $root }}-node-{{ $i }} labels: - {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} + {{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }} spec: type: NodePort selector: - {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} + {{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }} ports: - name: http port: {{ default 18080 $node.apiPort }} diff --git a/testing-framework/deployers/k8s/helm/nomos-runner/values.yaml b/testing-framework/deployers/k8s/helm/nomos-runner/values.yaml index b5083de..260bbf9 100644 --- a/testing-framework/deployers/k8s/helm/nomos-runner/values.yaml +++ b/testing-framework/deployers/k8s/helm/nomos-runner/values.yaml @@ -14,14 +14,6 @@ scripts: runNomosSh: "" runNomosNodeSh: "" -validators: +nodes: count: 1 nodes: [] - -kzg: - mode: "hostPath" - hostPathParamsPath: "/kzgrs_test_params/kzgrs_test_params" - inImageParamsPath: "/opt/nomos/kzg-params/kzgrs_test_params" - hostPath: "/var/lib/nomos/kzgrs_test_params" - hostPathType: "Directory" - storageSize: "1Gi" diff --git a/testing-framework/deployers/k8s/src/deployer/orchestrator.rs b/testing-framework/deployers/k8s/src/deployer/orchestrator.rs index c0b366f..2245318 100644 --- a/testing-framework/deployers/k8s/src/deployer/orchestrator.rs +++ b/testing-framework/deployers/k8s/src/deployer/orchestrator.rs @@ -56,8 +56,8 @@ impl K8sDeployer { #[derive(Debug, thiserror::Error)] /// High-level runner failures returned to the scenario harness. 
pub enum K8sRunnerError { - #[error("kubernetes runner requires at least one validator (validators={validators})")] - UnsupportedTopology { validators: usize }, + #[error("kubernetes runner requires at least one node (nodes={nodes})")] + UnsupportedTopology { nodes: usize }, #[error("failed to initialise kubernetes client: {source}")] ClientInit { #[source] @@ -122,9 +122,9 @@ impl From for K8sRunnerError { } fn ensure_supported_topology(descriptors: &GeneratedTopology) -> Result<(), K8sRunnerError> { - let validators = descriptors.validators().len(); - if validators == 0 { - return Err(K8sRunnerError::UnsupportedTopology { validators }); + let nodes = descriptors.nodes().len(); + if nodes == 0 { + return Err(K8sRunnerError::UnsupportedTopology { nodes }); } Ok(()) } @@ -137,13 +137,13 @@ async fn deploy_with_observability( let observability = resolve_observability_inputs(observability)?; let descriptors = scenario.topology().clone(); - let validator_count = descriptors.validators().len(); + let node_count = descriptors.nodes().len(); ensure_supported_topology(&descriptors)?; let client = init_kube_client().await?; info!( - validators = validator_count, + nodes = node_count, duration_secs = scenario.duration().as_secs(), readiness_checks = deployer.readiness_checks, metrics_query_url = observability.metrics_query_url.as_ref().map(|u| u.as_str()), @@ -195,7 +195,7 @@ async fn deploy_with_observability( telemetry, block_feed, block_feed_guard, - validator_count, + node_count, ) } @@ -207,13 +207,13 @@ async fn setup_cluster( observability: &ObservabilityInputs, ) -> Result { let assets = prepare_assets(descriptors, observability.metrics_otlp_ingest_url.as_ref())?; - let validators = descriptors.validators().len(); + let nodes = descriptors.nodes().len(); let (namespace, release) = cluster_identifiers(); - info!(%namespace, %release, validators, "preparing k8s assets and namespace"); + info!(%namespace, %release, nodes, "preparing k8s assets and namespace"); let mut cleanup_guard = - Some(install_stack(client, &assets, &namespace, &release, validators).await?); + Some(install_stack(client, &assets, &namespace, &release, nodes).await?); info!("waiting for helm-managed services to become ready"); let cluster_ready = @@ -328,10 +328,10 @@ fn maybe_print_endpoints( .unwrap_or_else(|| "".to_string()) ); - let validator_clients = node_clients.validator_clients(); - for (idx, client) in validator_clients.iter().enumerate() { + let nodes = node_clients.node_clients(); + for (idx, client) in nodes.iter().enumerate() { println!( - "TESTNET_PPROF validator_{}={}/debug/pprof/profile?seconds=15&format=proto", + "TESTNET_PPROF node_{}={}/debug/pprof/profile?seconds=15&format=proto", idx, client.base_url() ); @@ -347,7 +347,7 @@ fn finalize_runner( telemetry: testing_framework_core::scenario::Metrics, block_feed: testing_framework_core::scenario::BlockFeed, block_feed_guard: BlockFeedTask, - validator_count: usize, + node_count: usize, ) -> Result { let environment = cluster .take() @@ -373,7 +373,7 @@ fn finalize_runner( ); info!( - validators = validator_count, + nodes = node_count, duration_secs = duration.as_secs(), "k8s deployment ready; handing control to scenario runner" ); diff --git a/testing-framework/deployers/k8s/src/infrastructure/assets.rs b/testing-framework/deployers/k8s/src/infrastructure/assets.rs index 3febc0f..729728e 100644 --- a/testing-framework/deployers/k8s/src/infrastructure/assets.rs +++ b/testing-framework/deployers/k8s/src/infrastructure/assets.rs @@ -11,9 +11,7 @@ use 
reqwest::Url; use serde::Serialize; use tempfile::TempDir; use testing_framework_config::constants::{DEFAULT_ASSETS_STACK_DIR, cfgsync_port}; -pub use testing_framework_core::kzg::KzgMode; use testing_framework_core::{ - kzg::KzgParamsSpec, scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, render_cfgsync_yaml}, topology::generation::GeneratedTopology, }; @@ -24,8 +22,6 @@ use tracing::{debug, info}; /// Paths and image metadata required to deploy the Helm chart. pub struct RunnerAssets { pub image: String, - pub kzg_mode: KzgMode, - pub kzg_path: Option, pub chart_path: PathBuf, pub cfgsync_file: PathBuf, pub run_cfgsync_script: PathBuf, @@ -54,8 +50,6 @@ pub enum AssetsError { }, #[error("missing required script at {path}")] MissingScript { path: PathBuf }, - #[error("missing KZG parameters at {path}; build them with `make kzgrs_test_params`")] - MissingKzg { path: PathBuf }, #[error("missing Helm chart at {path}; ensure the repository is up-to-date")] MissingChart { path: PathBuf }, #[error("failed to create temporary directory for rendered assets: {source}")] @@ -76,53 +70,37 @@ pub enum AssetsError { }, } -/// Render cfgsync config, Helm values, and locate scripts/KZG assets for a +/// Render cfgsync config, Helm values, and locate scripts for a /// topology. pub fn prepare_assets( topology: &GeneratedTopology, metrics_otlp_ingest_url: Option<&Url>, ) -> Result { info!( - validators = topology.validators().len(), + nodes = topology.nodes().len(), "preparing k8s runner assets" ); let root = workspace_root().map_err(|source| AssetsError::WorkspaceRoot { source })?; - let kzg_spec = KzgParamsSpec::for_k8s(&root); - let tempdir = create_assets_tempdir()?; - let cfgsync_file = render_and_write_cfgsync( - &root, - topology, - &kzg_spec, - metrics_otlp_ingest_url, - &tempdir, - )?; + let cfgsync_file = + render_and_write_cfgsync(&root, topology, metrics_otlp_ingest_url, &tempdir)?; let scripts = validate_scripts(&root)?; - let kzg_path = resolve_kzg_path(&root, &kzg_spec)?; let chart_path = helm_chart_path()?; let values_file = render_and_write_values(topology, &tempdir)?; let image = testnet_image(); - let kzg_display = kzg_path - .as_ref() - .map(|path| path.display().to_string()) - .unwrap_or_else(|| "".to_string()); debug!( cfgsync = %cfgsync_file.display(), values = %values_file.display(), image, - kzg_mode = ?kzg_spec.mode, - kzg = %kzg_display, chart = %chart_path.display(), "k8s runner assets prepared" ); Ok(RunnerAssets { image, - kzg_mode: kzg_spec.mode, - kzg_path, chart_path, cfgsync_file, run_nomos_script: scripts.run_shared, @@ -143,21 +121,13 @@ fn create_assets_tempdir() -> Result { fn render_and_write_cfgsync( root: &Path, topology: &GeneratedTopology, - kzg_spec: &KzgParamsSpec, metrics_otlp_ingest_url: Option<&Url>, tempdir: &TempDir, ) -> Result { - let cfgsync_yaml = render_cfgsync_config(root, topology, kzg_spec, metrics_otlp_ingest_url)?; + let cfgsync_yaml = render_cfgsync_config(root, topology, metrics_otlp_ingest_url)?; write_temp_file(tempdir.path(), "cfgsync.yaml", cfgsync_yaml) } -fn resolve_kzg_path(root: &Path, kzg_spec: &KzgParamsSpec) -> Result, AssetsError> { - match kzg_spec.mode { - KzgMode::HostPath => Ok(Some(validate_kzg_params(root, kzg_spec)?)), - KzgMode::InImage => Ok(None), - } -} - fn render_and_write_values( topology: &GeneratedTopology, tempdir: &TempDir, @@ -176,7 +146,6 @@ const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300; fn render_cfgsync_config( root: &Path, topology: &GeneratedTopology, - kzg_spec: &KzgParamsSpec, 
metrics_otlp_ingest_url: Option<&Url>, ) -> Result { let cfgsync_template_path = stack_assets_root(root).join("cfgsync.yaml"); @@ -185,8 +154,7 @@ fn render_cfgsync_config( let mut cfg = load_cfgsync_template(&cfgsync_template_path) .map_err(|source| AssetsError::Cfgsync { source })?; - apply_topology_overrides(&mut cfg, topology, kzg_spec.mode == KzgMode::HostPath); - cfg.global_params_path = kzg_spec.node_params_path.clone(); + apply_topology_overrides(&mut cfg, topology); if let Some(endpoint) = metrics_otlp_ingest_url.cloned() { cfg.tracing_settings.metrics = MetricsLayer::Otlp(OtlpMetricsConfig { @@ -232,19 +200,6 @@ fn validate_scripts(root: &Path) -> Result { }) } -fn validate_kzg_params(root: &Path, spec: &KzgParamsSpec) -> Result { - let Some(path) = spec.host_params_dir.clone() else { - return Err(AssetsError::MissingKzg { - path: root.join(testing_framework_config::constants::DEFAULT_KZG_HOST_DIR), - }); - }; - if path.exists() { - Ok(path) - } else { - Err(AssetsError::MissingKzg { path }) - } -} - fn helm_chart_path() -> Result { let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("helm/nomos-runner"); if path.exists() { @@ -309,7 +264,7 @@ struct HelmValues { #[serde(rename = "imagePullPolicy")] image_pull_policy: String, cfgsync: CfgsyncValues, - validators: NodeGroup, + nodes: NodeGroup, } #[derive(Serialize)] @@ -340,12 +295,12 @@ fn build_values(topology: &GeneratedTopology) -> HelmValues { let image_pull_policy = tf_env::nomos_testnet_image_pull_policy().unwrap_or_else(|| "IfNotPresent".into()); debug!(pol_mode, "rendering Helm values for k8s stack"); - let validators = build_node_group("validator", topology.validators(), &pol_mode); + let nodes = build_node_group("node", topology.nodes(), &pol_mode); HelmValues { image_pull_policy, cfgsync, - validators, + nodes, } } diff --git a/testing-framework/deployers/k8s/src/infrastructure/cluster.rs b/testing-framework/deployers/k8s/src/infrastructure/cluster.rs index a0430a4..e313b02 100644 --- a/testing-framework/deployers/k8s/src/infrastructure/cluster.rs +++ b/testing-framework/deployers/k8s/src/infrastructure/cluster.rs @@ -4,7 +4,7 @@ use kube::Client; use reqwest::Url; use testing_framework_core::{ nodes::ApiClient, - scenario::{CleanupGuard, NodeClients, http_probe::NodeRole}, + scenario::{CleanupGuard, NodeClients, http_probe::NODE_ROLE}, topology::{generation::GeneratedTopology, readiness::ReadinessError}, }; use tracing::{debug, info}; @@ -21,7 +21,7 @@ use crate::{ #[derive(Default)] pub struct PortSpecs { - pub validators: Vec, + pub nodes: Vec, } /// Holds k8s namespace, Helm release, port forwards, and cleanup guard. 
@@ -30,9 +30,9 @@ pub struct ClusterEnvironment { namespace: String, release: String, cleanup: Option, - validator_host: String, - validator_api_ports: Vec, - validator_testing_ports: Vec, + node_host: String, + node_api_ports: Vec, + node_testing_ports: Vec, port_forwards: Vec, } @@ -51,17 +51,17 @@ impl ClusterEnvironment { ports: &ClusterPorts, port_forwards: Vec, ) -> Self { - let validator_api_ports = ports.validators.iter().map(|ports| ports.api).collect(); - let validator_testing_ports = ports.validators.iter().map(|ports| ports.testing).collect(); + let node_api_ports = ports.nodes.iter().map(|ports| ports.api).collect(); + let node_testing_ports = ports.nodes.iter().map(|ports| ports.testing).collect(); Self { client, namespace, release, cleanup: Some(cleanup), - validator_host: ports.validator_host.clone(), - validator_api_ports, - validator_testing_ports, + node_host: ports.node_host.clone(), + node_api_ports, + node_testing_ports, port_forwards, } } @@ -99,20 +99,17 @@ impl ClusterEnvironment { &self.release } - pub fn validator_ports(&self) -> (&[u16], &[u16]) { - (&self.validator_api_ports, &self.validator_testing_ports) + pub fn node_ports(&self) -> (&[u16], &[u16]) { + (&self.node_api_ports, &self.node_testing_ports) } } #[derive(Debug, thiserror::Error)] /// Failures while building node clients against forwarded ports. pub enum NodeClientError { - #[error( - "failed to build {endpoint} client URL for {role} port {port}: {source}", - role = role.label() - )] + #[error("failed to build {endpoint} client URL for {role} port {port}: {source}")] Endpoint { - role: NodeRole, + role: &'static str, endpoint: &'static str, port: u16, #[source] @@ -123,12 +120,9 @@ pub enum NodeClientError { #[derive(Debug, thiserror::Error)] /// Readiness check failures for the remote cluster endpoints. 
pub enum RemoteReadinessError { - #[error( - "failed to build readiness URL for {role} port {port}: {source}", - role = role.label() - )] + #[error("failed to build readiness URL for {role} port {port}: {source}")] Endpoint { - role: NodeRole, + role: &'static str, port: u16, #[source] source: ParseError, @@ -141,8 +135,8 @@ pub enum RemoteReadinessError { } pub fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs { - let validators = descriptors - .validators() + let nodes = descriptors + .nodes() .iter() .map(|node| NodeConfigPorts { api: node.general.api_config.address.port(), @@ -150,35 +144,27 @@ pub fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs { }) .collect(); - let specs = PortSpecs { validators }; + let specs = PortSpecs { nodes }; - debug!( - validators = specs.validators.len(), - "collected k8s port specs" - ); + debug!(nodes = specs.nodes.len(), "collected k8s port specs"); specs } pub fn build_node_clients(cluster: &ClusterEnvironment) -> Result { - let validators = cluster - .validator_api_ports + let nodes = cluster + .node_api_ports .iter() .copied() - .zip(cluster.validator_testing_ports.iter().copied()) + .zip(cluster.node_testing_ports.iter().copied()) .map(|(api_port, testing_port)| { - api_client_from_ports( - &cluster.validator_host, - NodeRole::Validator, - api_port, - testing_port, - ) + api_client_from_ports(&cluster.node_host, NODE_ROLE, api_port, testing_port) }) .collect::, _>>()?; - debug!(validators = validators.len(), "built k8s node clients"); + debug!(nodes = nodes.len(), "built k8s node clients"); - Ok(NodeClients::new(validators)) + Ok(NodeClients::new(nodes)) } pub async fn ensure_cluster_readiness( @@ -186,18 +172,17 @@ pub async fn ensure_cluster_readiness( cluster: &ClusterEnvironment, ) -> Result<(), RemoteReadinessError> { info!("waiting for remote readiness (API + membership)"); - let (validator_api, _validator_testing) = cluster.validator_ports(); + let (node_api, _node_testing) = cluster.node_ports(); - let validator_urls = - readiness_urls(validator_api, NodeRole::Validator, &cluster.validator_host)?; + let node_urls = readiness_urls(node_api, NODE_ROLE, &cluster.node_host)?; descriptors - .wait_remote_readiness(&validator_urls) + .wait_remote_readiness(&node_urls) .await .map_err(|source| RemoteReadinessError::Remote { source })?; info!( - validator_api_ports = ?validator_api, + node_api_ports = ?node_api, "k8s remote readiness confirmed" ); @@ -225,14 +210,14 @@ pub async fn install_stack( assets: &RunnerAssets, namespace: &str, release: &str, - validators: usize, + nodes: usize, ) -> Result { tracing::info!( release = %release, namespace = %namespace, "installing helm release" ); - crate::infrastructure::helm::install_release(assets, release, namespace, validators).await?; + crate::infrastructure::helm::install_release(assets, release, namespace, nodes).await?; tracing::info!(release = %release, "helm install succeeded"); let preserve = env::var("K8S_RUNNER_PRESERVE").is_ok(); @@ -252,15 +237,15 @@ pub async fn wait_for_ports_or_cleanup( cleanup_guard: &mut Option, ) -> Result { info!( - validators = specs.validators.len(), + nodes = specs.nodes.len(), %namespace, %release, "waiting for cluster port-forwards" ); - match wait_for_cluster_ready(client, namespace, release, &specs.validators).await { + match wait_for_cluster_ready(client, namespace, release, &specs.nodes).await { Ok(ports) => { info!( - validator_ports = ?ports.ports.validators, + node_ports = ?ports.ports.nodes, "cluster port-forwards 
established" ); Ok(ports) @@ -288,7 +273,7 @@ async fn cleanup_pending(client: &Client, namespace: &str, guard: &mut Option Result, RemoteReadinessError> { ports @@ -298,7 +283,7 @@ fn readiness_urls( .collect() } -fn readiness_url(host: &str, role: NodeRole, port: u16) -> Result { +fn readiness_url(host: &str, role: &'static str, port: u16) -> Result { cluster_host_url(host, port).map_err(|source| RemoteReadinessError::Endpoint { role, port, @@ -312,7 +297,7 @@ fn cluster_host_url(host: &str, port: u16) -> Result { fn api_client_from_ports( host: &str, - role: NodeRole, + role: &'static str, api_port: u16, testing_port: u16, ) -> Result { diff --git a/testing-framework/deployers/k8s/src/infrastructure/helm.rs b/testing-framework/deployers/k8s/src/infrastructure/helm.rs index 3e35a8c..add216d 100644 --- a/testing-framework/deployers/k8s/src/infrastructure/helm.rs +++ b/testing-framework/deployers/k8s/src/infrastructure/helm.rs @@ -1,10 +1,10 @@ -use std::{io, path::Path, process::Stdio}; +use std::{io, process::Stdio}; use thiserror::Error; use tokio::process::Command; use tracing::{debug, info}; -use crate::infrastructure::assets::{KzgMode, RunnerAssets, cfgsync_port_value, workspace_root}; +use crate::infrastructure::assets::{RunnerAssets, cfgsync_port_value, workspace_root}; /// Errors returned from Helm invocations. #[derive(Debug, Error)] @@ -15,8 +15,6 @@ pub enum HelmError { #[source] source: io::Error, }, - #[error("kzg_path must be present for HostPath mode")] - MissingKzgPath, #[error("{command} exited with status {status:?}\nstderr:\n{stderr}\nstdout:\n{stdout}")] Failed { command: String, @@ -31,23 +29,20 @@ pub async fn install_release( assets: &RunnerAssets, release: &str, namespace: &str, - validators: usize, + nodes: usize, ) -> Result<(), HelmError> { - let kzg = resolve_kzg_install_args(assets)?; info!( release, namespace, - validators, + nodes, image = %assets.image, cfgsync_port = cfgsync_port_value(), - kzg_mode = ?assets.kzg_mode, - kzg = %kzg.display(), values = %assets.values_file.display(), "installing helm release" ); let command = format!("helm install {release}"); - let cmd = build_install_command(assets, release, namespace, validators, &kzg, &command); + let cmd = build_install_command(assets, release, namespace, nodes, &command); let output = run_helm_command(cmd, &command).await?; maybe_log_install_output(&command, &output); @@ -56,49 +51,11 @@ pub async fn install_release( Ok(()) } -struct KzgInstallArgs<'a> { - mode: &'static str, - host_path: Option<&'a Path>, - host_path_type: Option<&'static str>, -} - -impl KzgInstallArgs<'_> { - fn display(&self) -> String { - self.host_path - .map(|p| p.display().to_string()) - .unwrap_or_else(|| "".to_string()) - } -} - -fn resolve_kzg_install_args(assets: &RunnerAssets) -> Result, HelmError> { - match assets.kzg_mode { - KzgMode::HostPath => { - let host_path = assets.kzg_path.as_ref().ok_or(HelmError::MissingKzgPath)?; - let host_path_type = if host_path.is_dir() { - "Directory" - } else { - "File" - }; - Ok(KzgInstallArgs { - mode: "kzg.mode=hostPath", - host_path: Some(host_path), - host_path_type: Some(host_path_type), - }) - } - KzgMode::InImage => Ok(KzgInstallArgs { - mode: "kzg.mode=inImage", - host_path: None, - host_path_type: None, - }), - } -} - fn build_install_command( assets: &RunnerAssets, release: &str, namespace: &str, - validators: usize, - kzg: &KzgInstallArgs<'_>, + nodes: usize, command: &str, ) -> Command { let mut cmd = Command::new("helm"); @@ -114,13 +71,11 @@ fn build_install_command( 
.arg("--set") .arg(format!("image={}", assets.image)) .arg("--set") - .arg(format!("validators.count={validators}")) + .arg(format!("nodes.count={nodes}")) .arg("--set") .arg(format!("cfgsync.port={}", cfgsync_port_value())) .arg("-f") .arg(&assets.values_file) - .arg("--set") - .arg(kzg.mode) .arg("--set-file") .arg(format!("cfgsync.config={}", assets.cfgsync_file.display())) .arg("--set-file") @@ -141,13 +96,6 @@ fn build_install_command( .stdout(Stdio::piped()) .stderr(Stdio::piped()); - if let (Some(host_path), Some(host_path_type)) = (kzg.host_path, kzg.host_path_type) { - cmd.arg("--set") - .arg(format!("kzg.hostPath={}", host_path.display())) - .arg("--set") - .arg(format!("kzg.hostPathType={host_path_type}")); - } - if let Ok(root) = workspace_root() { cmd.current_dir(root); } diff --git a/testing-framework/deployers/k8s/src/lifecycle/block_feed.rs b/testing-framework/deployers/k8s/src/lifecycle/block_feed.rs index ff444cc..054924d 100644 --- a/testing-framework/deployers/k8s/src/lifecycle/block_feed.rs +++ b/testing-framework/deployers/k8s/src/lifecycle/block_feed.rs @@ -7,12 +7,12 @@ pub async fn spawn_block_feed_with( node_clients: &NodeClients, ) -> Result<(BlockFeed, BlockFeedTask), K8sRunnerError> { debug!( - validators = node_clients.validator_clients().len(), + nodes = node_clients.node_clients().len(), "selecting node client for block feed" ); let block_source_client = node_clients - .validator_clients() + .node_clients() .into_iter() .next() .or_else(|| node_clients.any_client()) diff --git a/testing-framework/deployers/k8s/src/lifecycle/wait/http_probe.rs b/testing-framework/deployers/k8s/src/lifecycle/wait/http_probe.rs index 0715305..9608f2a 100644 --- a/testing-framework/deployers/k8s/src/lifecycle/wait/http_probe.rs +++ b/testing-framework/deployers/k8s/src/lifecycle/wait/http_probe.rs @@ -1,11 +1,11 @@ -use testing_framework_core::scenario::http_probe::{self, HttpReadinessError, NodeRole}; +use testing_framework_core::scenario::http_probe::{self, HttpReadinessError}; use super::{ClusterWaitError, http_poll_interval, node_http_probe_timeout, node_http_timeout}; use crate::host::node_host; pub async fn wait_for_node_http_nodeport( ports: &[u16], - role: NodeRole, + role: &'static str, ) -> Result<(), ClusterWaitError> { let host = node_host(); wait_for_node_http_on_host(ports, role, &host, node_http_probe_timeout()).await @@ -15,14 +15,14 @@ const LOCALHOST: &str = "127.0.0.1"; pub async fn wait_for_node_http_port_forward( ports: &[u16], - role: NodeRole, + role: &'static str, ) -> Result<(), ClusterWaitError> { wait_for_node_http_on_host(ports, role, LOCALHOST, node_http_timeout()).await } async fn wait_for_node_http_on_host( ports: &[u16], - role: NodeRole, + role: &'static str, host: &str, timeout: std::time::Duration, ) -> Result<(), ClusterWaitError> { diff --git a/testing-framework/deployers/k8s/src/lifecycle/wait/mod.rs b/testing-framework/deployers/k8s/src/lifecycle/wait/mod.rs index a24a59c..584fe2f 100644 --- a/testing-framework/deployers/k8s/src/lifecycle/wait/mod.rs +++ b/testing-framework/deployers/k8s/src/lifecycle/wait/mod.rs @@ -1,7 +1,6 @@ use std::{env, sync::LazyLock, time::Duration}; use kube::Error as KubeError; -use testing_framework_core::scenario::http_probe::NodeRole; use thiserror::Error; mod deployment; @@ -41,8 +40,8 @@ pub struct HostPort { /// All port assignments for the cluster. 
#[derive(Debug)] pub struct ClusterPorts { - pub validators: Vec, - pub validator_host: String, + pub nodes: Vec, + pub node_host: String, } /// Success result from waiting for the cluster: host ports and forward handles. @@ -75,14 +74,11 @@ pub enum ClusterWaitError { }, #[error("service {service} did not allocate a node port for {port}")] NodePortUnavailable { service: String, port: u16 }, - #[error("cluster must have at least one validator")] - MissingValidator, - #[error( - "timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", - role = role.label() - )] + #[error("cluster must have at least one node")] + MissingNode, + #[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")] NodeHttpTimeout { - role: NodeRole, + role: &'static str, port: u16, timeout: Duration, }, diff --git a/testing-framework/deployers/k8s/src/lifecycle/wait/orchestrator.rs b/testing-framework/deployers/k8s/src/lifecycle/wait/orchestrator.rs index b5a5abe..b8962c4 100644 --- a/testing-framework/deployers/k8s/src/lifecycle/wait/orchestrator.rs +++ b/testing-framework/deployers/k8s/src/lifecycle/wait/orchestrator.rs @@ -1,5 +1,5 @@ use kube::Client; -use testing_framework_core::scenario::http_probe::NodeRole; +use testing_framework_core::scenario::http_probe::NODE_ROLE; use super::{ClusterPorts, ClusterReady, ClusterWaitError, NodeConfigPorts}; use crate::lifecycle::wait::{ @@ -13,41 +13,38 @@ pub async fn wait_for_cluster_ready( client: &Client, namespace: &str, release: &str, - validator_ports: &[NodeConfigPorts], + node_ports: &[NodeConfigPorts], ) -> Result { - if validator_ports.is_empty() { - return Err(ClusterWaitError::MissingValidator); + if node_ports.is_empty() { + return Err(ClusterWaitError::MissingNode); } - let mut validator_allocations = Vec::with_capacity(validator_ports.len()); - let mut validator_host = crate::host::node_host(); + let mut node_allocations = Vec::with_capacity(node_ports.len()); + let mut node_host = crate::host::node_host(); - for (index, ports) in validator_ports.iter().enumerate() { - let name = format!("{release}-validator-{index}"); + for (index, ports) in node_ports.iter().enumerate() { + let name = format!("{release}-node-{index}"); wait_for_deployment_ready(client, namespace, &name).await?; let allocation = discover_node_ports(client, namespace, &name, *ports).await?; - validator_allocations.push(allocation); + node_allocations.push(allocation); } let mut port_forwards: Vec = Vec::new(); - let validator_api_ports: Vec = validator_allocations - .iter() - .map(|ports| ports.api) - .collect(); - if wait_for_node_http_nodeport(&validator_api_ports, NodeRole::Validator) + let node_api_ports: Vec = node_allocations.iter().map(|ports| ports.api).collect(); + if wait_for_node_http_nodeport(&node_api_ports, NODE_ROLE) .await .is_err() { - validator_allocations.clear(); - validator_host = "127.0.0.1".to_owned(); + node_allocations.clear(); + node_host = "127.0.0.1".to_owned(); let namespace = namespace.to_owned(); let release = release.to_owned(); - let ports = validator_ports.to_vec(); + let ports = node_ports.to_vec(); let (forwards, allocations) = tokio::task::spawn_blocking(move || { let mut allocations = Vec::with_capacity(ports.len()); let forwards = - port_forward_group(&namespace, &release, "validator", &ports, &mut allocations)?; + port_forward_group(&namespace, &release, "node", &ports, &mut allocations)?; Ok::<_, ClusterWaitError>((forwards, allocations)) }) .await @@ -55,14 +52,9 @@ pub async fn wait_for_cluster_ready( 
source: source.into(), })??; port_forwards = forwards; - validator_allocations = allocations; - let validator_api_ports: Vec = validator_allocations - .iter() - .map(|ports| ports.api) - .collect(); - if let Err(err) = - wait_for_node_http_port_forward(&validator_api_ports, NodeRole::Validator).await - { + node_allocations = allocations; + let node_api_ports: Vec = node_allocations.iter().map(|ports| ports.api).collect(); + if let Err(err) = wait_for_node_http_port_forward(&node_api_ports, NODE_ROLE).await { kill_port_forwards(&mut port_forwards); return Err(err); } @@ -70,8 +62,8 @@ pub async fn wait_for_cluster_ready( Ok(ClusterReady { ports: ClusterPorts { - validators: validator_allocations, - validator_host, + nodes: node_allocations, + node_host, }, port_forwards, }) diff --git a/testing-framework/deployers/local/src/manual/mod.rs b/testing-framework/deployers/local/src/manual/mod.rs index e72f859..f935065 100644 --- a/testing-framework/deployers/local/src/manual/mod.rs +++ b/testing-framework/deployers/local/src/manual/mod.rs @@ -49,19 +49,19 @@ impl LocalManualCluster { self.nodes.node_client(name) } - pub async fn start_validator(&self, name: &str) -> Result { + pub async fn start_node(&self, name: &str) -> Result { Ok(self .nodes - .start_validator_with(name, StartNodeOptions::default()) + .start_node_with(name, StartNodeOptions::default()) .await?) } - pub async fn start_validator_with( + pub async fn start_node_with( &self, name: &str, options: StartNodeOptions, ) -> Result { - Ok(self.nodes.start_validator_with(name, options).await?) + Ok(self.nodes.start_node_with(name, options).await?) } pub fn stop_all(&self) { @@ -94,12 +94,12 @@ impl Drop for LocalManualCluster { #[async_trait::async_trait] impl ManualClusterHandle for LocalManualCluster { - async fn start_validator_with( + async fn start_node_with( &self, name: &str, options: StartNodeOptions, ) -> Result { - self.start_validator_with(name, options) + self.start_node_with(name, options) .await .map_err(|err| err.into()) } diff --git a/testing-framework/deployers/local/src/node_control/config.rs b/testing-framework/deployers/local/src/node_control/config.rs index 7e1cb8e..26062ad 100644 --- a/testing-framework/deployers/local/src/node_control/config.rs +++ b/testing-framework/deployers/local/src/node_control/config.rs @@ -13,7 +13,7 @@ use testing_framework_core::{ topology::{ config::TopologyConfig, configs::GeneralConfig, - generation::{GeneratedNodeConfig, GeneratedTopology, NodeRole}, + generation::{GeneratedNodeConfig, GeneratedTopology}, }, }; @@ -23,13 +23,12 @@ pub(super) fn build_general_config_for( descriptors: &GeneratedTopology, base_consensus: &consensus::GeneralConsensusConfig, base_time: &GeneralTimeConfig, - role: NodeRole, index: usize, peer_ports_by_name: &HashMap, options: &StartNodeOptions, peer_ports: &[u16], ) -> Result<(GeneralConfig, u16), LocalDynamicError> { - if let Some(node) = descriptor_for(descriptors, role, index) { + if let Some(node) = descriptor_for(descriptors, index) { let mut config = node.general.clone(); let initial_peers = resolve_initial_peers( peer_ports_by_name, @@ -65,14 +64,8 @@ pub(super) fn build_general_config_for( Ok((general_config, network_port)) } -fn descriptor_for( - descriptors: &GeneratedTopology, - role: NodeRole, - index: usize, -) -> Option<&GeneratedNodeConfig> { - match role { - NodeRole::Validator => descriptors.validators().get(index), - } +fn descriptor_for(descriptors: &GeneratedTopology, index: usize) -> Option<&GeneratedNodeConfig> { + 
descriptors.nodes().get(index) } fn resolve_peer_names( diff --git a/testing-framework/deployers/local/src/node_control/mod.rs b/testing-framework/deployers/local/src/node_control/mod.rs index cd31237..7bb0acc 100644 --- a/testing-framework/deployers/local/src/node_control/mod.rs +++ b/testing-framework/deployers/local/src/node_control/mod.rs @@ -3,16 +3,16 @@ use std::{ sync::Mutex, }; -use nomos_node::Config as ValidatorConfig; +use nomos_node::Config as NodeConfig; use testing_framework_config::topology::configs::{consensus, time}; use testing_framework_core::{ nodes::{ ApiClient, - validator::{Validator, create_validator_config}, + node::{Node, create_node_config}, }, scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode}, topology::{ - generation::{GeneratedTopology, NodeRole, find_expected_peer_counts}, + generation::{GeneratedTopology, find_expected_peer_counts}, utils::multiaddr_port, }, }; @@ -54,7 +54,7 @@ pub struct LocalDynamicNodes { #[derive(Clone, Default)] pub struct LocalDynamicSeed { - pub validator_count: usize, + pub node_count: usize, pub peer_ports: Vec, pub peer_ports_by_name: HashMap, } @@ -64,17 +64,18 @@ impl LocalDynamicSeed { pub fn from_topology(descriptors: &GeneratedTopology) -> Self { let peer_ports = descriptors .nodes() + .iter() .map(|node| node.network_port()) .collect::>(); let peer_ports_by_name = descriptors - .validators() + .nodes() .iter() - .map(|node| (format!("validator-{}", node.index()), node.network_port())) + .map(|node| (format!("node-{}", node.index()), node.network_port())) .collect(); Self { - validator_count: descriptors.validators().len(), + node_count: descriptors.nodes().len(), peer_ports, peer_ports_by_name, } @@ -98,7 +99,7 @@ impl LocalDynamicNodes { seed: LocalDynamicSeed, ) -> Self { let base_node = descriptors - .validators() + .nodes() .first() .expect("generated topology must include at least one node"); @@ -106,11 +107,11 @@ impl LocalDynamicNodes { let base_time = base_node.general.time_config.clone(); let state = LocalDynamicState { - validator_count: seed.validator_count, + node_count: seed.node_count, peer_ports: seed.peer_ports.clone(), peer_ports_by_name: seed.peer_ports_by_name.clone(), clients_by_name: HashMap::new(), - validators: Vec::new(), + nodes: Vec::new(), }; Self { @@ -139,22 +140,22 @@ impl LocalDynamicNodes { .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); - state.validators.clear(); + state.nodes.clear(); state.peer_ports.clone_from(&self.seed.peer_ports); state .peer_ports_by_name .clone_from(&self.seed.peer_ports_by_name); state.clients_by_name.clear(); - state.validator_count = self.seed.validator_count; + state.node_count = self.seed.node_count; self.node_clients.clear(); } - pub async fn start_validator_with( + pub async fn start_node_with( &self, name: &str, options: StartNodeOptions, ) -> Result { - self.start_node(NodeRole::Validator, name, options).await + self.start_node(name, options).await } pub(crate) fn readiness_nodes(&self) -> Vec { @@ -164,13 +165,13 @@ impl LocalDynamicNodes { .unwrap_or_else(|poisoned| poisoned.into_inner()); let listen_ports = state - .validators + .nodes .iter() .map(|node| node.config().network.backend.swarm.port) .collect::>(); let initial_peer_ports = state - .validators + .nodes .iter() .map(|node| { node.config() @@ -186,14 +187,11 @@ impl LocalDynamicNodes { let expected_peer_counts = find_expected_peer_counts(&listen_ports, &initial_peer_ports); state - .validators + .nodes .iter() .enumerate() .map(|(idx, node)| ReadinessNode { - 
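// Labels take the form node#<index>@<swarm-port> (e.g. node#0@3000, port
// illustrative) and are used when reporting readiness.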
label: format!( - "validator#{idx}@{}", - node.config().network.backend.swarm.port - ), + label: format!("node#{idx}@{}", node.config().network.backend.swarm.port), expected_peers: expected_peer_counts.get(idx).copied(), api: node.api().clone(), }) @@ -202,7 +200,6 @@ impl LocalDynamicNodes { async fn start_node( &self, - role: NodeRole, name: &str, options: StartNodeOptions, ) -> Result { @@ -212,14 +209,11 @@ impl LocalDynamicNodes { .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); - let (index, role_label) = match role { - NodeRole::Validator => (state.validator_count, "validator"), - }; - + let index = state.node_count; let label = if name.trim().is_empty() { - format!("{role_label}-{index}") + format!("node-{index}") } else { - format!("{role_label}-{name}") + format!("node-{name}") }; if state.peer_ports_by_name.contains_key(&label) { @@ -240,47 +234,42 @@ impl LocalDynamicNodes { &self.descriptors, &self.base_consensus, &self.base_time, - role, index, &peer_ports_by_name, &options, &peer_ports, )?; - let api_client = match role { - NodeRole::Validator => { - let config = create_validator_config(general_config); - self.spawn_and_register_validator(&node_name, network_port, config) - .await? - } - }; + let config = create_node_config(general_config); + let api_client = self + .spawn_and_register_node(&node_name, network_port, config) + .await?; Ok(StartedNode { name: node_name, - role, api: api_client, }) } - async fn spawn_and_register_validator( + async fn spawn_and_register_node( &self, node_name: &str, network_port: u16, - config: ValidatorConfig, + config: NodeConfig, ) -> Result { - let node = Validator::spawn(config, node_name) + let node = Node::spawn(config, node_name) .await .map_err(|source| LocalDynamicError::Spawn { source })?; let client = node.api().clone(); - self.node_clients.add_validator(client.clone()); + self.node_clients.add_node(client.clone()); let mut state = self .state .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); - state.register_validator(node_name, network_port, client.clone(), node); + state.register_node(node_name, network_port, client.clone(), node); Ok(client) } @@ -288,22 +277,22 @@ impl LocalDynamicNodes { #[async_trait::async_trait] impl NodeControlHandle for LocalDynamicNodes { - async fn restart_validator(&self, _index: usize) -> Result<(), DynError> { - Err("local deployer does not support restart_validator".into()) + async fn restart_node(&self, _index: usize) -> Result<(), DynError> { + Err("local deployer does not support restart_node".into()) } - async fn start_validator(&self, name: &str) -> Result { - self.start_validator_with(name, StartNodeOptions::default()) + async fn start_node(&self, name: &str) -> Result { + self.start_node_with(name, StartNodeOptions::default()) .await .map_err(|err| err.into()) } - async fn start_validator_with( + async fn start_node_with( &self, name: &str, options: StartNodeOptions, ) -> Result { - self.start_validator_with(name, options) + self.start_node_with(name, options) .await .map_err(|err| err.into()) } diff --git a/testing-framework/deployers/local/src/node_control/state.rs b/testing-framework/deployers/local/src/node_control/state.rs index b3c4ddb..4eb03f8 100644 --- a/testing-framework/deployers/local/src/node_control/state.rs +++ b/testing-framework/deployers/local/src/node_control/state.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; -use testing_framework_core::nodes::{ApiClient, validator::Validator}; +use testing_framework_core::nodes::{ApiClient, node::Node}; 
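// State is now role-agnostic: one counter, one Vec of spawned nodes, and
// clients keyed by the node label.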
pub(crate) struct LocalDynamicState { - pub(crate) validator_count: usize, + pub(crate) node_count: usize, pub(crate) peer_ports: Vec, pub(crate) peer_ports_by_name: HashMap, pub(crate) clients_by_name: HashMap, - pub(crate) validators: Vec, + pub(crate) nodes: Vec, } impl LocalDynamicState { @@ -18,15 +18,15 @@ impl LocalDynamicState { self.clients_by_name.insert(node_name.to_string(), client); } - pub(super) fn register_validator( + pub(super) fn register_node( &mut self, node_name: &str, network_port: u16, client: ApiClient, - node: Validator, + node: Node, ) { self.register_common(node_name, network_port, client); - self.validator_count += 1; - self.validators.push(node); + self.node_count += 1; + self.nodes.push(node); } } diff --git a/testing-framework/deployers/local/src/runner.rs b/testing-framework/deployers/local/src/runner.rs index fc4d7d3..98b467b 100644 --- a/testing-framework/deployers/local/src/runner.rs +++ b/testing-framework/deployers/local/src/runner.rs @@ -19,10 +19,12 @@ use crate::{ manual::{LocalManualCluster, ManualClusterError}, node_control::{LocalDynamicNodes, LocalDynamicSeed}, }; -/// Spawns validators as local processes, reusing the existing +/// Spawns nodes as local processes, reusing the existing /// integration harness. #[derive(Clone)] -pub struct LocalDeployer {} +pub struct LocalDeployer { + membership_check: bool, +} /// Errors surfaced by the local deployer while driving a scenario. #[derive(Debug, Error)] @@ -66,10 +68,10 @@ impl Deployer<()> for LocalDeployer { async fn deploy(&self, scenario: &Scenario<()>) -> Result { info!( - validators = scenario.topology().validators().len(), + nodes = scenario.topology().nodes().len(), "starting local deployment" ); - let topology = Self::prepare_topology(scenario).await?; + let topology = Self::prepare_topology(scenario, self.membership_check).await?; let node_clients = NodeClients::from_topology(scenario.topology(), &topology); let (block_feed, block_feed_guard) = spawn_block_feed_with(&node_clients).await?; @@ -97,11 +99,11 @@ impl Deployer for LocalDeployer { scenario: &Scenario, ) -> Result { info!( - validators = scenario.topology().validators().len(), + nodes = scenario.topology().nodes().len(), "starting local deployment with node control" ); - let topology = Self::prepare_topology(scenario).await?; + let topology = Self::prepare_topology(scenario, self.membership_check).await?; let node_clients = NodeClients::from_topology(scenario.topology(), &topology); let node_control = Arc::new(LocalDynamicNodes::new_with_seed( scenario.topology().clone(), @@ -132,6 +134,14 @@ impl LocalDeployer { Self::default() } + #[must_use] + /// Configure whether the deployer should enforce membership readiness + /// checks. + pub fn with_membership_check(mut self, enabled: bool) -> Self { + self.membership_check = enabled; + self + } + /// Build a manual cluster using this deployer's local implementation. 
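// Presumably returns the LocalManualCluster defined in manual/mod.rs above
// (the return type is elided by this hunk), which also implements
// ManualClusterHandle so scenarios can start nodes through the trait.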
pub fn manual_cluster( &self, @@ -142,13 +152,11 @@ impl LocalDeployer { async fn prepare_topology( scenario: &Scenario, + membership_check: bool, ) -> Result { let descriptors = scenario.topology(); - info!( - validators = descriptors.validators().len(), - "spawning local validators" - ); + info!(nodes = descriptors.nodes().len(), "spawning local nodes"); let topology = descriptors .clone() @@ -156,12 +164,16 @@ impl LocalDeployer { .await .map_err(|source| LocalDeployerError::Spawn { source })?; - wait_for_readiness(&topology).await.map_err(|source| { - debug!(error = ?source, "local readiness failed"); - LocalDeployerError::ReadinessFailed { source } - })?; + if membership_check { + wait_for_readiness(&topology).await.map_err(|source| { + debug!(error = ?source, "local readiness failed"); + LocalDeployerError::ReadinessFailed { source } + })?; - info!("local nodes are ready"); + info!("local nodes are ready"); + } else { + info!("skipping local membership readiness checks"); + } Ok(topology) } @@ -169,7 +181,9 @@ impl LocalDeployer { impl Default for LocalDeployer { fn default() -> Self { - Self {} + Self { + membership_check: true, + } } } @@ -184,13 +198,13 @@ async fn spawn_block_feed_with( node_clients: &NodeClients, ) -> Result<(BlockFeed, BlockFeedTask), LocalDeployerError> { debug!( - validators = node_clients.validator_clients().len(), - "selecting validator client for local block feed" + nodes = node_clients.node_clients().len(), + "selecting node client for local block feed" ); - let Some(block_source_client) = node_clients.random_validator() else { + let Some(block_source_client) = node_clients.random_node() else { return Err(LocalDeployerError::WorkloadFailed { - source: "block feed requires at least one validator".into(), + source: "block feed requires at least one node".into(), }); }; diff --git a/testing-framework/env/src/lib.rs b/testing-framework/env/src/lib.rs index 438349e..671a392 100644 --- a/testing-framework/env/src/lib.rs +++ b/testing-framework/env/src/lib.rs @@ -7,69 +7,49 @@ pub fn slow_test_env() -> bool { #[must_use] pub fn debug_tracing() -> bool { - env::var("NOMOS_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true")) + env::var("LOGOS_BLOCKCHAIN_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true")) } #[must_use] pub fn nomos_log_dir() -> Option { - env::var("NOMOS_LOG_DIR").ok().map(PathBuf::from) + env::var("LOGOS_BLOCKCHAIN_LOG_DIR").ok().map(PathBuf::from) } #[must_use] pub fn nomos_log_level() -> Option { - env::var("NOMOS_LOG_LEVEL").ok() + env::var("LOGOS_BLOCKCHAIN_LOG_LEVEL").ok() } #[must_use] pub fn nomos_log_filter() -> Option { - env::var("NOMOS_LOG_FILTER").ok() + env::var("LOGOS_BLOCKCHAIN_LOG_FILTER").ok() } #[must_use] pub fn nomos_use_autonat() -> bool { - env::var("NOMOS_USE_AUTONAT").is_ok() + env::var("LOGOS_BLOCKCHAIN_USE_AUTONAT").is_ok() } #[must_use] pub fn nomos_cfgsync_port() -> Option { - env::var("NOMOS_CFGSYNC_PORT") + env::var("LOGOS_BLOCKCHAIN_CFGSYNC_PORT") .ok() .and_then(|v| v.parse::().ok()) } -#[must_use] -pub fn nomos_kzg_container_path() -> Option { - env::var("NOMOS_KZG_CONTAINER_PATH").ok() -} - #[must_use] pub fn nomos_tests_keep_logs() -> bool { - env::var("NOMOS_TESTS_KEEP_LOGS").is_ok() + env::var("LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS").is_ok() } #[must_use] pub fn nomos_testnet_image() -> Option { - env::var("NOMOS_TESTNET_IMAGE").ok() + env::var("LOGOS_BLOCKCHAIN_TESTNET_IMAGE").ok() } #[must_use] pub fn nomos_testnet_image_pull_policy() -> Option { - 
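// In this module only the variable names move to the LOGOS_BLOCKCHAIN_ prefix
// (with the kzg accessors dropped outright); the surviving nomos_* function
// names are left untouched, presumably to limit call-site churn.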
env::var("NOMOS_TESTNET_IMAGE_PULL_POLICY").ok() -} - -#[must_use] -pub fn nomos_kzg_mode() -> Option { - env::var("NOMOS_KZG_MODE").ok() -} - -#[must_use] -pub fn nomos_kzg_dir_rel() -> Option { - env::var("NOMOS_KZG_DIR_REL").ok() -} - -#[must_use] -pub fn nomos_kzg_file() -> Option { - env::var("NOMOS_KZG_FILE").ok() + env::var("LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY").ok() } #[must_use] @@ -84,20 +64,15 @@ pub fn rust_log() -> Option { #[must_use] pub fn nomos_time_backend() -> Option { - env::var("NOMOS_TIME_BACKEND").ok() -} - -#[must_use] -pub fn nomos_kzgrs_params_path() -> Option { - env::var("LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH").ok() + env::var("LOGOS_BLOCKCHAIN_TIME_BACKEND").ok() } #[must_use] pub fn nomos_otlp_endpoint() -> Option { - env::var("NOMOS_OTLP_ENDPOINT").ok() + env::var("LOGOS_BLOCKCHAIN_OTLP_ENDPOINT").ok() } #[must_use] pub fn nomos_otlp_metrics_endpoint() -> Option { - env::var("NOMOS_OTLP_METRICS_ENDPOINT").ok() + env::var("LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT").ok() } diff --git a/testing-framework/tools/cfgsync_tf/src/bin/cfgsync-client.rs b/testing-framework/tools/cfgsync_tf/src/bin/cfgsync-client.rs index d09f86e..2fe192c 100644 --- a/testing-framework/tools/cfgsync_tf/src/bin/cfgsync-client.rs +++ b/testing-framework/tools/cfgsync_tf/src/bin/cfgsync-client.rs @@ -4,7 +4,7 @@ use cfgsync_tf::{ client::{FetchedConfig, get_config}, server::ClientIp, }; -use nomos_node::Config as ValidatorConfig; +use nomos_node::Config as NodeConfig; use serde::{Serialize, de::DeserializeOwned}; use testing_framework_config::constants::cfgsync_port as default_cfgsync_port; use testing_framework_core::nodes::common::config::injection::{ @@ -67,10 +67,10 @@ async fn main() { testing_http_port, }; - let node_config_endpoint = format!("{server_addr}/validator"); + let node_config_endpoint = format!("{server_addr}/node"); let config_result = - pull_to_file::(payload, &node_config_endpoint, &config_file_path).await; + pull_to_file::(payload, &node_config_endpoint, &config_file_path).await; // Handle error if the config request fails if let Err(err) = config_result { diff --git a/testing-framework/tools/cfgsync_tf/src/host.rs b/testing-framework/tools/cfgsync_tf/src/host.rs index 3f480d0..49ac2b1 100644 --- a/testing-framework/tools/cfgsync_tf/src/host.rs +++ b/testing-framework/tools/cfgsync_tf/src/host.rs @@ -6,7 +6,7 @@ use testing_framework_config::constants::{ #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub enum HostKind { - Validator, + Node, } #[derive(Eq, PartialEq, Hash, Clone)] @@ -42,8 +42,8 @@ impl Host { } #[must_use] - pub fn validator_from_ip(ip: Ipv4Addr, identifier: String, ports: PortOverrides) -> Self { - Self::from_parts(HostKind::Validator, ip, identifier, ports) + pub fn node_from_ip(ip: Ipv4Addr, identifier: String, ports: PortOverrides) -> Self { + Self::from_parts(HostKind::Node, ip, identifier, ports) } } @@ -57,7 +57,7 @@ pub fn sort_hosts(mut hosts: Vec) -> Vec { .and_then(|raw| raw.parse::().ok()) .unwrap_or(0); let kind = match host.kind { - HostKind::Validator => 0, + HostKind::Node => 0, }; (kind, index) }); diff --git a/testing-framework/tools/cfgsync_tf/src/server.rs b/testing-framework/tools/cfgsync_tf/src/server.rs index c14cf35..48acd5b 100644 --- a/testing-framework/tools/cfgsync_tf/src/server.rs +++ b/testing-framework/tools/cfgsync_tf/src/server.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use serde_json::{Value, json, to_value}; use serde_with::serde_as; use testing_framework_config::{ - 
nodes::validator::create_validator_config, + nodes::node::create_node_config, topology::configs::{consensus::ConsensusParams, wallet::WalletConfig}, }; use tokio::sync::oneshot::channel; @@ -46,7 +46,6 @@ pub struct CfgSyncConfig { pub old_blobs_check_interval: Duration, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] pub blobs_validity_duration: Duration, - pub global_params_path: String, pub min_dispersal_peers: usize, pub min_replication_peers: usize, #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] @@ -102,7 +101,7 @@ pub struct ClientIp { pub testing_http_port: Option, } -async fn validator_config( +async fn node_config( State(config_repo): State>, Json(payload): Json, ) -> impl IntoResponse { @@ -123,20 +122,20 @@ async fn validator_config( let (reply_tx, reply_rx) = channel(); config_repo - .register(Host::validator_from_ip(ip, identifier, ports), reply_tx) + .register(Host::node_from_ip(ip, identifier, ports), reply_tx) .await; (reply_rx.await).map_or_else( |_| (StatusCode::INTERNAL_SERVER_ERROR, "Error receiving config").into_response(), |config_response| match config_response { RepoResponse::Config(config) => { - let config = create_validator_config(*config); + let config = create_node_config(*config); let mut value = match to_value(&config) { Ok(value) => value, Err(err) => { return ( StatusCode::INTERNAL_SERVER_ERROR, - format!("failed to serialize validator config: {err}"), + format!("failed to serialize node config: {err}"), ) .into_response(); } @@ -158,7 +157,7 @@ async fn validator_config( pub fn cfgsync_app(config_repo: Arc) -> Router { Router::new() - .route("/validator", post(validator_config)) + .route("/node", post(node_config)) .with_state(config_repo) } diff --git a/testing-framework/workflows/src/builder/mod.rs b/testing-framework/workflows/src/builder/mod.rs index c75e42d..a144253 100644 --- a/testing-framework/workflows/src/builder/mod.rs +++ b/testing-framework/workflows/src/builder/mod.rs @@ -530,7 +530,7 @@ impl ChaosBuilder { min_delay: DEFAULT_CHAOS_MIN_DELAY, max_delay: DEFAULT_CHAOS_MAX_DELAY, target_cooldown: DEFAULT_CHAOS_TARGET_COOLDOWN, - include_validators: true, + include_nodes: true, } } } @@ -540,7 +540,7 @@ pub struct ChaosRestartBuilder { min_delay: Duration, max_delay: Duration, target_cooldown: Duration, - include_validators: bool, + include_nodes: bool, } impl ChaosRestartBuilder { @@ -580,9 +580,9 @@ impl ChaosRestartBuilder { } #[must_use] - /// Include validators in the restart target set. - pub const fn include_validators(mut self, enabled: bool) -> Self { - self.include_validators = enabled; + /// Include nodes in the restart target set. 
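// Passing false here is effectively best-effort: build() warns and flips it
// back to true when no target group remains (see the validation below).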
+ pub const fn include_nodes(mut self, enabled: bool) -> Self { + self.include_nodes = enabled; self } @@ -605,16 +605,16 @@ impl ChaosRestartBuilder { ); self.target_cooldown = self.min_delay; } - if !self.include_validators { + if !self.include_nodes { tracing::warn!("chaos restart requires at least one node group; enabling all targets"); - self.include_validators = true; + self.include_nodes = true; } let workload = RandomRestartWorkload::new( self.min_delay, self.max_delay, self.target_cooldown, - self.include_validators, + self.include_nodes, ); self.builder = self.builder.with_workload(workload); self.builder diff --git a/testing-framework/workflows/src/expectations/consensus_liveness.rs b/testing-framework/workflows/src/expectations/consensus_liveness.rs index 7136606..085349f 100644 --- a/testing-framework/workflows/src/expectations/consensus_liveness.rs +++ b/testing-framework/workflows/src/expectations/consensus_liveness.rs @@ -67,7 +67,7 @@ enum ConsensusLivenessIssue { #[derive(Debug, Error)] enum ConsensusLivenessError { - #[error("consensus liveness requires at least one validator")] + #[error("consensus liveness requires at least one node")] MissingParticipants, #[error("consensus liveness violated (target={target}):\n{details}")] Violations { diff --git a/testing-framework/workflows/src/workloads/chaos.rs b/testing-framework/workflows/src/workloads/chaos.rs index ea137b9..b964152 100644 --- a/testing-framework/workflows/src/workloads/chaos.rs +++ b/testing-framework/workflows/src/workloads/chaos.rs @@ -8,13 +8,13 @@ use tracing::info; const MIN_DELAY_SPREAD_FALLBACK: Duration = Duration::from_millis(1); -/// Randomly restarts validators during a run to introduce chaos. +/// Randomly restarts nodes during a run to introduce chaos. #[derive(Debug)] pub struct RandomRestartWorkload { min_delay: Duration, max_delay: Duration, target_cooldown: Duration, - include_validators: bool, + include_nodes: bool, } impl RandomRestartWorkload { @@ -22,32 +22,32 @@ impl RandomRestartWorkload { /// /// `min_delay`/`max_delay` bound the sleep between restart attempts, while /// `target_cooldown` prevents repeatedly restarting the same node too - /// quickly. Validators can be selectively included. + /// quickly. Nodes can be selectively included. 
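// Hypothetical parameters: new(Duration::from_secs(5), Duration::from_secs(30),
// Duration::from_secs(60), true) sleeps 5-30s between restart attempts and
// skips any node restarted within the last 60s.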
#[must_use] pub const fn new( min_delay: Duration, max_delay: Duration, target_cooldown: Duration, - include_validators: bool, + include_nodes: bool, ) -> Self { Self { min_delay, max_delay, target_cooldown, - include_validators, + include_nodes, } } fn targets(&self, ctx: &RunContext) -> Vec { let mut targets = Vec::new(); - let validator_count = ctx.descriptors().validators().len(); - if self.include_validators { - if validator_count > 1 { - for index in 0..validator_count { - targets.push(Target::Validator(index)); + let node_count = ctx.descriptors().nodes().len(); + if self.include_nodes { + if node_count > 1 { + for index in 0..node_count { + targets.push(Target::Node(index)); } - } else if validator_count == 1 { - info!("chaos restart skipping validators: only one validator configured"); + } else if node_count == 1 { + info!("chaos restart skipping nodes: only one node configured"); } } targets @@ -146,7 +146,7 @@ impl Workload for RandomRestartWorkload { tracing::info!( config = ?self, - validators = ctx.descriptors().validators().len(), + nodes = ctx.descriptors().nodes().len(), target_count = targets.len(), "starting chaos restart workload" ); @@ -158,12 +158,12 @@ impl Workload for RandomRestartWorkload { let target = self.pick_target(&targets, &cooldowns).await?; match target { - Target::Validator(index) => { - tracing::info!(index, "chaos restarting validator"); + Target::Node(index) => { + tracing::info!(index, "chaos restarting node"); handle - .restart_validator(index) + .restart_node(index) .await - .map_err(|err| format!("validator restart failed: {err}"))? + .map_err(|err| format!("node restart failed: {err}"))? } } @@ -174,5 +174,5 @@ impl Workload for RandomRestartWorkload { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] enum Target { - Validator(usize), + Node(usize), } diff --git a/testing-framework/workflows/src/workloads/transaction/workload.rs b/testing-framework/workflows/src/workloads/transaction/workload.rs index e5574dd..da6e077 100644 --- a/testing-framework/workflows/src/workloads/transaction/workload.rs +++ b/testing-framework/workflows/src/workloads/transaction/workload.rs @@ -73,7 +73,7 @@ impl ScenarioWorkload for Workload { } let reference_node = descriptors - .validators() + .nodes() .first() .ok_or("transaction workload requires at least one node in the topology")?; diff --git a/testing-framework/workflows/src/workloads/util.rs b/testing-framework/workflows/src/workloads/util.rs index 0e89e32..b000f3d 100644 --- a/testing-framework/workflows/src/workloads/util.rs +++ b/testing-framework/workflows/src/workloads/util.rs @@ -42,19 +42,13 @@ pub async fn submit_transaction_via_cluster( tx: Arc, ) -> Result<(), DynError> { let tx_hash = tx.hash(); - debug!( - ?tx_hash, - "submitting transaction via cluster (validators first)" - ); + debug!(?tx_hash, "submitting transaction via cluster (nodes first)"); let node_clients = ctx.node_clients(); - let mut validator_clients = node_clients.validator_clients(); - validator_clients.shuffle(&mut thread_rng()); + let mut clients = node_clients.node_clients(); + clients.shuffle(&mut thread_rng()); - let clients = validator_clients.into_iter(); - let mut clients: Vec<_> = clients.collect(); let mut last_err = None; - for attempt in 0..SUBMIT_RETRIES { clients.shuffle(&mut thread_rng()); diff --git a/versions.env b/versions.env index 0464058..e63f21d 100644 --- a/versions.env +++ b/versions.env @@ -1,7 +1,7 @@ VERSION=v0.3.2 -NOMOS_BUNDLE_VERSION=v4 +LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v4 # Pinned logos-blockchain-node 
revision used for CI builds and binary bundles. -NOMOS_NODE_REV=47ae18e95f643bde563b4769212b37f6f018fed3 +LOGOS_BLOCKCHAIN_NODE_REV=47ae18e95f643bde563b4769212b37f6f018fed3 # Optional: local logos-blockchain-node checkout override (do not commit absolute paths). -# NOMOS_NODE_PATH= +# LOGOS_BLOCKCHAIN_NODE_PATH=
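
Read end to end, the renamed local-deployer surface looks like this at a call site. This is a minimal sketch only, assuming the crate import paths and the details the diff elides (manual_cluster's parameters, the concrete error types); none of it is lifted from the repository:

    // Assumed imports; the real module paths may differ from these names.
    use testing_framework_core::scenario::StartNodeOptions;
    use testing_framework_local::LocalDeployer; // hypothetical crate name

    async fn local_smoke() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        // New in this change: skip the membership readiness wait for faster local runs.
        let deployer = LocalDeployer::new().with_membership_check(false);

        // manual_cluster's signature is elided in the diff; arguments sketched here.
        let cluster = deployer.manual_cluster(/* topology, ... */);

        // A non-empty name yields the label "node-alpha" per the start_node logic above.
        let started = cluster.start_node("alpha").await?;
        let _api = cluster.node_client(&started.name);

        // Options-bearing variant, mirroring ManualClusterHandle::start_node_with.
        let _beta = cluster
            .start_node_with("beta", StartNodeOptions::default())
            .await?;

        // restart_node remains unsupported by the local deployer and returns an error.
        cluster.stop_all();
        Ok(())
    }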