docs: sync book with current framework

This commit is contained in:
andrussal 2026-01-26 16:36:51 +01:00
parent 8d2dd4c86a
commit a372a808bc
83 changed files with 1468 additions and 1232 deletions

View File

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
runs-on: ${{ matrix.runs-on }}
env:
NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
CARGO_INCREMENTAL: 0
CARGO_PROFILE_DEV_DEBUG: 0
RUSTFLAGS: -C debuginfo=0 --cfg feature="pol-dev-mode"
@ -26,7 +26,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -35,8 +35,8 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install system dependencies (Linux)
if: runner.os == 'Linux'
run: |
@ -45,12 +45,12 @@ jobs:
sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$NOMOS_CIRCUITS"
echo "NOMOS_CIRCUITS=$NOMOS_CIRCUITS" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$LOGOS_BLOCKCHAIN_CIRCUITS"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$LOGOS_BLOCKCHAIN_CIRCUITS" >> "$GITHUB_ENV"
- name: Add top-level KZG params file
run: |
curl -fsSL "https://raw.githubusercontent.com/logos-co/nomos-node/${NOMOS_NODE_REV}/tests/kzgrs/kzgrs_test_params" \
-o "${NOMOS_CIRCUITS}/kzgrs_test_params"
curl -fsSL "https://raw.githubusercontent.com/logos-co/nomos-node/${LOGOS_BLOCKCHAIN_NODE_REV}/tests/kzgrs/kzgrs_test_params" \
-o "${LOGOS_BLOCKCHAIN_CIRCUITS}/kzgrs_test_params"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -66,13 +66,13 @@ jobs:
chmod +x scripts/build/build-bundle.sh
DEST=".tmp/nomos-binaries-linux-${VERSION}.tar.gz"
scripts/build/build-bundle.sh --platform linux --output "$DEST"
echo "NOMOS_BINARIES_TAR=$DEST" >> "$GITHUB_ENV"
echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=$DEST" >> "$GITHUB_ENV"
- name: Save nomos binaries cache
uses: actions/cache@v4
with:
path: ${{ github.workspace }}/.tmp/nomos-binaries-linux-${{ env.VERSION }}.tar.gz
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}
- uses: actions/upload-artifact@v4
with:
name: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}
name: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}
path: .tmp/nomos-binaries-linux-${{ env.VERSION }}.tar.gz

View File

@ -24,7 +24,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -33,12 +33,12 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -65,7 +65,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -74,12 +74,12 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -106,7 +106,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -115,12 +115,12 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -141,7 +141,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -150,12 +150,12 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -178,7 +178,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -187,8 +187,8 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -207,7 +207,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -216,12 +216,12 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.nomos-circuits"
echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
@ -242,8 +242,7 @@ jobs:
POL_PROOF_DEV_MODE: true
LOCAL_DEMO_RUN_SECS: 120
LOCAL_DEMO_VALIDATORS: 1
NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
NOMOS_KZGRS_PARAMS_PATH: ${{ github.workspace }}/.tmp/kzgrs_test_params
LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
CARGO_INCREMENTAL: 0
CARGO_PROFILE_DEV_DEBUG: 0
RUSTFLAGS: -C debuginfo=0
@ -254,7 +253,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -263,16 +262,20 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Set temp dir
run: |
echo "TMPDIR=${{ runner.temp }}" >> "$GITHUB_ENV"
echo "CARGO_TARGET_DIR=${{ runner.temp }}/target-local" >> "$GITHUB_ENV"
echo "NOMOS_LOG_DIR=${{ runner.temp }}/local-logs" >> "$GITHUB_ENV"
echo "NOMOS_STATE_DIR=${{ runner.temp }}/nomos-state" >> "$GITHUB_ENV"
echo "LOGOS_BLOCKCHAIN_LOG_DIR=${{ runner.temp }}/local-logs" >> "$GITHUB_ENV"
echo "LOGOS_BLOCKCHAIN_STATE_DIR=${{ runner.temp }}/nomos-state" >> "$GITHUB_ENV"
rm -rf "${{ runner.temp }}/local-logs" "${{ runner.temp }}/nomos-state"
mkdir -p "${{ runner.temp }}/local-logs" "${{ runner.temp }}/nomos-state"
- name: Install circuits
run: |
mkdir -p "${LOGOS_BLOCKCHAIN_CIRCUITS}"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "${LOGOS_BLOCKCHAIN_CIRCUITS}"
- name: Clean workspace caches
run: |
rm -rf .tmp/nomos-* testing-framework/assets/stack/kzgrs_test_params
@ -303,13 +306,13 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ github.workspace }}/.tmp/nomos-binaries.tar.gz
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
- name: Download nomos binaries artifact (fallback)
if: steps.restore-nomos-bins-host.outputs.cache-hit != 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
run: |
set -euo pipefail
mkdir -p "${TMPDIR}"
@ -338,27 +341,27 @@ jobs:
DEST="${GITHUB_WORKSPACE}/.tmp/nomos-binaries-host-${VERSION}.tar.gz"
if [ -f "${SRC}" ]; then
mv "${SRC}" "${DEST}"
echo "NOMOS_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
else
echo "Expected ${SRC} not found" >&2
exit 1
fi
- name: Run host demo (scripted)
env:
NOMOS_TESTS_KEEP_LOGS: "true"
LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS: "true"
RUST_LOG: "info"
NOMOS_LOG_DIR: "${{ runner.temp }}/local-logs"
LOGOS_BLOCKCHAIN_LOG_DIR: "${{ runner.temp }}/local-logs"
run: |
scripts/run/run-examples.sh -t 120 -v 1 -e 1 host
scripts/run/run-examples.sh -t 120 -n 1 host
- name: Collect host demo logs (on failure)
if: failure()
run: |
if [ -d "${NOMOS_LOG_DIR}" ]; then
tar -czf "${RUNNER_TEMP}/local-logs.tgz" -C "$(dirname "${NOMOS_LOG_DIR}")" "$(basename "${NOMOS_LOG_DIR}")"
if [ -d "${LOGOS_BLOCKCHAIN_LOG_DIR}" ]; then
tar -czf "${RUNNER_TEMP}/local-logs.tgz" -C "$(dirname "${LOGOS_BLOCKCHAIN_LOG_DIR}")" "$(basename "${LOGOS_BLOCKCHAIN_LOG_DIR}")"
echo "Local logs tar: $(realpath ${RUNNER_TEMP}/local-logs.tgz)"
find "${NOMOS_LOG_DIR}" -type f -print
find "${LOGOS_BLOCKCHAIN_LOG_DIR}" -type f -print
else
echo "No local logs directory at ${NOMOS_LOG_DIR}"
echo "No local logs directory at ${LOGOS_BLOCKCHAIN_LOG_DIR}"
fi
- name: Upload host smoke logs
if: failure()
@ -380,8 +383,8 @@ jobs:
runs-on: ubuntu-latest
env:
TMPDIR: ${{ github.workspace }}/.tmp
NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
NOMOS_TESTNET_IMAGE: nomos-testnet:${{ github.run_id }}
LOGOS_BLOCKCHAIN_CIRCUITS: ${{ github.workspace }}/.tmp/logos-blockchain-circuits
LOGOS_BLOCKCHAIN_TESTNET_IMAGE: nomos-testnet:${{ github.run_id }}
DOCKER_BUILDKIT: 1
CARGO_INCREMENTAL: 0
CARGO_PROFILE_DEV_DEBUG: 0
@ -393,7 +396,7 @@ jobs:
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION" >&2
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
@ -402,24 +405,28 @@ jobs:
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV}"
: "${NOMOS_BUNDLE_VERSION:?Missing NOMOS_BUNDLE_VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Prepare workspace tmpdir
run: mkdir -p "$TMPDIR"
- name: Install circuits
run: |
mkdir -p "${LOGOS_BLOCKCHAIN_CIRCUITS}"
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "${LOGOS_BLOCKCHAIN_CIRCUITS}"
- name: Restore cached nomos binaries
id: restore-nomos-bins
uses: actions/cache@v4
with:
path: ${{ github.workspace }}/.tmp/nomos-binaries.tar.gz
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
key: ${{ runner.os }}-nomos-binaries-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
- name: Download nomos binaries artifact (fallback)
if: steps.restore-nomos-bins.outputs.cache-hit != 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.NOMOS_BUNDLE_VERSION }}-${{ env.NOMOS_NODE_REV }}
ARTIFACT_NAME: nomos-binaries-${{ runner.os }}-${{ env.VERSION }}-${{ env.LOGOS_BLOCKCHAIN_BUNDLE_VERSION }}-${{ env.LOGOS_BLOCKCHAIN_NODE_REV }}
run: |
set -euo pipefail
download_dir="${TMPDIR}/nomos-binaries-download"
@ -450,7 +457,7 @@ jobs:
DEST="${GITHUB_WORKSPACE}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz"
if [ -f "${SRC}" ]; then
mv "${SRC}" "${DEST}"
echo "NOMOS_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
echo "LOGOS_BLOCKCHAIN_BINARIES_TAR=${DEST}" >> "$GITHUB_ENV"
else
echo "Expected ${SRC} not found" >&2
exit 1
@ -495,18 +502,17 @@ jobs:
env:
POL_PROOF_DEV_MODE: "true"
COMPOSE_NODE_PAIRS: "1x1"
NOMOS_TESTNET_IMAGE: ${{ env.NOMOS_TESTNET_IMAGE }}
LOGOS_BLOCKCHAIN_TESTNET_IMAGE: ${{ env.LOGOS_BLOCKCHAIN_TESTNET_IMAGE }}
COMPOSE_RUNNER_HOST: "127.0.0.1"
NOMOS_TIME_BACKEND: "monotonic"
NOMOS_KZGRS_PARAMS_PATH: "/kzgrs_test_params/kzgrs_test_params"
LOGOS_BLOCKCHAIN_TIME_BACKEND: "monotonic"
RUST_BACKTRACE: "1"
NOMOS_TESTS_TRACING: "true"
LOGOS_BLOCKCHAIN_TESTS_TRACING: "true"
RUST_LOG: "info"
NOMOS_LOG_LEVEL: "info"
NOMOS_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs"
LOGOS_BLOCKCHAIN_LOG_LEVEL: "info"
LOGOS_BLOCKCHAIN_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs"
run: |
mkdir -p "$TMPDIR"
scripts/run/run-examples.sh -t 120 -v 1 -e 1 compose
scripts/run/run-examples.sh -t 120 -n 1 compose
- name: Show compose runner log
env:

View File

@ -113,7 +113,7 @@ For compose/k8s deployments, you can create prebuilt bundles to speed up image b
scripts/build/build-bundle.sh --platform linux
# Use the bundle when building images
export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
scripts/build/build_test_image.sh
```
@ -124,10 +124,10 @@ Key environment variables for customization:
| Variable | Purpose | Default |
|----------|---------|---------|
| `POL_PROOF_DEV_MODE=true` | **Required** — Disable expensive proof generation (set automatically by `scripts/run/run-examples.sh`) | (none) |
| `NOMOS_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` |
| `NOMOS_DEMO_NODES` | Number of nodes | Varies by example |
| `NOMOS_LOG_DIR` | Directory for persistent log files | (temporary) |
| `NOMOS_LOG_LEVEL` | Logging verbosity | `info` |
| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` |
| `LOGOS_BLOCKCHAIN_DEMO_NODES` | Number of nodes | Varies by example |
| `LOGOS_BLOCKCHAIN_LOG_DIR` | Directory for persistent log files | (temporary) |
| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | Logging verbosity | `info` |
See [Operations Guide](https://logos-blockchain.github.io/logos-blockchain-testing/operations.html) for complete configuration reference.
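As a quick sketch (values are placeholders, not defaults), these variables can be combined with the convenience script; `scripts/run/run-examples.sh` sets `POL_PROOF_DEV_MODE` for you:

```bash
# Illustrative overrides only; substitute values that match your setup.
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local
export LOGOS_BLOCKCHAIN_DEMO_NODES=3
export LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logos-blockchain-logs
export LOGOS_BLOCKCHAIN_LOG_LEVEL=debug
scripts/run/run-examples.sh -t 120 -n 3 compose
```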

View File

@ -14,7 +14,7 @@ Reviewed against `git rev-parse HEAD` at the time of writing, plus local working
## Findings / Fixes Applied
- `book/src/environment-variables.md` was not a complete reference: it missed multiple `NOMOS_*` variables used by the repo (scripts + framework). Added the missing variables and corrected a misleading note about `RUST_LOG` vs node logging.
- `book/src/running-examples.md` “Quick Smoke Matrix” section didn’t reflect current `scripts/run/run-test-matrix.sh` flags. Added the commonly used options and clarified the relationship to `NOMOS_SKIP_IMAGE_BUILD`.
- `book/src/running-examples.md` “Quick Smoke Matrix” section didn’t reflect current `scripts/run/run-test-matrix.sh` flags. Added the commonly used options and clarified the relationship to `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD`.
- `book/src/part-iv.md` existed but was not in `book/src/SUMMARY.md`. Removed it so the rendered book doesn’t silently diverge from the filesystem.
- `mdbook test book` was failing because:
- Many Rust examples were written as ` ```rust` (doctested by default) but depend on workspace crates; they aren’t standalone doctest snippets.

View File

@ -159,8 +159,8 @@ cargo doc --no-deps --document-private-items
**When:** New environment variable added, changed, or removed
**Examples:**
- New: `NOMOS_NEW_FEATURE_ENABLED`
- Changed: `NOMOS_LOG_LEVEL` accepts new values
- New: `LOGOS_BLOCKCHAIN_NEW_FEATURE_ENABLED`
- Changed: `LOGOS_BLOCKCHAIN_LOG_LEVEL` accepts new values
- Deprecated: `OLD_FEATURE_FLAG`
**Update these pages:**
@ -235,7 +235,7 @@ rg "scripts/" book/src/ --no-heading
```bash
- [ ] src/prerequisites.md # Image build instructions
- [ ] src/runners.md # Compose/K8s prerequisites
- [ ] src/environment-variables.md # NOMOS_TESTNET_IMAGE, NOMOS_BINARIES_TAR
- [ ] src/environment-variables.md # LOGOS_BLOCKCHAIN_TESTNET_IMAGE, LOGOS_BLOCKCHAIN_BINARIES_TAR
- [ ] src/architecture-overview.md # Assets and Images section
```
@ -247,7 +247,7 @@ rg "scripts/" book/src/ --no-heading
```bash
- [ ] src/logging-observability.md # Primary documentation
- [ ] src/environment-variables.md # NOMOS_METRICS_*, NOMOS_OTLP_*
- [ ] src/environment-variables.md # LOGOS_BLOCKCHAIN_METRICS_*, LOGOS_BLOCKCHAIN_OTLP_*
- [ ] src/architecture-overview.md # Observability section
- [ ] src/runners.md # Runner observability support
```

View File

@ -23,6 +23,7 @@
- [RunContext: BlockFeed & Node Control](node-control.md)
- [Chaos Workloads](chaos.md)
- [Topology & Chaos Patterns](topology-chaos.md)
- [Manual Clusters: Imperative Control](manual-cluster.md)
- [Part III — Developer Reference](part-iii.md)
- [Scenario Model (Developer Level)](scenario-model.md)
- [API Levels: Builder DSL vs. Direct](api-levels.md)

View File

@ -7,15 +7,13 @@ logos-blockchain-testing/
├─ testing-framework/ # Core library crates
│ ├─ configs/ # Node config builders, topology generation, tracing/logging config
│ ├─ core/ # Scenario model (ScenarioBuilder), runtime (Runner, Deployer), topology, node spawning
│ ├─ workflows/ # Workloads (transactions, DA, chaos), expectations (liveness), builder DSL extensions
│ ├─ runners/ # Deployment backends
│ ├─ workflows/ # Workloads (transactions, chaos), expectations (liveness), builder DSL extensions
│ ├─ deployers/ # Deployment backends
│ │ ├─ local/ # LocalDeployer (spawns local processes)
│ │ ├─ compose/ # ComposeDeployer (Docker Compose + Prometheus)
│ │ └─ k8s/ # K8sDeployer (Kubernetes Helm)
│ └─ assets/ # Docker/K8s stack assets
│ └─ stack/
│ ├─ kzgrs_test_params/ # KZG circuit parameters directory
│ │ └─ kzgrs_test_params # Actual proving key file (note repeated name)
│ ├─ monitoring/ # Prometheus config
│ ├─ scripts/ # Container entrypoints
│ └─ cfgsync.yaml # Config sync server template
@ -29,8 +27,7 @@ logos-blockchain-testing/
├─ scripts/ # Helper utilities
│ ├─ run-examples.sh # Convenience script (handles setup + runs examples)
│ ├─ build-bundle.sh # Build prebuilt binaries+circuits bundle
│ ├─ setup-circuits-stack.sh # Fetch KZG parameters (Linux + host)
│ └─ setup-nomos-circuits.sh # Legacy circuit fetcher
│ └─ setup-logos-blockchain-circuits.sh # Fetch circuit assets (Linux + host)
└─ book/ # This documentation (mdBook)
```
@ -45,13 +42,12 @@ Core library crates providing the testing API.
| `configs` | Node configuration builders | Topology generation, tracing config |
| `core` | Scenario model & runtime | `ScenarioBuilder`, `Deployer`, `Runner` |
| `workflows` | Workloads & expectations | `ScenarioBuilderExt`, `ChaosBuilderExt` |
| `runners/local` | Local process deployer | `LocalDeployer` |
| `runners/compose` | Docker Compose deployer | `ComposeDeployer` |
| `runners/k8s` | Kubernetes deployer | `K8sDeployer` |
| `deployers/local` | Local process deployer | `LocalDeployer` |
| `deployers/compose` | Docker Compose deployer | `ComposeDeployer` |
| `deployers/k8s` | Kubernetes deployer | `K8sDeployer` |
### `testing-framework/assets/stack/`
Docker/K8s deployment assets:
- **`kzgrs_test_params/kzgrs_test_params`**: Circuit parameters file (note repeated name; override via `NOMOS_KZGRS_PARAMS_PATH`)
- **`monitoring/`**: Prometheus config
- **`scripts/`**: Container entrypoints
@ -60,13 +56,13 @@ Convenience utilities:
- **`run-examples.sh`**: All-in-one script for host/compose/k8s modes (recommended)
- **`build-bundle.sh`**: Create prebuilt binaries+circuits bundle for compose/k8s
- **`build_test_image.sh`**: Build the compose/k8s Docker image (bakes in assets)
- **`setup-circuits-stack.sh`**: Fetch KZG parameters for both Linux and host
- **`setup-logos-blockchain-circuits.sh`**: Fetch circuit assets for both Linux and host
- **`cfgsync.yaml`**: Configuration sync server template
### `examples/` (Start Here!)
**Runnable binaries** demonstrating framework usage:
- `local_runner.rs` — Local processes
- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built)
- `compose_runner.rs` — Docker Compose (requires `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` built)
- `k8s_runner.rs` — Kubernetes (requires cluster + image)
**Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`
@ -75,20 +71,20 @@ Convenience utilities:
### `scripts/`
Helper utilities:
- **`setup-nomos-circuits.sh`**: Fetch KZG parameters from releases
- **`setup-logos-blockchain-circuits.sh`**: Fetch circuit assets from releases
## Observability
**Compose runner** includes:
- **Prometheus** at `http://localhost:9090` (metrics scraping)
- Node metrics exposed per validator
- Node metrics exposed per node
- Access in expectations: `ctx.telemetry().prometheus().map(|p| p.base_url())`
**Logging** controlled by:
- `NOMOS_LOG_DIR` — Write per-node log files
- `NOMOS_LOG_LEVEL` — Global log level (error/warn/info/debug/trace)
- `NOMOS_LOG_FILTER` — Target-specific filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`)
- `NOMOS_TESTS_TRACING` — Enable file logging for local runner
- `LOGOS_BLOCKCHAIN_LOG_DIR` — Write per-node log files
- `LOGOS_BLOCKCHAIN_LOG_LEVEL` — Global log level (error/warn/info/debug/trace)
- `LOGOS_BLOCKCHAIN_LOG_FILTER` — Target-specific filtering (e.g., `cryptarchia=trace`)
- `LOGOS_BLOCKCHAIN_TESTS_TRACING` — Enable file logging for local runner
See [Logging & Observability](logging-observability.md) for details.
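A minimal sketch of persistent host-runner logging, assuming `/tmp/local-logs` is writable and the example binary is named after its source file:

```bash
# Enable per-node log files for the local runner (illustrative values).
export LOGOS_BLOCKCHAIN_TESTS_TRACING=true
export LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/local-logs
export LOGOS_BLOCKCHAIN_LOG_FILTER=cryptarchia=trace
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
```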
@ -102,6 +98,6 @@ See [Logging & Observability](logging-observability.md) for details.
| **Add a new expectation** | `testing-framework/workflows/src/expectations/` → Implement `Expectation` trait |
| **Modify node configs** | `testing-framework/configs/src/topology/configs/` |
| **Extend builder DSL** | `testing-framework/workflows/src/builder/` → Add trait methods |
| **Add a new deployer** | `testing-framework/runners/` → Implement `Deployer` trait |
| **Add a new deployer** | `testing-framework/deployers/` → Implement `Deployer` trait |
For detailed guidance, see [Internal Crate Reference](internal-crate-reference.md).

View File

@ -17,10 +17,9 @@ use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(5)
.transactions_with(|txs| txs.rate(5).users(3))
.da_with(|da| da.channel_rate(1).blob_rate(1).headroom_percent(20))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build();
@ -36,30 +35,23 @@ Direct instantiation gives you explicit control over the concrete types you atta
```rust,ignore
use std::{
num::{NonZeroU64, NonZeroUsize},
num::NonZeroUsize,
time::Duration,
};
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::{
expectations::ConsensusLiveness,
workloads::{da, transaction},
workloads::transaction,
};
let tx_workload = transaction::Workload::with_rate(5)
.expect("transaction rate must be non-zero")
.with_user_limit(NonZeroUsize::new(3));
let da_workload = da::Workload::with_rate(
NonZeroU64::new(1).unwrap(), // blob rate per block
NonZeroU64::new(1).unwrap(), // channel rate per block
da::Workload::default_headroom_percent(),
);
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(5)
.with_workload(tx_workload)
.with_workload(da_workload)
.with_expectation(ConsensusLiveness::default())
.with_run_duration(Duration::from_secs(60))
.build();
@ -75,7 +67,6 @@ let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
| High-Level DSL | Low-Level Direct |
|----------------|------------------|
| `.transactions_with(\|txs\| txs.rate(5).users(3))` | `.with_workload(transaction::Workload::with_rate(5).expect(...).with_user_limit(...))` |
| `.da_with(\|da\| da.blob_rate(1).channel_rate(1))` | `.with_workload(da::Workload::with_rate(...))` |
| `.expect_consensus_liveness()` | `.with_expectation(ConsensusLiveness::default())` |
## Bundled Expectations (Important)
@ -97,7 +88,7 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::transaction};
let tx_workload = transaction::Workload::with_rate(5)
.expect("transaction rate must be non-zero");
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(5)
.with_workload(tx_workload) // direct instantiation
.expect_consensus_liveness() // DSL

View File

@ -26,7 +26,6 @@ flowchart TB
subgraph Workflows["Workflows (Batteries Included)"]
DSL[ScenarioBuilderExt<br/>Fluent API]
TxWorkload[Transaction Workload]
DAWorkload[DA Workload]
ChaosWorkload[Chaos Workload]
Expectations[Built-in Expectations]
end
@ -74,7 +73,7 @@ flowchart TB
**Workflows (High-Level API)**
- `ScenarioBuilderExt` trait provides fluent DSL
- Built-in workloads (transactions, DA, chaos)
- Built-in workloads (transactions, chaos)
- Common expectations (liveness, inclusion)
- Simplifies scenario authoring
@ -120,7 +119,7 @@ See [Extending the Framework](extending.md) for details.
### Components
- **Topology** describes the cluster: how many nodes, their roles, and the high-level network and data-availability parameters they should follow.
- **Topology** describes the cluster: how many nodes and the high-level network parameters they should follow.
- **Scenario** combines that topology with the activities to run and the checks to perform, forming a single plan.
- **Deployer** provisions infrastructure on the chosen backend (local processes, Docker Compose, or Kubernetes), waits for readiness, and returns a Runner.
- **Runner** orchestrates scenario execution: starts workloads, observes signals, evaluates expectations, and triggers cleanup.
@ -136,13 +135,13 @@ together predictably.
The framework is consumed via **runnable example binaries** in `examples/src/bin/`:
- `local_runner.rs` — Spawns nodes as host processes
- `compose_runner.rs` — Deploys via Docker Compose (requires `NOMOS_TESTNET_IMAGE` built)
- `compose_runner.rs` — Deploys via Docker Compose (requires `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` built)
- `k8s_runner.rs` — Deploys via Kubernetes Helm (requires cluster + image)
**Recommended:** Use the convenience script:
```bash
scripts/run/run-examples.sh -t <duration> -v <validators> <mode>
scripts/run/run-examples.sh -t <duration> -n <nodes> <mode>
# mode: host, compose, or k8s
```
@ -169,10 +168,9 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| txs.rate(5).users(20))
.da_with(|da| da.channel_rate(1).blob_rate(2))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(90))
.build()
@ -180,8 +178,8 @@ pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> {
```
**Key API Points:**
- Topology uses `.topology_with(|t| { t.validators(N) })` closure pattern
- Workloads are configured via `_with` closures (`transactions_with`, `da_with`, `chaos_with`)
- Topology uses `.topology_with(|t| { t.nodes(N) })` closure pattern
- Workloads are configured via `_with` closures (`transactions_with`, `chaos_with`)
- Chaos workloads require `.enable_node_control()` and a compatible runner
## Deployers
@ -195,29 +193,29 @@ Three deployer implementations:
| `K8sDeployer` | Kubernetes Helm | Cluster + image loaded | Not yet |
**Compose-specific features:**
- Observability is external (set `NOMOS_METRICS_QUERY_URL` / `NOMOS_METRICS_OTLP_INGEST_URL` / `NOMOS_GRAFANA_URL` as needed)
- Optional OTLP trace/metrics endpoints (`NOMOS_OTLP_ENDPOINT`, `NOMOS_OTLP_METRICS_ENDPOINT`)
- Node control for chaos testing (restart validators)
- Observability is external (set `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` / `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` / `LOGOS_BLOCKCHAIN_GRAFANA_URL` as needed)
- Optional OTLP trace/metrics endpoints (`LOGOS_BLOCKCHAIN_OTLP_ENDPOINT`, `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT`)
- Node control for chaos testing (restart nodes)
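For example, an externally managed observability stack might be wired in roughly like this (endpoints are placeholders; the framework only reads these variables and does not deploy the services):

```bash
# Point the compose runner at observability endpoints you already operate.
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090
export LOGOS_BLOCKCHAIN_OTLP_ENDPOINT=http://localhost:4317
export LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT=http://localhost:4318
scripts/run/run-examples.sh -t 120 -n 3 compose
```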
## Assets and Images
### Docker Image
Built via `scripts/build/build_test_image.sh`:
- Embeds KZG circuit parameters and binaries from `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
- Embeds circuit assets and binaries
- Includes runner scripts: `run_nomos_node.sh`
- Tagged as `NOMOS_TESTNET_IMAGE` (default: `logos-blockchain-testing:local`)
- **Recommended:** Use prebuilt bundle via `scripts/build/build-bundle.sh --platform linux` and set `NOMOS_BINARIES_TAR` before building image
- Tagged as `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` (default: `logos-blockchain-testing:local`)
- **Recommended:** Use prebuilt bundle via `scripts/build/build-bundle.sh --platform linux` and set `LOGOS_BLOCKCHAIN_BINARIES_TAR` before building image
### Circuit Assets
KZG parameters required for DA workloads:
- **Host path:** `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note repeated filename—directory contains file `kzgrs_test_params`)
- **Container path:** `/kzgrs_test_params/kzgrs_test_params` (for compose/k8s)
- **Override:** `NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/file` (must point to file)
- **Fetch via:** `scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/circuits` or use `scripts/run/run-examples.sh`
Circuit assets required by the node binary:
- **Host path:** `~/.logos-blockchain-circuits` (default)
- **Container path:** `/opt/circuits` (for compose/k8s)
- **Override:** `LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/dir` (must point to a directory)
- **Fetch via:** `scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits` or use `scripts/run/run-examples.sh`
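A manual fetch, as a sketch with an assumed version and destination, looks like:

```bash
# Download circuit assets and tell the framework where to find them.
mkdir -p ~/.logos-blockchain-circuits
./scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits
export LOGOS_BLOCKCHAIN_CIRCUITS=~/.logos-blockchain-circuits
```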
### Compose Stack
Templates and configs in `testing-framework/runners/compose/assets/`:
- `docker-compose.yml.tera` — Stack template (validators)
- `docker-compose.yml.tera` — Stack template (nodes)
- Cfgsync config: `testing-framework/assets/stack/cfgsync.yaml`
- Monitoring assets (not deployed by the framework): `testing-framework/assets/stack/monitoring/`
@ -228,33 +226,33 @@ Templates and configs in `testing-framework/runners/compose/assets/`:
| Component | Configuration | Output |
|-----------|--------------|--------|
| **Runner binaries** | `RUST_LOG` | Framework orchestration logs |
| **Node processes** | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (+ `NOMOS_LOG_DIR` on host runner) | Consensus, DA, mempool logs |
| **Node processes** | `LOGOS_BLOCKCHAIN_LOG_LEVEL`, `LOGOS_BLOCKCHAIN_LOG_FILTER` (+ `LOGOS_BLOCKCHAIN_LOG_DIR` on host runner) | Consensus, mempool, network logs |
**Node logging:**
- **Local runner:** Writes to temporary directories by default (cleaned up). Set `NOMOS_TESTS_TRACING=true` + `NOMOS_LOG_DIR` for persistent files.
- **Local runner:** Writes to temporary directories by default (cleaned up). Set `LOGOS_BLOCKCHAIN_TESTS_TRACING=true` + `LOGOS_BLOCKCHAIN_LOG_DIR` for persistent files.
- **Compose runner:** Default logs to container stdout/stderr (`docker logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory).
- **K8s runner:** Logs to pod stdout/stderr (`kubectl logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory).
**File naming:** Per-node files use prefix `nomos-node-{index}` (may include timestamps).
**File naming:** Per-node files use prefix `logos-blockchain-node-{index}` (may include timestamps).
## Observability
**Prometheus-compatible metrics querying (optional):**
- The framework does **not** deploy Prometheus/Grafana.
- Provide a Prometheus-compatible base URL (PromQL API) via `NOMOS_METRICS_QUERY_URL`.
- Provide a Prometheus-compatible base URL (PromQL API) via `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL`.
- Accessible in expectations when configured: `ctx.telemetry().prometheus().map(|p| p.base_url())`
**Grafana dashboards (optional):**
- Dashboards live in `testing-framework/assets/stack/monitoring/grafana/dashboards/` and can be imported into your Grafana.
- If you set `NOMOS_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`.
- If you set `LOGOS_BLOCKCHAIN_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`.
**Node APIs:**
- HTTP endpoints per node for consensus info, network status, DA membership
- Accessible in expectations: `ctx.node_clients().validator_clients().get(0)`
- HTTP endpoints per node for consensus info and network status
- Accessible in expectations: `ctx.node_clients().node_clients().get(0)`
**OTLP (optional):**
- Trace endpoint: `NOMOS_OTLP_ENDPOINT=http://localhost:4317`
- Metrics endpoint: `NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318`
- Trace endpoint: `LOGOS_BLOCKCHAIN_OTLP_ENDPOINT=http://localhost:4317`
- Metrics endpoint: `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT=http://localhost:4318`
- Disabled by default (no noise if unset)
For detailed logging configuration, see [Logging & Observability](logging-observability.md).

View File

@ -16,8 +16,8 @@ flowchart LR
D --> E[5. Deploy & Run]
```
1. **Shape the topology** — How many nodes, what roles, what network shape
2. **Attach workloads** — What traffic to generate (transactions, blobs, chaos)
1. **Shape the topology** — How many nodes, what network shape
2. **Attach workloads** — What traffic to generate (transactions, chaos)
3. **Define expectations** — What success looks like (liveness, inclusion, recovery)
4. **Set duration** — How long to run the experiment
5. **Choose a runner** — Where to execute (local, compose, k8s)
@ -36,12 +36,12 @@ use testing_framework_workflows::ScenarioBuilderExt;
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star() // Star network (one gateway + nodes)
.validators(3) // 3 validator nodes
.nodes(3) // 3 nodes
})
```
**What goes in topology?**
- Node counts (validators)
- Node counts (nodes)
- Network shape (`network_star()` is currently the only built-in layout)
**What does NOT go in topology?**
@ -61,7 +61,6 @@ let scenario = ScenarioBuilder::topology_with(|t| {
**What goes in workloads?**
- Transaction traffic (rate, users)
- DA traffic (channels, blobs)
- Chaos injection (restarts, delays)
**Units explained:**
@ -136,7 +135,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
async fn hello_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.nodes(3)
})
.wallets(20)
.transactions_with(|tx| tx.rate(10).users(5))
@ -204,7 +203,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
#[tokio::test]
async fn test_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(3)
t.network_star().nodes(3)
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
@ -219,7 +218,7 @@ async fn test_consensus_liveness() -> Result<()> {
#[tokio::test]
async fn test_transaction_inclusion() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(2)
t.network_star().nodes(2)
})
.wallets(10)
.transactions_with(|tx| tx.rate(5).users(5))
@ -245,13 +244,13 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn minimal_topology() -> ScenarioBuilder {
ScenarioBuilder::topology_with(|t| {
t.network_star().validators(2)
t.network_star().nodes(2)
})
}
pub fn production_like_topology() -> ScenarioBuilder {
ScenarioBuilder::topology_with(|t| {
t.network_star().validators(7)
t.network_star().nodes(7)
})
}
@ -293,10 +292,10 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
async fn test_liveness_with_topology(validators: usize) -> Result<()> {
async fn test_liveness_with_topology(nodes: usize) -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(validators)
.nodes(nodes)
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
@ -331,7 +330,7 @@ async fn liveness_large() -> Result<()> {
### Topology
**Do include:**
- Node counts (`.validators(3)`)
- Node counts (`.nodes(3)`)
- Network shape (`.network_star()`)
**Don't include:**
@ -343,7 +342,7 @@ async fn liveness_large() -> Result<()> {
**Do include:**
- Transaction traffic (`.transactions_with(|tx| ...)`)
- DA traffic (`.da_with(|da| ...)`)
- Chaos traffic (`.chaos().restart()` or `RandomRestartWorkload`)
- Chaos injection (`.with_workload(RandomRestartWorkload::new(...))`)
- Rates, users, timing
@ -367,8 +366,8 @@ async fn liveness_large() -> Result<()> {
## Best Practices
1. **Keep scenarios focused**: One scenario = one behavior under test
2. **Start small**: 2-3 validators, 30-60 seconds
3. **Use descriptive names**: `test_consensus_survives_validator_restart` not `test_1`
2. **Start small**: 2-3 nodes, 30-60 seconds
3. **Use descriptive names**: `test_consensus_survives_node_restart` not `test_1`
4. **Extract common patterns**: Shared topology builders, helper functions
5. **Document intent**: Add comments explaining what you're testing and why
6. **Mind the units**: `.rate(N)` is per-block, `.with_run_duration()` is wall-clock
@ -379,6 +378,6 @@ async fn liveness_large() -> Result<()> {
## Next Steps
- **[Core Content: Workloads & Expectations](workloads.md)** — Comprehensive reference for built-in workloads and expectations
- **[Examples](examples.md)** — More scenario patterns (DA, chaos, advanced topologies)
- **[Examples](examples.md)** — More scenario patterns (chaos, advanced topologies)
- **[Running Scenarios](running-scenarios.md)** — How execution works, artifacts produced, per-runner details
- **[API Levels](api-levels.md)** — When to use builder DSL vs. direct instantiation

View File

@ -5,7 +5,7 @@ This page collects proven patterns for authoring, running, and maintaining test
## Scenario Design
**State your intent**
- Document the goal of each scenario (throughput, DA validation, resilience) so expectation choices are obvious
- Document the goal of each scenario (throughput, resilience) so expectation choices are obvious
- Use descriptive variable names that explain topology purpose (e.g., `star_topology_3val_2exec` vs `topology`)
- Add comments explaining why specific rates or durations were chosen
@ -20,7 +20,7 @@ This page collects proven patterns for authoring, running, and maintaining test
- Don't mix high transaction load with aggressive chaos in the same test (hard to debug)
**Start small, scale up**
- Begin with minimal topology (1-2 validators) to validate scenario logic
- Begin with minimal topology (1-2 nodes) to validate scenario logic
- Gradually increase topology size and workload rates
- Use Host runner for fast iteration, then validate on Compose before production
@ -34,10 +34,10 @@ This page collects proven patterns for authoring, running, and maintaining test
**Example: Topology preset**
```rust,ignore
pub fn standard_da_topology() -> GeneratedTopology {
pub fn standard_topology() -> GeneratedTopology {
TopologyBuilder::new()
.network_star()
.validators(3)
.nodes(3)
.generate()
}
```
@ -46,7 +46,6 @@ pub fn standard_da_topology() -> GeneratedTopology {
```rust,ignore
pub const STANDARD_TX_RATE: f64 = 10.0;
pub const STANDARD_DA_CHANNEL_RATE: f64 = 2.0;
pub const SHORT_RUN_DURATION: Duration = Duration::from_secs(60);
pub const LONG_RUN_DURATION: Duration = Duration::from_secs(300);
```
@ -55,8 +54,8 @@ pub const LONG_RUN_DURATION: Duration = Duration::from_secs(300);
**Observe first, tune second**
- Rely on liveness and inclusion signals to interpret outcomes before tweaking rates or topology
- Enable detailed logging (`RUST_LOG=debug`, `NOMOS_LOG_LEVEL=debug`) only after initial failure
- Use `NOMOS_TESTS_KEEP_LOGS=1` to persist logs when debugging failures
- Enable detailed logging (`RUST_LOG=debug`, `LOGOS_BLOCKCHAIN_LOG_LEVEL=debug`) only after initial failure
- Use `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` to persist logs when debugging failures
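A typical debug rerun might look like the following sketch (flags and values are illustrative):

```bash
# Keep node logs and raise verbosity only while chasing a failure.
LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1 \
RUST_LOG=debug \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
scripts/run/run-examples.sh -t 60 -n 2 host
```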
**Use BlockFeed effectively**
- Subscribe to BlockFeed in expectations for real-time block monitoring
@ -102,7 +101,7 @@ strategy:
**Cache aggressively**
- Cache Rust build artifacts (`target/`)
- Cache circuit parameters (`assets/stack/kzgrs_test_params/`)
- Cache circuit parameters (`~/.logos-blockchain-circuits/`)
- Cache Docker layers (use BuildKit cache)
**Collect logs on failure**
@ -163,34 +162,32 @@ runner.run(&mut scenario).await?;
// BAD: Hard to debug when it fails
.transactions_with(|tx| tx.rate(50).users(100)) // high load
.chaos_with(|c| c.restart().min_delay(...)) // AND chaos
.da_with(|da| da.channel_rate(10).blob_rate(20)) // AND DA stress
// GOOD: Separate tests for each concern
// Test 1: High transaction load only
// Test 2: Chaos resilience only
// Test 3: DA stress only
```
**DON'T: Hardcode paths or ports**
```rust,ignore
// BAD: Breaks on different machines
let path = PathBuf::from("/home/user/circuits/kzgrs_test_params");
let path = PathBuf::from("/home/user/circuits");
let port = 9000; // might conflict
// GOOD: Use env vars and dynamic allocation
let path = std::env::var("NOMOS_KZGRS_PARAMS_PATH")
.unwrap_or_else(|_| "assets/stack/kzgrs_test_params/kzgrs_test_params".to_string());
let path = std::env::var("LOGOS_BLOCKCHAIN_CIRCUITS")
.unwrap_or_else(|_| "~/.logos-blockchain-circuits".to_string());
let port = get_available_tcp_port();
```
**DON'T: Ignore resource limits**
```bash
# BAD: Large topology without checking resources
scripts/run/run-examples.sh -v 20 -e 10 compose
scripts/run/run-examples.sh -n 20 compose
# (might OOM or exhaust ulimits)
# GOOD: Scale gradually and monitor resources
scripts/run/run-examples.sh -v 3 -e 2 compose # start small
scripts/run/run-examples.sh -n 3 compose # start small
docker stats # monitor resource usage
# then increase if resources allow
```
@ -198,12 +195,11 @@ docker stats # monitor resource usage
## Scenario Design Heuristics
**Minimal viable topology**
- Consensus: 3 validators (minimum for Byzantine fault tolerance)
- Consensus: 3 nodes (minimum for Byzantine fault tolerance)
- Network: Star topology (simplest for debugging)
**Workload rate selection**
- Start with 1-5 tx/s per user, then increase
- DA: 1-2 channels, 1-3 blobs/channel initially
- Chaos: 30s+ intervals between restarts (allow recovery)
**Duration guidelines**
@ -222,7 +218,6 @@ docker stats # monitor resource usage
|-----------|--------------|
| Basic functionality | `expect_consensus_liveness()` |
| Transaction handling | `expect_consensus_liveness()` + custom inclusion check |
| DA correctness | `expect_consensus_liveness()` + DA dispersal/sampling checks |
| Resilience | `expect_consensus_liveness()` + recovery time measurement |
## Testing the Tests

View File

@ -9,7 +9,7 @@ recovery. The built-in restart workload lives in
## How it works
- Requires `NodeControlCapability` (`enable_node_control()` in the scenario
builder) and a runner that provides a `NodeControlHandle`.
- Randomly selects nodes (validators) to restart based on your
- Randomly selects nodes to restart based on your
include/exclude flags.
- Respects min/max delay between restarts and a target cooldown to avoid
flapping the same node too frequently.
@ -29,13 +29,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe
pub fn random_restart_plan() -> testing_framework_core::scenario::Scenario<
testing_framework_core::scenario::NodeControlCapability,
> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min delay
Duration::from_secs(75), // max delay
Duration::from_secs(120), // target cooldown
true, // include validators
true, // include nodes
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(150))
@ -47,11 +47,11 @@ pub fn random_restart_plan() -> testing_framework_core::scenario::Scenario<
- **Consensus liveness**: ensure blocks keep progressing despite restarts.
- **Height convergence**: optionally check all nodes converge after the chaos
window.
- Any workload-specific inclusion checks if you’re also driving tx/DA traffic.
- Any workload-specific inclusion checks if you’re also driving transactions.
## Best practices
- Keep delays/cooldowns realistic; avoid back-to-back restarts that would never
happen in production.
- Limit chaos scope: toggle validators based on what you want to
- Limit chaos scope: toggle nodes based on what you want to
test.
- Combine with observability: monitor metrics/logs to explain failures.

View File

@ -74,19 +74,19 @@ jobs:
restore-keys: |
${{ runner.os }}-cargo-host-
- name: Cache nomos-node build
- name: Cache logos-blockchain-node build
uses: actions/cache@v3
with:
path: |
../nomos-node/target/release/nomos-node
key: ${{ runner.os }}-nomos-${{ hashFiles('../nomos-node/**/Cargo.lock') }}
../logos-blockchain-node/target/release/logos-blockchain-node
key: ${{ runner.os }}-nomos-${{ hashFiles('../logos-blockchain-node/**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-nomos-
- name: Run host smoke test
run: |
# Use run-examples.sh which handles setup automatically
scripts/run/run-examples.sh -t 120 -v 3 -e 1 host
scripts/run/run-examples.sh -t 120 -n 3 host
- name: Upload logs on failure
if: failure()
@ -151,7 +151,7 @@ jobs:
TOPOLOGY: ${{ matrix.topology }}
run: |
# Build and run with the specified topology
scripts/run/run-examples.sh -t 120 -v ${TOPOLOGY:0:1} -e ${TOPOLOGY:2:1} compose
scripts/run/run-examples.sh -t 120 -n ${TOPOLOGY:0:1} compose
- name: Collect Docker logs on failure
if: failure()
@ -198,7 +198,7 @@ jobs:
## Workflow Features
1. **Matrix Testing:** Runs compose tests with different topologies (`3v1e`, `5v1e`)
2. **Caching:** Caches Rust dependencies, Docker layers, and nomos-node builds for faster runs
2. **Caching:** Caches Rust dependencies, Docker layers, and logos-blockchain-node builds for faster runs
3. **Log Collection:** Automatically uploads logs and artifacts when tests fail
4. **Timeout Protection:** Reasonable timeouts prevent jobs from hanging indefinitely
6. **Clean Teardown:** Ensures Docker resources are cleaned up even on failure
@ -259,14 +259,14 @@ Without this, tests will hang due to expensive proof generation.
Prefer `scripts/run/run-examples.sh` which handles all setup automatically:
```bash
scripts/run/run-examples.sh -t 120 -v 3 -e 1 host
scripts/run/run-examples.sh -t 120 -n 3 host
```
This is more reliable than manual `cargo run` commands.
### Cache Aggressively
Cache Rust dependencies, nomos-node builds, and Docker layers to speed up CI:
Cache Rust dependencies, logos-blockchain-node builds, and Docker layers to speed up CI:
```yaml
- name: Cache Rust dependencies
@ -346,7 +346,7 @@ Add debug environment variables temporarily:
```yaml
env:
RUST_LOG: debug
NOMOS_LOG_LEVEL: debug
LOGOS_BLOCKCHAIN_LOG_LEVEL: debug
```
### Preserve Containers (Compose)
@ -357,7 +357,7 @@ Set `COMPOSE_RUNNER_PRESERVE=1` to keep containers running for inspection:
- name: Run compose test (preserve on failure)
env:
COMPOSE_RUNNER_PRESERVE: 1
run: scripts/run/run-examples.sh -t 120 -v 3 -e 1 compose
run: scripts/run/run-examples.sh -t 120 -n 3 compose
```
### Access Artifacts

View File

@ -48,10 +48,10 @@ impl Workload for ReachabilityWorkload {
topology: &GeneratedTopology,
_run_metrics: &RunMetrics,
) -> Result<(), DynError> {
if topology.validators().get(self.target_idx).is_none() {
if topology.nodes().get(self.target_idx).is_none() {
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"no validator at requested index",
"no node at requested index",
)));
}
Ok(())
@ -60,7 +60,7 @@ impl Workload for ReachabilityWorkload {
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
let client = ctx
.node_clients()
.validator_clients()
.node_clients()
.get(self.target_idx)
.ok_or_else(|| {
Box::new(std::io::Error::new(
@ -108,7 +108,7 @@ impl Expectation for ReachabilityExpectation {
async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
let client = ctx
.node_clients()
.validator_clients()
.node_clients()
.get(self.target_idx)
.ok_or_else(|| {
Box::new(std::io::Error::new(

View File

@ -22,7 +22,7 @@ use testing_framework_core::scenario::{Builder, ScenarioBuilder};
pub fn topology() -> Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology (all connect to seed node)
.validators(3) // Number of validator nodes
.nodes(3) // Number of nodes
})
}
```
@ -34,7 +34,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn wallets_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.wallets(50) // Seed 50 funded wallet accounts
.build()
}
@ -47,7 +47,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn transactions_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@ -57,24 +57,6 @@ pub fn transactions_plan() -> testing_framework_core::scenario::Scenario<()> {
}
```
## DA Workload
```rust,ignore
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn da_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50)
.da_with(|da| {
da.channel_rate(1) // number of DA channels to run
.blob_rate(2) // target 2 blobs per block (headroom applied)
.headroom_percent(20) // optional headroom when sizing channels
}) // Finish DA workload config
.build()
}
```
## Chaos Workload (Requires `enable_node_control()`)
```rust,ignore
@ -84,7 +66,7 @@ use testing_framework_core::scenario::{NodeControlCapability, ScenarioBuilder};
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub fn chaos_plan() -> testing_framework_core::scenario::Scenario<NodeControlCapability> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control() // Enable node control capability
.chaos_with(|c| {
c.restart() // Random restart chaos
@ -104,7 +86,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn expectations_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.expect_consensus_liveness() // Assert blocks are produced continuously
.build()
}
@ -119,7 +101,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn run_duration_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
.build()
}
@ -132,7 +114,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn build_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)).build() // Construct the final Scenario
}
```
@ -164,7 +146,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn execution() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.expect_consensus_liveness()
.build();
@ -187,17 +169,12 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
.users(20)
})
.da_with(|da| {
da.channel_rate(1) // number of DA channels
.blob_rate(2) // target 2 blobs per block
.headroom_percent(20) // optional channel headroom
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(90))
.build();

View File

@ -31,19 +31,19 @@ Control which runner to use and the test topology:
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (all runners) |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (all runners) |
| `LOCAL_DEMO_VALIDATORS` | — | Legacy: Number of validators (host runner only) |
| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes (all runners) |
| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds (all runners) |
| `LOCAL_DEMO_NODES` | — | Legacy: Number of nodes (host runner only) |
| `LOCAL_DEMO_RUN_SECS` | — | Legacy: Run duration (host runner only) |
| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "validators" (e.g., `3`) |
| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "nodes" (e.g., `3`) |
**Example:**
```bash
# Run with 5 validators, for 120 seconds
NOMOS_DEMO_VALIDATORS=5 \
NOMOS_DEMO_RUN_SECS=120 \
scripts/run/run-examples.sh -t 120 -v 5 host
# Run with 5 nodes, for 120 seconds
LOGOS_BLOCKCHAIN_DEMO_NODES=5 \
LOGOS_BLOCKCHAIN_DEMO_RUN_SECS=120 \
scripts/run/run-examples.sh -t 120 -n 5 host
```
---
@ -54,13 +54,13 @@ Required for host runner when not using helper scripts:
| Variable | Required | Default | Effect |
|----------|----------|---------|--------|
| `NOMOS_NODE_BIN` | Yes (host) | — | Path to `nomos-node` binary |
| `NOMOS_NODE_PATH` | No | — | Path to nomos-node git checkout (dev workflow) |
| `LOGOS_BLOCKCHAIN_NODE_BIN` | Yes (host) | — | Path to `logos-blockchain-node` binary |
| `LOGOS_BLOCKCHAIN_NODE_PATH` | No | — | Path to logos-blockchain-node git checkout (dev workflow) |
**Example:**
```bash
export NOMOS_NODE_BIN=/path/to/nomos-node/target/release/nomos-node
export LOGOS_BLOCKCHAIN_NODE_BIN=/path/to/logos-blockchain-node/target/release/logos-blockchain-node
```
---
@ -71,53 +71,47 @@ Required for compose and k8s runners:
| Variable | Required | Default | Effect |
|----------|----------|---------|--------|
| `NOMOS_TESTNET_IMAGE` | Yes (compose/k8s) | `logos-blockchain-testing:local` | Docker image tag for node containers |
| `NOMOS_TESTNET_IMAGE_PULL_POLICY` | No | `IfNotPresent` (local) / `Always` (ECR) | K8s `imagePullPolicy` used by the runner |
| `NOMOS_BINARIES_TAR` | No | — | Path to prebuilt bundle (`.tar.gz`) for image build |
| `NOMOS_SKIP_IMAGE_BUILD` | No | 0 | Skip image rebuild (compose/k8s); assumes image already exists |
| `NOMOS_FORCE_IMAGE_BUILD` | No | 0 | Force rebuilding the image even when the script would normally skip it (e.g. non-local k8s) |
| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | Yes (compose/k8s) | `logos-blockchain-testing:local` | Docker image tag for node containers |
| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY` | No | `IfNotPresent` (local) / `Always` (ECR) | K8s `imagePullPolicy` used by the runner |
| `LOGOS_BLOCKCHAIN_BINARIES_TAR` | No | — | Path to prebuilt bundle (`.tar.gz`) for image build |
| `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD` | No | 0 | Skip image rebuild (compose/k8s); assumes image already exists |
| `LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD` | No | 0 | Force rebuilding the image even when the script would normally skip it (e.g. non-local k8s) |
**Example:**
```bash
# Using prebuilt bundle
export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local
export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local
scripts/build/build_test_image.sh
# Using pre-existing image (skip build)
export NOMOS_SKIP_IMAGE_BUILD=1
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1
scripts/run/run-examples.sh -t 60 -n 3 compose
```
---
## Circuit Assets (KZG Parameters)
## Circuit Assets
Circuit asset configuration for DA workloads:
Circuit asset configuration used by local runs and image builds:
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_KZGRS_PARAMS_PATH` | `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` | Path to KZG proving key file |
| `NOMOS_KZG_DIR_REL` | `testing-framework/assets/stack/kzgrs_test_params` | Directory containing KZG assets (relative to workspace root) |
| `NOMOS_KZG_FILE` | `kzgrs_test_params` | Filename of the proving key within `NOMOS_KZG_DIR_REL` |
| `NOMOS_KZG_CONTAINER_PATH` | `/kzgrs_test_params/kzgrs_test_params` | File path where the node expects KZG params inside containers |
| `NOMOS_KZG_MODE` | Runner-specific | K8s only: `hostPath` (mount from host) or `inImage` (embed into image) |
| `NOMOS_KZG_IN_IMAGE_PARAMS_PATH` | `/opt/nomos/kzg-params/kzgrs_test_params` | K8s `inImage` mode: where the proving key is stored inside the image |
| `LOGOS_BLOCKCHAIN_CIRCUITS` | `~/.logos-blockchain-circuits` | Directory containing circuit assets |
| `VERSION` | From `versions.env` | Circuit release tag (used by helper scripts) |
| `NOMOS_CIRCUITS` | — | Directory containing fetched circuit bundles (set by `scripts/setup/setup-circuits-stack.sh`) |
| `NOMOS_CIRCUITS_VERSION` | — | Legacy alias for `VERSION` (supported by some build scripts) |
| `NOMOS_CIRCUITS_PLATFORM` | Auto-detected | Override circuits platform (e.g. `linux-x86_64`, `macos-aarch64`) |
| `NOMOS_CIRCUITS_HOST_DIR_REL` | `.tmp/nomos-circuits-host` | Output dir for host circuits bundle (relative to repo root) |
| `NOMOS_CIRCUITS_LINUX_DIR_REL` | `.tmp/nomos-circuits-linux` | Output dir for linux circuits bundle (relative to repo root) |
| `NOMOS_CIRCUITS_NONINTERACTIVE` | 0 | Set to `1` to overwrite outputs without prompting in setup scripts |
| `NOMOS_CIRCUITS_REBUILD_RAPIDSNARK` | 0 | Set to `1` to force rebuilding rapidsnark (host bundle only) |
| `LOGOS_BLOCKCHAIN_CIRCUITS_VERSION` | — | Legacy alias for `VERSION` (supported by some build scripts) |
| `LOGOS_BLOCKCHAIN_CIRCUITS_PLATFORM` | Auto-detected | Override circuits platform (e.g. `linux-x86_64`, `macos-aarch64`) |
| `LOGOS_BLOCKCHAIN_CIRCUITS_HOST_DIR_REL` | `.tmp/logos-blockchain-circuits-host` | Output dir for host circuit bundle (relative to repo root) |
| `LOGOS_BLOCKCHAIN_CIRCUITS_LINUX_DIR_REL` | `.tmp/logos-blockchain-circuits-linux` | Output dir for linux circuit bundle (relative to repo root) |
| `LOGOS_BLOCKCHAIN_CIRCUITS_NONINTERACTIVE` | 0 | Set to `1` to overwrite outputs without prompting in setup scripts |
| `LOGOS_BLOCKCHAIN_CIRCUITS_REBUILD_RAPIDSNARK` | 0 | Set to `1` to force rebuilding rapidsnark (host bundle only) |
**Example:**
```bash
# Use custom circuit assets
NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/kzgrs_test_params \
LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/circuits \
cargo run -p runner-examples --bin local_runner
```
@ -129,28 +123,28 @@ Control node log output (not framework runner logs):
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
| `NOMOS_LOG_FILTER` | — | Fine-grained module filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) |
| `NOMOS_LOG_DIR` | — | Host runner: directory for per-node log files (persistent). Compose/k8s: use `cfgsync.yaml` for file logging. |
| `NOMOS_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI artifacts) |
| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset (combine with `NOMOS_LOG_DIR` unless external tracing backends configured) |
| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
| `LOGOS_BLOCKCHAIN_LOG_FILTER` | — | Fine-grained module filtering (e.g., `cryptarchia=trace`) |
| `LOGOS_BLOCKCHAIN_LOG_DIR` | — | Host runner: directory for per-node log files (persistent). Compose/k8s: use `cfgsync.yaml` for file logging. |
| `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI artifacts) |
| `LOGOS_BLOCKCHAIN_TESTS_TRACING` | false | Enable debug tracing preset (combine with `LOGOS_BLOCKCHAIN_LOG_DIR` unless external tracing backends configured) |
**Important:** Node logging ignores `RUST_LOG`; use `NOMOS_LOG_LEVEL` and `NOMOS_LOG_FILTER` for node logs.
**Important:** Node logging ignores `RUST_LOG`; use `LOGOS_BLOCKCHAIN_LOG_LEVEL` and `LOGOS_BLOCKCHAIN_LOG_FILTER` for node logs.
**Example:**
```bash
# Debug logging to files
NOMOS_LOG_DIR=/tmp/test-logs \
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug" \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/test-logs \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
# Inspect logs
ls /tmp/test-logs/
# nomos-node-0.2024-12-18T14-30-00.log
# nomos-node-1.2024-12-18T14-30-00.log
# logos-blockchain-node-0.2024-12-18T14-30-00.log
# logos-blockchain-node-1.2024-12-18T14-30-00.log
```
**Common filter targets:**
@ -158,9 +152,6 @@ ls /tmp/test-logs/
| Target Prefix | Subsystem |
|---------------|-----------|
| `cryptarchia` | Consensus (Cryptarchia) |
| `nomos_da_sampling` | DA sampling service |
| `nomos_da_dispersal` | DA dispersal service |
| `nomos_da_verifier` | DA verification |
| `nomos_blend` | Mix network/privacy layer |
| `chain_service` | Chain service (node APIs/state) |
| `chain_network` | P2P networking |
@ -174,21 +165,21 @@ Optional observability integration:
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_METRICS_QUERY_URL` | — | Prometheus-compatible base URL for runner to query (e.g., `http://localhost:9090`) |
| `NOMOS_METRICS_OTLP_INGEST_URL` | — | Full OTLP HTTP ingest URL for node metrics export (e.g., `http://localhost:9090/api/v1/otlp/v1/metrics`) |
| `NOMOS_GRAFANA_URL` | — | Grafana base URL for printing/logging (e.g., `http://localhost:3000`) |
| `NOMOS_OTLP_ENDPOINT` | — | OTLP trace endpoint (optional) |
| `NOMOS_OTLP_METRICS_ENDPOINT` | — | OTLP metrics endpoint (optional) |
| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | — | Prometheus-compatible base URL for runner to query (e.g., `http://localhost:9090`) |
| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | — | Full OTLP HTTP ingest URL for node metrics export (e.g., `http://localhost:9090/api/v1/otlp/v1/metrics`) |
| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | — | Grafana base URL for printing/logging (e.g., `http://localhost:3000`) |
| `LOGOS_BLOCKCHAIN_OTLP_ENDPOINT` | — | OTLP trace endpoint (optional) |
| `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT` | — | OTLP metrics endpoint (optional) |
**Example:**
```bash
# Enable Prometheus querying
export NOMOS_METRICS_QUERY_URL=http://localhost:9090
export NOMOS_METRICS_OTLP_INGEST_URL=http://localhost:9090/api/v1/otlp/v1/metrics
export NOMOS_GRAFANA_URL=http://localhost:3000
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090
export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://localhost:9090/api/v1/otlp/v1/metrics
export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
```
---
@ -210,7 +201,7 @@ Variables specific to Docker Compose deployment:
```bash
# Keep containers after test for debugging
COMPOSE_RUNNER_PRESERVE=1 \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
# Containers remain running
docker ps --filter "name=nomos-compose-"
@ -243,11 +234,11 @@ Variables specific to Kubernetes deployment:
K8S_RUNNER_NAMESPACE=nomos-test-debug \
K8S_RUNNER_PRESERVE=1 \
K8S_RUNNER_DEBUG=1 \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
# Inspect resources
kubectl get pods -n nomos-test-debug
kubectl logs -n nomos-test-debug -l nomos/logical-role=validator
kubectl logs -n nomos-test-debug -l nomos/logical-role=node
```
---
@ -258,19 +249,19 @@ Platform-specific build configuration:
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_BUNDLE_DOCKER_PLATFORM` | Host arch | Docker platform for bundle builds: `linux/arm64` or `linux/amd64` (macOS/Windows hosts) |
| `NOMOS_BIN_PLATFORM` | — | Legacy alias for `NOMOS_BUNDLE_DOCKER_PLATFORM` |
| `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM` | Host arch | Docker platform for bundle builds: `linux/arm64` or `linux/amd64` (macOS/Windows hosts) |
| `LOGOS_BLOCKCHAIN_BIN_PLATFORM` | — | Legacy alias for `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM` |
| `COMPOSE_CIRCUITS_PLATFORM` | Host arch | Circuits platform for image builds: `linux-aarch64` or `linux-x86_64` |
| `NOMOS_EXTRA_FEATURES` | — | Extra cargo features to enable when building bundles (used by `scripts/build/build-bundle.sh`) |
| `LOGOS_BLOCKCHAIN_EXTRA_FEATURES` | — | Extra cargo features to enable when building bundles (used by `scripts/build/build-bundle.sh`) |
**macOS / Apple Silicon:**
```bash
# Native performance (recommended for local testing)
export NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64
export LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64
# Or target amd64 (slower via emulation)
export NOMOS_BUNDLE_DOCKER_PLATFORM=linux/amd64
export LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/amd64
```
---
@ -283,36 +274,28 @@ Timeout and performance tuning:
|----------|---------|--------|
| `SLOW_TEST_ENV` | false | Doubles built-in readiness timeouts (useful in CI / constrained laptops) |
| `TESTNET_PRINT_ENDPOINTS` | 0 | Print `TESTNET_ENDPOINTS` / `TESTNET_PPROF` lines during deploy (set automatically by `scripts/run/run-examples.sh`) |
| `NOMOS_DISPERSAL_TIMEOUT_SECS` | 20 | DA dispersal timeout (seconds) |
| `NOMOS_RETRY_COOLDOWN_SECS` | 3 | Cooldown between retries (seconds) |
| `NOMOS_GRACE_PERIOD_SECS` | 1200 | Grace period before enforcing strict time-based expectations (seconds) |
| `NOMOS_PRUNE_DURATION_SECS` | 30 | Prune step duration (seconds) |
| `NOMOS_PRUNE_INTERVAL_SECS` | 5 | Interval between prune cycles (seconds) |
| `NOMOS_SHARE_DURATION_SECS` | 5 | Share duration (seconds) |
| `NOMOS_COMMITMENTS_WAIT_SECS` | 1 | Commitments wait duration (seconds) |
| `NOMOS_SDP_TRIGGER_DELAY_SECS` | 5 | SDP trigger delay (seconds) |
**Example:**
```bash
# Increase timeouts for slow environments
SLOW_TEST_ENV=true \
scripts/run/run-examples.sh -t 120 -v 5 -e 2 compose
scripts/run/run-examples.sh -t 120 -n 5 compose
```
---
## Node Configuration (Advanced)
Node-level configuration passed through to nomos-node:
Node-level configuration passed through to logos-blockchain-node:
| Variable | Default | Effect |
|----------|---------|--------|
| `CONSENSUS_SLOT_TIME` | — | Consensus slot time (seconds) |
| `CONSENSUS_ACTIVE_SLOT_COEFF` | — | Active slot coefficient (0.0-1.0) |
| `NOMOS_USE_AUTONAT` | Unset | If set, use AutoNAT instead of a static loopback address for libp2p NAT settings |
| `NOMOS_CFGSYNC_PORT` | 4400 | Port used for cfgsync service inside the stack |
| `NOMOS_TIME_BACKEND` | `monotonic` | Select time backend (used by compose/k8s stack scripts and deployers) |
| `LOGOS_BLOCKCHAIN_USE_AUTONAT` | Unset | If set, use AutoNAT instead of a static loopback address for libp2p NAT settings |
| `LOGOS_BLOCKCHAIN_CFGSYNC_PORT` | 4400 | Port used for cfgsync service inside the stack |
| `LOGOS_BLOCKCHAIN_TIME_BACKEND` | `monotonic` | Select time backend (used by compose/k8s stack scripts and deployers) |
**Example:**
@ -353,12 +336,12 @@ Variables used by helper scripts (`scripts/run/run-examples.sh`, etc.):
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_NODE_REV` | From `versions.env` | nomos-node git revision to build/fetch |
| `NOMOS_BUNDLE_VERSION` | From `versions.env` | Bundle schema version |
| `NOMOS_IMAGE_SELECTION` | — | Internal: image selection mode set by `run-examples.sh` (`local`/`ecr`/`auto`) |
| `NOMOS_NODE_APPLY_PATCHES` | 1 | Set to `0` to disable applying local patches when building bundles |
| `NOMOS_NODE_PATCH_DIR` | `patches/nomos-node` | Patch directory applied to nomos-node checkout during bundle builds |
| `NOMOS_NODE_PATCH_LEVEL` | — | Patch application level (`all` or an integer) for bundle builds |
| `LOGOS_BLOCKCHAIN_NODE_REV` | From `versions.env` | logos-blockchain-node git revision to build/fetch |
| `LOGOS_BLOCKCHAIN_BUNDLE_VERSION` | From `versions.env` | Bundle schema version |
| `LOGOS_BLOCKCHAIN_IMAGE_SELECTION` | — | Internal: image selection mode set by `run-examples.sh` (`local`/`ecr`/`auto`) |
| `LOGOS_BLOCKCHAIN_NODE_APPLY_PATCHES` | 1 | Set to `0` to disable applying local patches when building bundles |
| `LOGOS_BLOCKCHAIN_NODE_PATCH_DIR` | `patches/logos-blockchain-node` | Patch directory applied to logos-blockchain-node checkout during bundle builds |
| `LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL` | — | Patch application level (`all` or an integer) for bundle builds |
---
@ -368,26 +351,26 @@ Variables used by helper scripts (`scripts/run/run-examples.sh`, etc.):
```bash
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
```
### Debug Logging (Host)
```bash
POL_PROOF_DEV_MODE=true \
NOMOS_LOG_DIR=/tmp/logs \
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="cryptarchia=trace" \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logs \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \
scripts/run/run-examples.sh -t 60 -n 3 host
```
### Compose with Observability
```bash
POL_PROOF_DEV_MODE=true \
NOMOS_METRICS_QUERY_URL=http://localhost:9090 \
NOMOS_GRAFANA_URL=http://localhost:3000 \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090 \
LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000 \
scripts/run/run-examples.sh -t 60 -n 3 compose
```
### K8s with Debug
@ -397,7 +380,7 @@ POL_PROOF_DEV_MODE=true \
K8S_RUNNER_NAMESPACE=nomos-debug \
K8S_RUNNER_DEBUG=1 \
K8S_RUNNER_PRESERVE=1 \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
```
### CI Environment
@ -406,7 +389,7 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
env:
POL_PROOF_DEV_MODE: true
RUST_BACKTRACE: 1
NOMOS_TESTS_KEEP_LOGS: 1
LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS: 1
```
---

View File

@ -13,9 +13,9 @@ Realistic advanced scenarios demonstrating framework capabilities for production
| Example | Topology | Workloads | Deployer | Key Feature |
|---------|----------|-----------|----------|-------------|
| Load Progression | 3 validators | Increasing tx rate | Compose | Dynamic load testing |
| Sustained Load | 4 validators | High tx + DA rate | Compose | Stress testing |
| Aggressive Chaos | 4 validators | Frequent restarts + traffic | Compose | Resilience validation |
| Load Progression | 3 nodes | Increasing tx rate | Compose | Dynamic load testing |
| Sustained Load | 4 nodes | High tx rate | Compose | Stress testing |
| Aggressive Chaos | 4 nodes | Frequent restarts + traffic | Compose | Resilience validation |
## Load Progression Test
@ -34,7 +34,7 @@ pub async fn load_progression_test() -> Result<()> {
println!("Testing with rate: {}", rate);
let mut plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| txs.rate(rate).users(20))
.expect_consensus_liveness()
@ -54,7 +54,7 @@ pub async fn load_progression_test() -> Result<()> {
## Sustained Load Test
Run high transaction and DA load for extended duration:
Run high transaction load for extended duration:
```rust,ignore
use std::time::Duration;
@ -65,10 +65,9 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn sustained_load_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.wallets(100)
.transactions_with(|txs| txs.rate(15).users(50))
.da_with(|da| da.channel_rate(2).blob_rate(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(300))
.build();
@ -96,7 +95,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn aggressive_chaos_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.wallets(50)
.transactions_with(|txs| txs.rate(10).users(20))
@ -143,7 +142,7 @@ These scenarios require custom implementations but demonstrate framework extensi
#### Cross-Validator Mempool Divergence & Convergence
**Concept:** Drive different transaction subsets into different validators (or differing arrival orders) to create temporary mempool divergence, then verify mempools/blocks converge to contain the union (no permanent divergence).
**Concept:** Drive different transaction subsets into different nodes (or differing arrival orders) to create temporary mempool divergence, then verify mempools/blocks converge to contain the union (no permanent divergence).
**Requirements:**
- **Custom workload:** Targets specific nodes via `ctx.node_clients()` with disjoint or jittered transaction batches
@ -238,7 +237,7 @@ These scenarios require custom implementations but demonstrate framework extensi
**Requirements:**
- Needs `block_peer()` / `unblock_peer()` methods in `NodeControlHandle`
- Partition subsets of validators, wait, then restore connectivity
- Partition subsets of nodes, wait, then restore connectivity
- Verify chain convergence after partition heals
**Why useful:** Tests the most realistic failure mode in distributed systems.
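A rough sketch of what such a test could look like once those methods exist. Everything here is hypothetical: the `block_peer`/`unblock_peer` signatures, how the code obtains the `NodeControlHandle`, and the node names and timings are assumptions, not current framework API.
```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::DynError;
use tokio::time::sleep;
// NodeControlHandle import omitted: the final type/API is not settled yet.
async fn partition_and_heal(handle: &NodeControlHandle) -> Result<(), DynError> {
    // Partition: cut the links between the two halves of the cluster.
    handle.block_peer("node-0", "node-2").await?; // hypothetical API
    handle.block_peer("node-1", "node-3").await?; // hypothetical API
    // Let each side build on its own view for a while.
    sleep(Duration::from_secs(60)).await;
    // Heal: restore connectivity, then give the chain time to converge.
    handle.unblock_peer("node-0", "node-2").await?; // hypothetical API
    handle.unblock_peer("node-1", "node-3").await?; // hypothetical API
    sleep(Duration::from_secs(60)).await;
    // Convergence itself would be asserted by a custom expectation
    // comparing heights/tips across nodes after the partition heals.
    Ok(())
}
```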

View File

@ -13,7 +13,7 @@ and expectations.
- `compose_runner.rs` — Docker Compose (requires image built)
- `k8s_runner.rs` — Kubernetes (requires cluster access and image loaded)
**Recommended:** Use `scripts/run/run-examples.sh -t <duration> -v <validators> <mode>` where mode is `host`, `compose`, or `k8s`.
**Recommended:** Use `scripts/run/run-examples.sh -t <duration> -n <nodes> <mode>` where mode is `host`, `compose`, or `k8s`.
**Alternative:** Direct cargo run: `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`
@ -34,7 +34,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn simple_consensus() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
.build();
@ -62,7 +62,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn transaction_workload() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.wallets(20)
.transactions_with(|txs| txs.rate(5).users(10))
.expect_consensus_liveness()
@ -79,37 +79,6 @@ pub async fn transaction_workload() -> Result<()> {
**When to use**: validate transaction submission and inclusion.
## DA + transaction workload
Combined test stressing both transaction and DA layers:
```rust,ignore
use std::time::Duration;
use anyhow::Result;
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn da_and_transactions() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(30)
.transactions_with(|txs| txs.rate(5).users(15))
.da_with(|da| da.channel_rate(2).blob_rate(2))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(90))
.build();
let deployer = LocalDeployer::default();
let runner = deployer.deploy(&plan).await?;
let _handle = runner.run(&mut plan).await?;
Ok(())
}
```
**When to use**: end-to-end coverage of transaction and DA layers.
## Chaos resilience
Test system resilience under node restarts:
@ -123,7 +92,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn chaos_resilience() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.wallets(20)
.transactions_with(|txs| txs.rate(3).users(10))

View File

@ -61,15 +61,15 @@ impl Workload for MyWorkload {
_run_metrics: &RunMetrics,
) -> Result<(), DynError> {
// Validate prerequisites (e.g., enough nodes, wallet data present)
if topology.validators().is_empty() {
return Err("no validators available".into());
if topology.nodes().is_empty() {
return Err("no nodes available".into());
}
Ok(())
}
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
// Drive async activity: submit transactions, query nodes, etc.
let clients = ctx.node_clients().validator_clients();
let clients = ctx.node_clients().node_clients();
for client in clients {
let info = client.consensus_info().await?;
@ -126,8 +126,8 @@ impl Expectation for MyExpectation {
async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> {
// Optional: capture baseline state before workloads start
let client = ctx.node_clients().validator_clients().first()
.ok_or("no validators")?;
let client = ctx.node_clients().node_clients().first()
.ok_or("no nodes")?;
let info = client.consensus_info().await?;
self.captured_baseline = Some(info.height);
@ -138,8 +138,8 @@ impl Expectation for MyExpectation {
async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
// Assert the expected condition holds after workloads finish
let client = ctx.node_clients().validator_clients().first()
.ok_or("no validators")?;
let client = ctx.node_clients().node_clients().first()
.ok_or("no nodes")?;
let info = client.consensus_info().await?;
let final_height = info.height;
@ -201,7 +201,7 @@ impl Deployer<()> for MyDeployer {
async fn deploy(&self, scenario: &Scenario<()>) -> Result<Runner, Self::Error> {
// 1. Launch nodes using scenario.topology()
// 2. Wait for readiness (e.g., consensus info endpoint responds)
// 3. Build NodeClients for validators
// 3. Build NodeClients for nodes
// 4. Spawn a block feed for expectations (optional but recommended)
// 5. Create NodeControlHandle if you support restarts (optional)
// 6. Return a Runner wrapping RunContext + CleanupGuard
@ -345,7 +345,7 @@ impl MyWorkloadDsl for ScenarioBuilder {
Users can then call:
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.my_workload_with(|w| {
w.target_rate(10)
.some_option(true)

View File

@ -18,14 +18,13 @@ The framework enforces a minimum of **2× slot duration** (4 seconds with defaul
- **Smoke tests**: 30s minimum (~14 blocks with default 2s slots, 0.9 coefficient)
- **Transaction workloads**: 60s+ (~27 blocks) to observe inclusion patterns
- **DA workloads**: 90s+ (~40 blocks) to account for dispersal and sampling
- **Chaos tests**: 120s+ (~54 blocks) to allow recovery after restarts
Very short runs (< 30s) risk false confidence: one or two lucky blocks don't prove liveness.
**Do I always need seeded wallets?**
Only for transaction scenarios. Data-availability or pure chaos scenarios may
not require them, but liveness checks still need validators producing blocks.
Only for transaction scenarios. Pure chaos scenarios may not require them, but
liveness checks still need nodes producing blocks.
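For example, a chaos-only plan can omit wallet seeding entirely while keeping the liveness expectation. This is a minimal sketch; it assumes a bare `restart()` chaos configuration is valid on its own, whereas real plans typically tune it further.
```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::{NodeControlCapability, ScenarioBuilder};
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub fn chaos_only_plan() -> testing_framework_core::scenario::Scenario<NodeControlCapability> {
    ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
        .enable_node_control()       // chaos workloads need node control
        .chaos_with(|c| c.restart()) // assumption: restart() alone is a valid config
        .expect_consensus_liveness() // no seeded wallets required for this check
        .with_run_duration(Duration::from_secs(120))
        .build()
}
```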
**What if expectations fail but workloads “look fine”?**
Trust expectations first—they capture the intended success criteria. Use the

View File

@ -1,7 +1,6 @@
# Glossary
- **Validator**: node role responsible for participating in consensus and block
production.
- **Node**: process that participates in consensus and produces blocks.
- **Deployer**: component that provisions infrastructure (spawns processes,
creates containers, or launches pods), waits for readiness, and returns a
Runner. Examples: LocalDeployer, ComposeDeployer, K8sDeployer.
@ -38,9 +37,7 @@
state (e.g., wallet balances, UTXO sets) rather than just progress signals.
Also called "correctness expectations."
- **Mantle transaction**: transaction type in Logos that can contain UTXO transfers
(LedgerTx) and operations (Op), including channel data (ChannelBlob).
- **Channel**: logical grouping for DA blobs; each blob belongs to a channel and
references a parent blob in the same channel, creating a chain of related data.
(LedgerTx) and operations (Op).
- **POL_PROOF_DEV_MODE**: environment variable that disables expensive Groth16 zero-knowledge
proof generation for leader election. **Required for all runners** (local, compose, k8s)
for practical testing—without it, proof generation causes timeouts. Should never be

View File

@ -2,13 +2,13 @@
High-level roles of the crates that make up the framework:
- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, data availability, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution.
- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution.
- **Core scenario orchestration** (`testing-framework/core/`): Houses the topology and scenario model, runtime coordination, node clients, and readiness/health probes. Defines `Deployer` and `Runner` traits, `ScenarioBuilder`, and `RunContext`.
- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, DA, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`).
- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`).
- **Runners** (`testing-framework/runners/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`).
- **Deployers** (`testing-framework/deployers/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`).
- **Runner Examples** (crate name: `runner-examples`, path: `examples/`): Runnable binaries demonstrating framework usage and serving as living documentation. These are the **primary entry point** for running scenarios (`examples/src/bin/local_runner.rs`, `examples/src/bin/compose_runner.rs`, `examples/src/bin/k8s_runner.rs`).
@ -16,13 +16,13 @@ High-level roles of the crates that make up the framework:
| What You're Adding | Where It Goes | Examples |
|-------------------|---------------|----------|
| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels, DA params |
| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts, node roles |
| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels |
| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts |
| **Scenario capability** | `testing-framework/core/src/scenario/` | New capabilities, context methods |
| **Workload** | `testing-framework/workflows/src/workloads/` | New traffic generators |
| **Expectation** | `testing-framework/workflows/src/expectations/` | New success criteria |
| **Builder API** | `testing-framework/workflows/src/builder/` | DSL extensions, fluent methods |
| **Deployer** | `testing-framework/runners/` | New deployment backends |
| **Deployer** | `testing-framework/deployers/` | New deployment backends |
| **Example scenario** | `examples/src/bin/` | Demonstration binaries |
## Extension Workflow
@ -93,7 +93,7 @@ impl<Caps> YourWorkloadDslExt for testing_framework_core::scenario::Builder<Caps
}
pub fn use_in_examples() {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.your_workload_with(|w| w.some_config())
.build();
}
@ -136,7 +136,7 @@ impl<Caps> YourExpectationDslExt for testing_framework_core::scenario::Builder<C
}
pub fn use_in_examples() {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_your_condition()
.build();
}

View File

@ -6,12 +6,12 @@ tests and full-system validation by letting teams describe a cluster layout,
drive meaningful traffic, and assert the outcomes in one coherent plan.
It is for protocol engineers, infrastructure operators, and QA teams who need
repeatable confidence that validators
components work together under network and timing constraints.
repeatable confidence that node components work together under network and
timing constraints.
Multi-node integration testing is required because many Logos behaviors—block
progress, data availability, liveness under churn—only emerge when several
roles interact over real networking and time. This framework makes those checks
progress and liveness under churn—only emerge when several nodes interact over
real networking and time. This framework makes those checks
declarative, observable, and portable across environments.
## A Scenario in 20 Lines
@ -22,11 +22,10 @@ Here's the conceptual shape of every test you'll write:
// 1. Define the cluster
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.nodes(3)
})
// 2. Add workloads (traffic)
.transactions_with(|tx| tx.rate(10).users(5))
.da_with(|da| da.channel_rate(2).blob_rate(2))
// 3. Define success criteria
.expect_consensus_liveness()

View File

@ -9,9 +9,9 @@ Comprehensive guide to log collection, metrics, and debugging across all runners
| Component | Controlled By | Purpose |
|-----------|--------------|---------|
| **Framework binaries** (`cargo run -p runner-examples --bin local_runner`) | `RUST_LOG` | Runner orchestration, deployment logs |
| **Node processes** (validators spawned by runner) | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (+ `NOMOS_LOG_DIR` on host runner) | Consensus, DA, mempool, network logs |
| **Node processes** (nodes spawned by runner) | `LOGOS_BLOCKCHAIN_LOG_LEVEL`, `LOGOS_BLOCKCHAIN_LOG_FILTER` (+ `LOGOS_BLOCKCHAIN_LOG_DIR` on host runner) | Consensus, mempool, network logs |
**Common mistake:** Setting `RUST_LOG=debug` only increases verbosity of the runner binary itself. Node logs remain at their default level unless you also set `NOMOS_LOG_LEVEL=debug`.
**Common mistake:** Setting `RUST_LOG=debug` only increases verbosity of the runner binary itself. Node logs remain at their default level unless you also set `LOGOS_BLOCKCHAIN_LOG_LEVEL=debug`.
**Example:**
@ -20,10 +20,10 @@ Comprehensive guide to log collection, metrics, and debugging across all runners
RUST_LOG=debug cargo run -p runner-examples --bin local_runner
# This makes the NODES verbose:
NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
# Both verbose (typically not needed):
RUST_LOG=debug NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
RUST_LOG=debug LOGOS_BLOCKCHAIN_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
```
## Logging Environment Variables
@ -32,47 +32,44 @@ See [Environment Variables Reference](environment-variables.md) for complete det
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_LOG_DIR` | None (console only) | Host runner: directory for per-node log files. Compose/k8s: use `cfgsync.yaml` |
| `NOMOS_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
| `NOMOS_LOG_FILTER` | None | Fine-grained target filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) |
| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset |
| `NOMOS_OTLP_ENDPOINT` | None | OTLP trace endpoint (optional) |
| `NOMOS_OTLP_METRICS_ENDPOINT` | None | OTLP metrics endpoint (optional) |
| `LOGOS_BLOCKCHAIN_LOG_DIR` | None (console only) | Host runner: directory for per-node log files. Compose/k8s: use `cfgsync.yaml` |
| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
| `LOGOS_BLOCKCHAIN_LOG_FILTER` | None | Fine-grained target filtering (e.g., `cryptarchia=trace`) |
| `LOGOS_BLOCKCHAIN_TESTS_TRACING` | false | Enable debug tracing preset |
| `LOGOS_BLOCKCHAIN_OTLP_ENDPOINT` | None | OTLP trace endpoint (optional) |
| `LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT` | None | OTLP metrics endpoint (optional) |
**Example:** Full debug logging to files:
```bash
NOMOS_TESTS_TRACING=true \
NOMOS_LOG_DIR=/tmp/test-logs \
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug,nomos_da_dispersal=debug,nomos_da_verifier=debug" \
LOGOS_BLOCKCHAIN_TESTS_TRACING=true \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/test-logs \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace,chain_service=info,chain_network=info" \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
```
## Per-Node Log Files
When `NOMOS_LOG_DIR` is set, each node writes logs to separate files:
When `LOGOS_BLOCKCHAIN_LOG_DIR` is set, each node writes logs to separate files:
**File naming pattern:**
- **Validators**: Prefix `nomos-node-0`, `nomos-node-1`, etc. (may include timestamp suffix)
- **Validators**: Prefix `logos-blockchain-node-0`, `logos-blockchain-node-1`, etc. (may include timestamp suffix)
**Example filenames:**
- `nomos-node-0.2024-12-18T14-30-00.log`
- `nomos-node-1.2024-12-18T14-30-00.log`
- `logos-blockchain-node-0.2024-12-18T14-30-00.log`
- `logos-blockchain-node-1.2024-12-18T14-30-00.log`
**Local runner note:** The local runner uses per-run temporary directories under the current working directory and removes them after the run unless `NOMOS_TESTS_KEEP_LOGS=1`. Use `NOMOS_LOG_DIR=/path/to/logs` to write per-node log files to a stable location.
**Local runner note:** The local runner uses per-run temporary directories under the current working directory and removes them after the run unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`. Use `LOGOS_BLOCKCHAIN_LOG_DIR=/path/to/logs` to write per-node log files to a stable location.
## Filter Target Names
Common target prefixes for `NOMOS_LOG_FILTER`:
Common target prefixes for `LOGOS_BLOCKCHAIN_LOG_FILTER`:
| Target Prefix | Subsystem |
|---------------|-----------|
| `cryptarchia` | Consensus (Cryptarchia) |
| `nomos_da_sampling` | DA sampling service |
| `nomos_da_dispersal` | DA dispersal service |
| `nomos_da_verifier` | DA verification |
| `nomos_blend` | Mix network/privacy layer |
| `chain_service` | Chain service (node APIs/state) |
| `chain_network` | P2P networking |
@ -81,7 +78,7 @@ Common target prefixes for `NOMOS_LOG_FILTER`:
**Example filter:**
```bash
NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug,chain_service=info,chain_network=info"
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace,chain_service=info,chain_network=info"
```
---
@ -101,17 +98,17 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
**Persistent file output:**
```bash
NOMOS_LOG_DIR=/tmp/local-logs \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/local-logs \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
# After test completes:
ls /tmp/local-logs/
# Files with prefix: nomos-node-0*, nomos-node-1*
# Files with prefix: logos-blockchain-node-0*, logos-blockchain-node-1*
# May include timestamps in filename
```
**Tip:** Use `NOMOS_LOG_DIR` for persistent per-node log files, and `NOMOS_TESTS_KEEP_LOGS=1` if you want to keep the per-run temporary directories (configs/state) for post-mortem inspection.
**Tip:** Use `LOGOS_BLOCKCHAIN_LOG_DIR` for persistent per-node log files, and `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` if you want to keep the per-run temporary directories (configs/state) for post-mortem inspection.
### Compose Runner (Docker Containers)
@ -125,7 +122,7 @@ docker ps --filter "name=nomos-compose-"
docker logs -f <container-id-or-name>
# Or use name pattern matching:
docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
docker logs -f $(docker ps --filter "name=nomos-compose-.*-node-0" -q | head -1)
# Show last 100 lines
docker logs --tail 100 <container-id>
@ -139,7 +136,7 @@ To write per-node log files inside containers, set `tracing_settings.logger: !Fi
```bash
# Ensure cfgsync.yaml is configured to log to /logs
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner
@ -161,7 +158,7 @@ volumes:
```bash
COMPOSE_RUNNER_PRESERVE=1 \
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
cargo run -p runner-examples --bin compose_runner
# Containers remain running after test—inspect with docker logs or docker exec
```
@ -172,7 +169,7 @@ cargo run -p runner-examples --bin compose_runner
- `TESTNET_RUNNER_PRESERVE=1` — alias for `COMPOSE_RUNNER_PRESERVE=1`
- `COMPOSE_RUNNER_HTTP_TIMEOUT_SECS=<secs>` — override HTTP readiness timeout
**Note:** Container names follow pattern `nomos-compose-{uuid}-validator-{index}-1` where `{uuid}` changes per run.
**Note:** Container names follow pattern `nomos-compose-{uuid}-node-{index}-1` where `{uuid}` changes per run.
### K8s Runner (Kubernetes Pods)
@ -184,25 +181,25 @@ kubectl get pods
# Stream logs using label selectors (recommended)
# Helm chart labels:
# - nomos/logical-role=validator
# - nomos/validator-index
kubectl logs -l nomos/logical-role=validator -f
# - nomos/logical-role=node
# - nomos/node-index
kubectl logs -l nomos/logical-role=node -f
# Stream logs from specific pod
kubectl logs -f nomos-validator-0
kubectl logs -f logos-blockchain-node-0
# Previous logs from crashed pods
kubectl logs --previous -l nomos/logical-role=validator
kubectl logs --previous -l nomos/logical-role=node
```
**Download logs for offline analysis:**
```bash
# Using label selectors
kubectl logs -l nomos/logical-role=validator --tail=1000 > all-validators.log
kubectl logs -l nomos/logical-role=node --tail=1000 > all-nodes.log
# Specific pods
kubectl logs nomos-validator-0 > validator-0.log
kubectl logs logos-blockchain-node-0 > node-0.log
```
**K8s debugging variables:**
@ -214,7 +211,7 @@ kubectl logs nomos-validator-0 > validator-0.log
**Specify namespace (if not using default):**
```bash
kubectl logs -n my-namespace -l nomos/logical-role=validator -f
kubectl logs -n my-namespace -l nomos/logical-role=node -f
```
**Note:** K8s runner is optimized for local clusters (Docker Desktop K8s, minikube, kind). Remote clusters require additional setup.
@ -228,8 +225,8 @@ kubectl logs -n my-namespace -l nomos/logical-role=validator -f
**To enable OTLP:**
```bash
NOMOS_OTLP_ENDPOINT=http://localhost:4317 \
NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318 \
LOGOS_BLOCKCHAIN_OTLP_ENDPOINT=http://localhost:4317 \
LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT=http://localhost:4318 \
cargo run -p runner-examples --bin local_runner
```
@ -247,7 +244,7 @@ Runners expose metrics and node HTTP endpoints for expectation code and debuggin
- For a ready-to-run stack, use `scripts/setup/setup-observability.sh`:
- Compose: `scripts/setup/setup-observability.sh compose up` then `scripts/setup/setup-observability.sh compose env`
- K8s: `scripts/setup/setup-observability.sh k8s install` then `scripts/setup/setup-observability.sh k8s env`
- Provide `NOMOS_METRICS_QUERY_URL` (PromQL base URL) to enable `ctx.telemetry()` queries
- Provide `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` (PromQL base URL) to enable `ctx.telemetry()` queries
- Access from expectations when configured: `ctx.telemetry().prometheus().map(|p| p.base_url())`
**Example:**
@ -261,13 +258,13 @@ eval $(scripts/setup/setup-observability.sh compose env)
# Run scenario with metrics
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
```
### Grafana (Optional)
- Runners do **not** provision Grafana automatically (but `scripts/setup/setup-observability.sh` can)
- If you set `NOMOS_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`
- If you set `LOGOS_BLOCKCHAIN_GRAFANA_URL`, the deployer prints it in `TESTNET_ENDPOINTS`
- Dashboards live in `testing-framework/assets/stack/monitoring/grafana/dashboards/` (the bundled stack auto-provisions them)
**Example:**
@ -277,16 +274,16 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/setup/setup-observability.sh compose up
eval $(scripts/setup/setup-observability.sh compose env)
export NOMOS_GRAFANA_URL=http://localhost:3000
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 3 compose
```
**Default bundled Grafana login:** `admin` / `admin` (see `scripts/observability/compose/docker-compose.yml`).
### Node APIs
- Access from expectations: `ctx.node_clients().validator_clients().get(0)`
- Endpoints: consensus info, network info, DA membership, etc.
- Access from expectations: `ctx.node_clients().node_clients().get(0)`
- Endpoints: consensus info, network info, etc.
- See `testing-framework/core/src/nodes/api_client.rs` for available methods
**Example usage in expectations:**
@ -295,10 +292,10 @@ POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
use testing_framework_core::scenario::{DynError, RunContext};
async fn evaluate(ctx: &RunContext) -> Result<(), DynError> {
let client = &ctx.node_clients().validator_clients()[0];
let client = &ctx.node_clients().node_clients()[0];
let info = client.consensus_info().await?;
tracing::info!(height = info.height, "consensus info from validator 0");
tracing::info!(height = info.height, "consensus info from node 0");
Ok(())
}
@ -322,11 +319,11 @@ flowchart TD
### Debug Logging (Host)
```bash
NOMOS_LOG_DIR=/tmp/logs \
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="cryptarchia=trace" \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logs \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
```
### Compose with Observability
@ -338,7 +335,7 @@ eval $(scripts/setup/setup-observability.sh compose env)
# Run with metrics
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
# Access Grafana at http://localhost:3000
```
@ -350,10 +347,10 @@ K8S_RUNNER_NAMESPACE=nomos-debug \
K8S_RUNNER_DEBUG=1 \
K8S_RUNNER_PRESERVE=1 \
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
# Inspect logs
kubectl logs -n nomos-debug -l nomos/logical-role=validator
kubectl logs -n nomos-debug -l nomos/logical-role=node
```
---

399
book/src/manual-cluster.md Normal file
View File

@ -0,0 +1,399 @@
# Manual Clusters: Imperative Control
**When should I read this?** You're integrating external test drivers (like Cucumber/BDD frameworks) that need imperative node orchestration. This is an escape hatch for when the test orchestration must live outside the framework—most tests should use the standard scenario approach.
---
## Overview
**Manual clusters** provide imperative, on-demand node control for scenarios that don't fit the declarative `ScenarioBuilder` pattern:
```rust
use testing_framework_core::topology::config::TopologyConfig;
use testing_framework_core::scenario::{PeerSelection, StartNodeOptions};
use testing_framework_runner_local::LocalDeployer;
let config = TopologyConfig::with_node_numbers(3);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(config)?;
// Start nodes on demand with explicit peer selection
let node_a = cluster.start_node_with(
"a",
StartNodeOptions {
peers: PeerSelection::None, // Start isolated
}
).await?.api;
let node_b = cluster.start_node_with(
"b",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]), // Connect to A
}
).await?.api;
// Wait for network readiness
cluster.wait_network_ready().await?;
// Custom validation logic
let info_a = node_a.consensus_info().await?;
let info_b = node_b.consensus_info().await?;
assert!(info_a.height.abs_diff(info_b.height) <= 5);
```
**Key difference from scenarios:**
- **External orchestration:** Your code (or an external driver like Cucumber) controls the execution flow step-by-step
- **Imperative model:** You call `start_node()`, `sleep()`, poll APIs directly in test logic
- **No framework execution:** The scenario runner doesn't drive workloads—you do
Note: Scenarios with node control can also start nodes dynamically, control peer selection, and orchestrate timing—but via **workloads** within the framework's execution model. Use manual clusters only when the orchestration must be external (e.g., Cucumber steps).
---
## When to Use Manual Clusters
**Manual clusters are an escape hatch for when orchestration must live outside the framework.**
Prefer workloads for scenario logic; use manual clusters only when an external system needs to control node lifecycle—for example:
**Cucumber/BDD integration**
Gherkin steps control when nodes start, which peers they connect to, and when to verify state. The test driver (Cucumber) orchestrates the scenario step-by-step.
**Custom test harnesses**
External scripts or tools that need programmatic control over node lifecycle as part of a larger testing pipeline.
---
## Core API
### Starting the Cluster
```rust
use testing_framework_core::topology::config::TopologyConfig;
use testing_framework_runner_local::LocalDeployer;
// Define capacity (preallocates ports/configs for N nodes)
let config = TopologyConfig::with_node_numbers(5);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(config)?;
// Nodes are stopped automatically when cluster is dropped
```
**Important:** The `TopologyConfig` defines the **maximum capacity**, not the initial state. Nodes are started on-demand via API calls.
### Starting Nodes
**Default peers (topology layout):**
```rust
let node = cluster.start_node("seed").await?;
```
**No peers (isolated):**
```rust
use testing_framework_core::scenario::{PeerSelection, StartNodeOptions};
let node = cluster.start_node_with(
"isolated",
StartNodeOptions {
peers: PeerSelection::None,
}
).await?;
```
**Explicit peers (named):**
```rust
let node = cluster.start_node_with(
"follower",
StartNodeOptions {
peers: PeerSelection::Named(vec![
"node-seed".to_owned(),
"node-isolated".to_owned(),
]),
}
).await?;
```
**Note:** Node names are prefixed with `node-` internally. If you start a node with name `"a"`, reference it as `"node-a"` in peer lists.
### Getting Node Clients
```rust
// From start result
let started = cluster.start_node("my-node").await?;
let client = started.api;
// Or lookup by name
if let Some(client) = cluster.node_client("node-my-node") {
let info = client.consensus_info().await?;
println!("Height: {}", info.height);
}
```
### Waiting for Readiness
```rust
// Waits until all started nodes have connected to their expected peers
cluster.wait_network_ready().await?;
```
**Behavior:**
- Single-node clusters are always ready (no peers to verify)
- Multi-node clusters wait for peer counts to match expectations
- Times out after 60 seconds (120 seconds if `SLOW_TEST_ENV=true`) with a diagnostic message
---
## Complete Example: External Test Driver Pattern
This shows how an external test driver (like Cucumber) might use manual clusters to control node lifecycle:
```rust
use std::time::Duration;
use anyhow::Result;
use testing_framework_core::{
scenario::{PeerSelection, StartNodeOptions},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tokio::time::sleep;
#[tokio::test]
async fn external_driver_example() -> Result<()> {
// Step 1: Create cluster with capacity for 3 nodes
let config = TopologyConfig::with_node_numbers(3);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(config)?;
// Step 2: External driver decides to start 2 nodes initially
println!("Starting initial topology...");
let node_a = cluster.start_node("a").await?.api;
let node_b = cluster
.start_node_with(
"b",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
},
)
.await?
.api;
cluster.wait_network_ready().await?;
// Step 3: External driver runs some protocol operations
let info = node_a.consensus_info().await?;
println!("Initial cluster height: {}", info.height);
// Step 4: Later, external driver decides to add third node
println!("External driver adding third node...");
let node_c = cluster
.start_node_with(
"c",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
},
)
.await?
.api;
cluster.wait_network_ready().await?;
// Step 5: External driver validates final state
let heights = vec![
node_a.consensus_info().await?.height,
node_b.consensus_info().await?.height,
node_c.consensus_info().await?.height,
];
println!("Final heights: {:?}", heights);
Ok(())
}
```
**Key pattern:**
The external driver controls **when** nodes start and **which peers** they connect to, allowing test frameworks like Cucumber to orchestrate scenarios step-by-step based on Gherkin steps or other external logic.
---
## Peer Selection Strategies
**`PeerSelection::DefaultLayout`**
Uses the topology's network layout (star/chain/full). Default behavior.
```rust
let node = cluster.start_node_with(
"normal",
StartNodeOptions {
peers: PeerSelection::DefaultLayout,
}
).await?;
```
**`PeerSelection::None`**
Node starts with no initial peers. Use when an external driver needs to build topology incrementally.
```rust
let isolated = cluster.start_node_with(
"isolated",
StartNodeOptions {
peers: PeerSelection::None,
}
).await?;
```
**`PeerSelection::Named(vec!["node-a", "node-b"])`**
Explicit peer list. Use when an external driver needs to construct specific peer relationships.
```rust
let follower = cluster.start_node_with(
"follower",
StartNodeOptions {
peers: PeerSelection::Named(vec![
"node-a".to_owned(),
"node-b".to_owned(),
]),
}
).await?;
```
**Remember:** Node names are automatically prefixed with `node-`. If you call `start_node("a")`, reference it as `"node-a"` in peer lists.
---
## Custom Validation Patterns
Manual clusters don't have built-in expectations—you write validation logic directly:
### Height Convergence
```rust
use tokio::time::{sleep, Duration};
let start = tokio::time::Instant::now();
loop {
let heights: Vec<u64> = vec![
node_a.consensus_info().await?.height,
node_b.consensus_info().await?.height,
node_c.consensus_info().await?.height,
];
let max_diff = heights.iter().max().unwrap() - heights.iter().min().unwrap();
if max_diff <= 5 {
println!("Converged: heights={:?}", heights);
break;
}
if start.elapsed() > Duration::from_secs(60) {
return Err(anyhow::anyhow!("Convergence timeout: heights={:?}", heights));
}
sleep(Duration::from_secs(2)).await;
}
```
### Peer Count Verification
```rust
let info = node.network_info().await?;
assert_eq!(
info.n_peers, 3,
"Expected 3 peers, found {}",
info.n_peers
);
```
### Block Production
```rust
// Verify node is producing blocks
let initial_height = node_a.consensus_info().await?.height;
sleep(Duration::from_secs(10)).await;
let current_height = node_a.consensus_info().await?.height;
assert!(
current_height > initial_height,
"Node should have produced blocks: initial={}, current={}",
initial_height,
current_height
);
```
---
## Limitations
**Local deployer only**
Manual clusters currently only work with `LocalDeployer`. Compose and K8s support is not available.
**No built-in workloads**
You must manually submit transactions via node API clients. The framework's transaction workloads are scenario-specific.
**No automatic expectations**
You wire validation yourself. The `.expect_*()` methods from scenarios are not automatically attached—you write custom validation loops.
**No RunContext**
Manual clusters don't provide `RunContext`, so features like `BlockFeed` and metrics queries require manual setup.
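If you need block observations without `BlockFeed`, polling the node API directly can stand in. A minimal sketch, assuming the `node_a` handle from the earlier example and the 2-second polling interval used above:
```rust
use tokio::time::{sleep, Duration};

// Poll-based stand-in for BlockFeed: report each observed height increase.
let mut last_height = node_a.consensus_info().await?.height;
for _ in 0..30 {
    sleep(Duration::from_secs(2)).await;
    let height = node_a.consensus_info().await?.height;
    if height > last_height {
        println!("new block(s) observed: {} -> {}", last_height, height);
        last_height = height;
    }
}
```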
---
## Relationship to Node Control
Manual clusters and [node control](node-control.md) share the same underlying infrastructure (`LocalDynamicNodes`), but serve different purposes:
| Feature | Manual Cluster | Node Control (Scenario) |
|---------|---------------|-------------------------|
| **Orchestration** | External (your code/Cucumber) | Framework (workloads) |
| **Programming model** | Imperative (step-by-step) | Declarative (plan + execute) |
| **Node lifecycle** | Manual `start_node()` calls | Automatic + workload-driven |
| **Traffic generation** | Manual API calls | Built-in workloads (tx, chaos) |
| **Validation** | Manual polling loops | Built-in expectations + custom |
| **Use case** | Cucumber/BDD integration | Standard testing & chaos |
**When to use which:**
- **Scenarios with node control** → Standard testing (built-in workloads drive node control)
- **Manual clusters** → External drivers (Cucumber/BDD where external logic drives node control)
---
## Running Manual Cluster Tests
Manual cluster tests are typically marked with `#[ignore]` to prevent accidental runs:
```rust
#[tokio::test]
#[ignore = "run manually with: cargo test -- --ignored external_driver_example"]
async fn external_driver_example() -> Result<()> {
// ...
}
```
**To run:**
```bash
# Required: dev mode for fast proofs
POL_PROOF_DEV_MODE=true \
cargo test -p runner-examples -- --ignored external_driver_example
```
**Logs:**
```bash
# Preserve logs after test
LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1 \
RUST_LOG=info \
POL_PROOF_DEV_MODE=true \
cargo test -p runner-examples -- --ignored external_driver_example
```
---
## See Also
- [Testing Philosophy](testing-philosophy.md) — Why the framework is declarative by default
- [RunContext: BlockFeed & Node Control](node-control.md) — Node control within scenarios
- [Chaos Testing](chaos.md) — Restart-based chaos (scenario approach)
- [Scenario Builder Extensions](scenario-builder-ext-patterns.md) — Extending the declarative model

View File

@ -10,7 +10,7 @@ provides:
## BlockFeed: Observing Block Production
The `BlockFeed` is a broadcast stream of block observations that allows workloads and expectations to monitor blockchain progress in real-time. It polls a validator node continuously and broadcasts new blocks to all subscribers.
The `BlockFeed` is a broadcast stream of block observations that allows workloads and expectations to monitor blockchain progress in real-time. It polls a node continuously and broadcasts new blocks to all subscribers.
### What BlockFeed Provides
@ -134,7 +134,7 @@ async fn start_capture(ctx: &RunContext) -> Result<(), DynError> {
"observed block"
);
// Process transactions, DA blobs, etc.
// Process transactions or other block data.
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
Err(_) => continue,
@ -204,7 +204,7 @@ async fn generate_request() -> Option<()> {
}
async fn start(ctx: &RunContext) -> Result<(), DynError> {
let clients = ctx.node_clients().validator_clients();
let clients = ctx.node_clients().node_clients();
let mut receiver = ctx.block_feed().subscribe();
let mut pending_requests: Vec<()> = Vec::new();
@ -249,7 +249,7 @@ Example direct polling in expectations:
use testing_framework_core::scenario::{DynError, RunContext};
async fn evaluate(ctx: &RunContext) -> Result<(), DynError> {
let client = &ctx.node_clients().validator_clients()[0];
let client = &ctx.node_clients().node_clients()[0];
// Poll current height once
let info = client.consensus_info().await?;
@ -311,7 +311,6 @@ async fn evaluate(ctx: &RunContext, expected_min: u64) -> Result<(), DynError> {
The framework's built-in expectations use BlockFeed extensively:
- **`ConsensusLiveness`**: Doesn't directly subscribe but uses block feed stats to verify progress
- **`DataAvailabilityExpectation`**: Subscribes to inspect DA blobs in each block and track inscription/dispersal
- **`TransactionInclusion`**: Subscribes to find specific transactions in blocks
See [Examples](examples.md) and [Workloads & Expectations](workloads.md) for more patterns.
@ -324,7 +323,7 @@ The framework currently supports **process-level chaos** (node restarts) for
resilience testing:
**Supported:**
- Restart validators (`restart_validator`)
- Restart nodes (`restart_node`)
- Random restart workload via `.chaos().restart()`
**Not Yet Supported:**
@ -354,8 +353,8 @@ impl Workload for RestartWorkload {
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
if let Some(control) = ctx.node_control() {
// Restart the first validator (index 0) if supported.
control.restart_validator(0).await?;
// Restart the first node (index 0) if supported.
control.restart_node(0).await?;
}
Ok(())
}
@ -375,7 +374,7 @@ use testing_framework_core::scenario::DynError;
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
async fn restart_node(&self, index: usize) -> Result<(), DynError>;
}
```

View File

@ -13,18 +13,18 @@ Operational readiness focuses on prerequisites, environment fit, and clear signa
**Prerequisites:**
- `versions.env` file at repository root (required by helper scripts)
- Node binaries (`nomos-node`) available or built on demand
- Node binaries (`logos-blockchain-node`) available or built on demand
- Platform requirements met (Docker for compose, cluster access for k8s)
- Circuit assets for DA workloads
- Circuit assets for proof generation
**Artifacts:**
- KZG parameters (circuit assets) for Data Availability scenarios
- Circuit parameters required by the node binary
- Docker images for compose/k8s deployments
- Binary bundles for reproducible builds
**Environment Configuration:**
- `POL_PROOF_DEV_MODE=true` is **REQUIRED for all runners** to avoid expensive proof generation
- Logging configured via `NOMOS_LOG_*` variables
- Logging configured via `LOGOS_BLOCKCHAIN_LOG_*` variables
- Observability endpoints (Prometheus, Grafana) optional but useful
**Readiness & Health:**
@ -78,4 +78,3 @@ This Operations & Deployment section covers:
- [Logging & Observability](logging-observability.md) — Log collection, metrics, and debugging
**Philosophy:** Treat operational hygiene—assets present, prerequisites satisfied, observability reachable—as the first step to reliable scenario outcomes.

View File

@ -10,19 +10,19 @@ All helper scripts require a `versions.env` file at the repository root:
```bash
VERSION=v0.3.1
NOMOS_NODE_REV=abc123def456789
NOMOS_BUNDLE_VERSION=v1
LOGOS_BLOCKCHAIN_NODE_REV=abc123def456789
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v1
```
**What it defines:**
- `VERSION` — Circuit release tag for KZG parameters
- `NOMOS_NODE_REV` — Git revision of nomos-node to build/fetch
- `NOMOS_BUNDLE_VERSION` — Bundle schema version
- `VERSION` — Circuit assets release tag
- `LOGOS_BLOCKCHAIN_NODE_REV` — Git revision of logos-blockchain-node to build/fetch
- `LOGOS_BLOCKCHAIN_BUNDLE_VERSION` — Bundle schema version
**Where it's used:**
- `scripts/run/run-examples.sh`
- `scripts/build/build-bundle.sh`
- `scripts/setup/setup-nomos-circuits.sh`
- `scripts/setup/setup-logos-blockchain-circuits.sh`
- CI workflows
**Error if missing:**
@ -30,37 +30,37 @@ NOMOS_BUNDLE_VERSION=v1
ERROR: versions.env not found at repository root
This file is required and should define:
VERSION=<circuit release tag>
NOMOS_NODE_REV=<nomos-node git revision>
NOMOS_BUNDLE_VERSION=<bundle schema version>
LOGOS_BLOCKCHAIN_NODE_REV=<logos-blockchain-node git revision>
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=<bundle schema version>
```
**Fix:** Ensure you're in the repository root. The file should already exist in the checked-out repo.
## Node Binaries
Scenarios need compiled `nomos-node` binaries.
Scenarios need compiled `logos-blockchain-node` binaries.
### Option 1: Use Helper Scripts (Recommended)
```bash
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
```
This automatically:
- Clones/updates nomos-node checkout
- Clones/updates logos-blockchain-node checkout
- Builds required binaries
- Sets `NOMOS_NODE_BIN`
- Sets `LOGOS_BLOCKCHAIN_NODE_BIN`
### Option 2: Manual Build
If you have a sibling `nomos-node` checkout:
If you have a sibling `logos-blockchain-node` checkout:
```bash
cd ../nomos-node
cargo build --release --bin nomos-node
cd ../logos-blockchain-node
cargo build --release --bin logos-blockchain-node
# Set environment variables
export NOMOS_NODE_BIN=$PWD/target/release/nomos-node
export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/target/release/logos-blockchain-node
# Return to testing framework
cd ../nomos-testing
@ -80,51 +80,40 @@ CI workflows use prebuilt artifacts:
- name: Extract bundle
run: |
tar -xzf .tmp/nomos-binaries-linux-*.tar.gz -C .tmp/
export NOMOS_NODE_BIN=$PWD/.tmp/nomos-node
export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/.tmp/logos-blockchain-node
```
## Circuit Assets (KZG Parameters)
## Circuit Assets
Data Availability (DA) workloads require KZG cryptographic parameters.
Nodes require circuit assets for proof generation. The framework expects a
directory containing the circuits, not a single file.
### Asset Location
**Default path:** `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
**Default path:** `~/.logos-blockchain-circuits`
Note: The directory `kzgrs_test_params/` contains a file named `kzgrs_test_params`. This is the proving key file (~120MB).
**Container path (compose/k8s):** `/kzgrs_test_params/kzgrs_test_params`
**Container path (compose/k8s):** `/opt/circuits` (set during image build)
### Getting Assets
**Option 1: Use helper script** (recommended):
```bash
# Fetch circuits
scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
# Copy to default location
mkdir -p testing-framework/assets/stack/kzgrs_test_params
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
# Verify (should be ~120MB)
ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params
scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits
```
**Option 2: Let `run-examples.sh` handle it**:
```bash
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
```
This automatically fetches and places assets.
### Override Path
Set `NOMOS_KZGRS_PARAMS_PATH` to use a custom location:
Set `LOGOS_BLOCKCHAIN_CIRCUITS` to use a custom location:
```bash
NOMOS_KZGRS_PARAMS_PATH=/custom/path/to/kzgrs_test_params \
LOGOS_BLOCKCHAIN_CIRCUITS=/custom/path/to/circuits \
cargo run -p runner-examples --bin local_runner
```
@ -132,14 +121,14 @@ cargo run -p runner-examples --bin local_runner
| Runner | When Required |
|--------|---------------|
| **Host (local)** | Always (for DA workloads) |
| **Host (local)** | Always |
| **Compose** | During image build (baked into image) |
| **K8s** | During image build + mounted via hostPath |
| **K8s** | During image build |
**Error without assets:**
```text
Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params" }
Error: circuits directory not found (LOGOS_BLOCKCHAIN_CIRCUITS)
```
## Platform Requirements
@ -149,7 +138,7 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame
**Requires:**
- Rust nightly toolchain
- Node binaries built
- KZG circuit assets (for DA workloads)
- Circuit assets for proof generation
- Available ports (18080+, 3100+, etc.)
**No Docker required.**
@ -164,11 +153,11 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame
**Requires:**
- Docker daemon running
- Docker image built: `logos-blockchain-testing:local`
- KZG assets baked into image
- Circuit assets baked into image
- Docker Desktop (macOS) or Docker Engine (Linux)
**Platform notes (macOS / Apple silicon):**
- Prefer `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` for native performance
- Prefer `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` for native performance
- Use `linux/amd64` only if targeting amd64 environments (slower via emulation)
**Best for:**
@ -182,7 +171,7 @@ Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-frame
- Kubernetes cluster (Docker Desktop K8s, minikube, kind, or remote)
- `kubectl` configured
- Docker image built and loaded/pushed
- KZG assets baked into image + mounted via hostPath
- Circuit assets baked into image
**Local cluster setup:**
@ -198,7 +187,7 @@ minikube start
minikube image load logos-blockchain-testing:local
```
**Remote cluster:** Push image to registry and set `NOMOS_TESTNET_IMAGE`.
**Remote cluster:** Push image to registry and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE`.
**Best for:**
- Production-like testing
@ -218,7 +207,7 @@ Without this, proof generation uses expensive Groth16 proving, causing:
```bash
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 3 compose
# etc.
```
@ -237,8 +226,8 @@ Run this checklist before your first scenario:
# 1. Verify versions.env exists
cat versions.env
# 2. Check circuit assets (for DA workloads)
ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params
# 2. Check circuit assets
ls -lh "${HOME}/.logos-blockchain-circuits"
# 3. Verify POL_PROOF_DEV_MODE is set
echo $POL_PROOF_DEV_MODE # Should print: true
@ -250,7 +239,7 @@ docker ps
docker images | grep logos-blockchain-testing
# 6. For host runner: verify node binaries (if not using scripts)
$NOMOS_NODE_BIN --version
$LOGOS_BLOCKCHAIN_NODE_BIN --version
```
## Recommended: Use Helper Scripts
@ -259,18 +248,18 @@ The easiest path is to let the helper scripts handle everything:
```bash
# Host runner
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
# Compose runner
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
# K8s runner
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
```
These scripts:
- Verify `versions.env` exists
- Clone/build nomos-node if needed
- Clone/build logos-blockchain-node if needed
- Fetch circuit assets if missing
- Build Docker images (compose/k8s)
- Load images into cluster (k8s)

View File

@ -2,7 +2,7 @@
**Declarative, multi-node blockchain testing for the Logos network**
The Logos Testing Framework enables you to test consensus, data availability, and transaction workloads across local processes, Docker Compose, and Kubernetes deployments—all with a unified scenario API.
The Logos Testing Framework enables you to test consensus and transaction workloads across local processes, Docker Compose, and Kubernetes deployments—all with a unified scenario API.
[**Get Started**](quickstart.md)
@ -13,8 +13,8 @@ The Logos Testing Framework enables you to test consensus, data availability, an
**Everything in this framework is a Scenario.**
A Scenario is a controlled experiment over time, composed of:
- **Topology** — The cluster shape (validators, network layout)
- **Workloads** — Traffic and conditions that exercise the system (transactions, DA, chaos)
- **Topology** — The cluster shape (nodes, network layout)
- **Workloads** — Traffic and conditions that exercise the system (transactions, chaos)
- **Expectations** — Success criteria verified after execution (liveness, inclusion, recovery)
- **Duration** — The time window for the experiment
@ -37,8 +37,8 @@ flowchart LR
```
1. **Define Scenario** — Describe your test: topology, workloads, and success criteria
2. **Deploy Topology** — Launch validators using host, compose, or k8s runners
3. **Run Workloads** — Drive transactions, DA traffic, and chaos operations
2. **Deploy Topology** — Launch nodes using host, compose, or k8s runners
3. **Run Workloads** — Drive transactions and chaos operations
4. **Check Expectations** — Verify consensus liveness, inclusion, and system health
---
@ -57,7 +57,6 @@ flowchart LR
**Built-in Workloads**
- Transaction submission with configurable rates
- Data availability (DA) blob dispersal and sampling
- Chaos testing with controlled node restarts
**Comprehensive Observability**
@ -81,7 +80,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
async fn main() -> anyhow::Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.nodes(3)
})
.transactions_with(|tx| tx.rate(10).users(5))
.expect_consensus_liveness()
@ -122,11 +121,9 @@ Check the **[Developer Reference](part-iii.md)** to implement custom workloads,
## Project Context
**Logos** is a modular blockchain protocol composed of validators, and a data-availability (DA) subsystem:
**Logos** is a modular blockchain protocol composed of nodes that participate in consensus and produce blocks.
- **Validators** participate in consensus and produce blocks
These roles interact tightly, which is why meaningful testing must be performed in multi-node environments that include real networking, timing, and DA interaction.
Meaningful testing must be performed in multi-node environments that include real networking and timing behavior.
The Logos Testing Framework provides the infrastructure to orchestrate these multi-node scenarios reliably across development, CI, and production-like environments.

View File

@ -16,7 +16,7 @@ git clone https://github.com/logos-blockchain/logos-blockchain-testing.git
cd logos-blockchain-testing
# 3. Run your first scenario (downloads dependencies automatically)
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
POL_PROOF_DEV_MODE=true scripts/run/run-examples.sh -t 60 -n 1 host
```
**First run takes 5-10 minutes** (downloads ~120MB circuit assets, builds binaries).
@ -32,10 +32,10 @@ If you already have the repository cloned:
- Rust toolchain (nightly)
- Unix-like system (tested on Linux and macOS)
- For Docker Compose examples: Docker daemon running
- For Docker Desktop on Apple silicon (compose/k8s): set `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` to avoid slow/fragile amd64 emulation builds
- **`versions.env` file** at repository root (defines VERSION, NOMOS_NODE_REV, NOMOS_BUNDLE_VERSION)
- For Docker Desktop on Apple silicon (compose/k8s): set `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` to avoid slow/fragile amd64 emulation builds
- **`versions.env` file** at repository root (defines VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION)
**Note:** `nomos-node` binaries are built automatically on demand or can be provided via prebuilt bundles.
**Note:** `logos-blockchain-node` binaries are built automatically on demand or can be provided via prebuilt bundles.
**Important:** The `versions.env` file is required by helper scripts. If missing, the scripts will fail with an error. The file should already exist in the repository root.
@ -47,15 +47,15 @@ The framework ships with runnable example binaries in `examples/src/bin/`.
```bash
# From the logos-blockchain-testing directory
scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
scripts/run/run-examples.sh -t 60 -n 1 host
```
This handles circuit setup, binary building, and runs a complete scenario: 1 validator, mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration.
This handles circuit setup, binary building, and runs a complete scenario: 1 node, transaction workload (5 tx/block), 60s duration.
**Alternative:** Direct cargo run (requires manual setup):
```bash
# Requires circuits in place and NOMOS_NODE_BIN set
# Requires circuits in place and LOGOS_BLOCKCHAIN_NODE_BIN set
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
```
@ -70,18 +70,13 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_local_demo() -> Result<()> {
// Define the scenario (1 validator, tx + DA workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
// Define the scenario (1 node, tx workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
.users(500) // use 500 of the seeded wallets
})
.da_with(|da| {
da.channel_rate(1) // 1 channel
.blob_rate(1) // target 1 blob per block
.headroom_percent(20) // default headroom when sizing channels
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build();
@ -103,8 +98,8 @@ pub async fn run_local_demo() -> Result<()> {
- Nodes spawn as local processes
- Consensus starts producing blocks
- Scenario runs for the configured duration
- Node state/logs written under a temporary per-run directory in the current working directory (removed after the run unless `NOMOS_TESTS_KEEP_LOGS=1`)
- To write per-node log files to a stable location: set `NOMOS_LOG_DIR=/path/to/logs` (files will have prefix like `nomos-node-0*`, may include timestamps)
- Node state/logs written under a temporary per-run directory in the current working directory (removed after the run unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`)
- To write per-node log files to a stable location: set `LOGOS_BLOCKCHAIN_LOG_DIR=/path/to/logs` (files will have prefix like `logos-blockchain-node-0*`, may include timestamps)
## What Just Happened?
@ -118,7 +113,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology: all nodes connect to seed
.validators(1) // 1 validator node
.nodes(1) // 1 node
})
}
```
@ -132,7 +127,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_2_wallets() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).wallets(1_000) // Seed 1,000 funded wallet accounts
ScenarioBuilder::with_node_counts(1).wallets(1_000) // Seed 1,000 funded wallet accounts
}
```
@ -145,21 +140,16 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_3_workloads() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1)
ScenarioBuilder::with_node_counts(1)
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
.users(500) // Use 500 of the 1,000 wallets
})
.da_with(|da| {
da.channel_rate(1) // 1 DA channel (more spawned with headroom)
.blob_rate(1) // target 1 blob per block
.headroom_percent(20) // default headroom when sizing channels
})
}
```
Generates both transaction and DA traffic to stress both subsystems.
Generates transaction traffic to stress the inclusion pipeline.
### 4. Expectation
@ -168,7 +158,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_4_expectation() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously.
ScenarioBuilder::with_node_counts(1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously.
}
```
@ -182,7 +172,7 @@ use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_5_run_duration() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).with_run_duration(Duration::from_secs(60))
ScenarioBuilder::with_node_counts(1).with_run_duration(Duration::from_secs(60))
}
```
@ -196,7 +186,7 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
pub async fn step_6_deploy_and_execute() -> Result<()> {
let mut plan = ScenarioBuilder::with_node_counts(1, 1).build();
let mut plan = ScenarioBuilder::with_node_counts(1).build();
let deployer = LocalDeployer::default(); // Use local process deployer
let runner = deployer.deploy(&plan).await?; // Provision infrastructure
@ -213,16 +203,16 @@ pub async fn step_6_deploy_and_execute() -> Result<()> {
**With run-examples.sh** (recommended):
```bash
# Scale up to 3 validators, run for 2 minutes
scripts/run/run-examples.sh -t 120 -v 3 -e 2 host
# Scale up to 3 nodes, run for 2 minutes
scripts/run/run-examples.sh -t 120 -n 3 host
```
**With direct cargo run:**
```bash
# Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars)
NOMOS_DEMO_VALIDATORS=3 \
NOMOS_DEMO_RUN_SECS=120 \
# Uses LOGOS_BLOCKCHAIN_DEMO_* env vars (or legacy *_DEMO_* vars)
LOGOS_BLOCKCHAIN_DEMO_NODES=3 \
LOGOS_BLOCKCHAIN_DEMO_RUN_SECS=120 \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
```
@ -234,12 +224,12 @@ Use the same API with a different deployer for reproducible containerized enviro
**Recommended:** Use the convenience script (handles everything):
```bash
scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 1 compose
```
This automatically:
- Fetches circuit assets (to `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`)
- Builds/uses prebuilt binaries (via `NOMOS_BINARIES_TAR` if available)
- Fetches circuit assets (to `~/.logos-blockchain-circuits` by default)
- Builds/uses prebuilt binaries (via `LOGOS_BLOCKCHAIN_BINARIES_TAR` if available)
- Builds the Docker image
- Runs the compose scenario
@ -248,15 +238,14 @@ This automatically:
```bash
# Option 1: Use prebuilt bundle (recommended for compose/k8s)
scripts/build/build-bundle.sh --platform linux # Creates .tmp/nomos-binaries-linux-v0.3.1.tar.gz
export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
# Option 2: Manual circuit/image setup (rebuilds during image build)
scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 /tmp/logos-blockchain-circuits
scripts/build/build_test_image.sh
# Run with Compose
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner
```
@ -274,7 +263,7 @@ eval "$(scripts/setup/setup-observability.sh compose env)"
Then run your compose scenario as usual (the environment variables enable PromQL querying and node OTLP metrics export).
**Note:** Compose expects KZG parameters at `/kzgrs_test_params/kzgrs_test_params` inside containers (the directory name is repeated as the filename).
**Note:** Compose expects circuits at `/opt/circuits` inside containers (set by the image build).
**In code:** Just swap the deployer:
@ -285,7 +274,7 @@ use testing_framework_runner_compose::ComposeDeployer;
pub async fn run_with_compose_deployer() -> Result<()> {
// ... same scenario definition ...
let mut plan = ScenarioBuilder::with_node_counts(1, 1).build();
let mut plan = ScenarioBuilder::with_node_counts(1).build();
let deployer = ComposeDeployer::default(); // Use Docker Compose
let runner = deployer.deploy(&plan).await?;

View File

@ -14,7 +14,7 @@ environment and operational considerations, see [Operations Overview](operations
- **Can run in CI** for fast smoke tests.
- **Node control:** Not supported (chaos workloads not available)
**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 host`
**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 host`
## Docker Compose runner
- Starts nodes in containers to provide a reproducible multi-node stack on a
@ -25,7 +25,7 @@ environment and operational considerations, see [Operations Overview](operations
- **Recommended for CI pipelines** (isolated environment, reproducible).
- **Node control:** Supported (can restart nodes for chaos testing)
**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose`
**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 compose`
## Kubernetes runner
- Deploys nodes onto a cluster for higher-fidelity, longer-running scenarios (via `K8sDeployer`).
@ -34,10 +34,10 @@ environment and operational considerations, see [Operations Overview](operations
and scheduling matter.
- **Node control:** Not supported yet (chaos workloads not available)
**Run with:** `scripts/run/run-examples.sh -t 60 -v 1 -e 1 k8s`
**Run with:** `scripts/run/run-examples.sh -t 60 -n 1 k8s`
### Common expectations
- All runners require at least one validator and, for transaction scenarios,
- All runners require at least one node and, for transaction scenarios,
access to seeded wallets.
- Readiness probes gate workload start so traffic begins only after nodes are
reachable.

View File

@ -8,18 +8,18 @@ Use `scripts/run/run-examples.sh` for all modes—it handles all setup automatic
```bash
# Host mode (local processes)
scripts/run/run-examples.sh -t 60 -v 3 host
scripts/run/run-examples.sh -t 60 -n 3 host
# Compose mode (Docker Compose)
scripts/run/run-examples.sh -t 60 -v 3 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
# K8s mode (Kubernetes)
scripts/run/run-examples.sh -t 60 -v 3 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
```
**Parameters:**
- `-t 60` — Run duration in seconds
- `-v 3` — Number of validators
- `-n 3` — Number of nodes
- `host|compose|k8s` — Deployment mode
This script handles:
@ -29,14 +29,14 @@ This script handles:
- Image loading into cluster (k8s)
- Execution with proper environment
**Note:** For `k8s` runs against non-local clusters (e.g. EKS), the cluster pulls images from a registry. In that case, build + push your image separately (see `scripts/build/build_test_image.sh`) and set `NOMOS_TESTNET_IMAGE` to the pushed reference.
**Note:** For `k8s` runs against non-local clusters (e.g. EKS), the cluster pulls images from a registry. In that case, build + push your image separately (see `scripts/build/build_test_image.sh`) and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` to the pushed reference.
## Quick Smoke Matrix
For a small "does everything still run?" matrix across all runners:
```bash
scripts/run/run-test-matrix.sh -t 120 -v 1 -e 1
scripts/run/run-test-matrix.sh -t 120 -n 1
```
This runs host, compose, and k8s modes with various image-build configurations. Useful after making runner/image/script changes. Forwards `--metrics-*` options through to `scripts/run/run-examples.sh`.
@ -51,31 +51,31 @@ This runs host, compose, and k8s modes with various image-build configurations.
**Environment overrides:**
- `VERSION=v0.3.1` — Circuit version
- `NOMOS_NODE_REV=<commit>` — nomos-node git revision
- `NOMOS_BINARIES_TAR=path/to/bundle.tar.gz` — Use prebuilt bundle
- `NOMOS_SKIP_IMAGE_BUILD=1` — Skip image rebuild inside `run-examples.sh` (compose/k8s)
- `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64|linux/amd64` — Docker platform for bundle builds (macOS/Windows)
- `LOGOS_BLOCKCHAIN_NODE_REV=<commit>` — logos-blockchain-node git revision
- `LOGOS_BLOCKCHAIN_BINARIES_TAR=path/to/bundle.tar.gz` — Use prebuilt bundle
- `LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1` — Skip image rebuild inside `run-examples.sh` (compose/k8s)
- `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64|linux/amd64` — Docker platform for bundle builds (macOS/Windows)
- `COMPOSE_CIRCUITS_PLATFORM=linux-aarch64|linux-x86_64` — Circuits platform for image builds
- `SLOW_TEST_ENV=true` — Doubles built-in readiness timeouts (useful in CI / constrained laptops)
- `TESTNET_PRINT_ENDPOINTS=1` — Print `TESTNET_ENDPOINTS` / `TESTNET_PPROF` lines during deploy
## Dev Workflow: Updating nomos-node Revision
## Dev Workflow: Updating logos-blockchain-node Revision
The repo pins a `nomos-node` revision in `versions.env` for reproducible builds. To update it or point to a local checkout:
The repo pins a `logos-blockchain-node` revision in `versions.env` for reproducible builds. To update it or point to a local checkout:
```bash
# Pin to a new git revision (updates versions.env + Cargo.toml git revs)
scripts/ops/update-nomos-rev.sh --rev <git_sha>
# Use a local nomos-node checkout instead (for development)
scripts/ops/update-nomos-rev.sh --path /path/to/nomos-node
# Use a local logos-blockchain-node checkout instead (for development)
scripts/ops/update-nomos-rev.sh --path /path/to/logos-blockchain-node
# If Cargo.toml was marked skip-worktree, clear it
scripts/ops/update-nomos-rev.sh --unskip-worktree
```
**Notes:**
- Don't commit absolute `NOMOS_NODE_PATH` values; prefer `--rev` for shared history/CI
- Don't commit absolute `LOGOS_BLOCKCHAIN_NODE_PATH` values; prefer `--rev` for shared history/CI
- After changing rev/path, expect `Cargo.lock` to update on the next `cargo build`/`cargo test`
## Cleanup Helper
@ -100,7 +100,7 @@ For manual control, run the `local_runner` binary directly:
```bash
POL_PROOF_DEV_MODE=true \
NOMOS_NODE_BIN=/path/to/nomos-node \
LOGOS_BLOCKCHAIN_NODE_BIN=/path/to/logos-blockchain-node \
cargo run -p runner-examples --bin local_runner
```
@ -108,14 +108,14 @@ cargo run -p runner-examples --bin local_runner
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (legacy: `LOCAL_DEMO_VALIDATORS`) |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (legacy: `LOCAL_DEMO_RUN_SECS`) |
| `NOMOS_NODE_BIN` | — | Path to nomos-node binary (required) |
| `NOMOS_LOG_DIR` | None | Directory for per-node log files |
| `NOMOS_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI) |
| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset |
| `NOMOS_LOG_LEVEL` | info | Global log level: error, warn, info, debug, trace |
| `NOMOS_LOG_FILTER` | None | Fine-grained module filtering (e.g., `cryptarchia=trace,nomos_da_sampling=debug`) |
| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes (legacy: `LOCAL_DEMO_NODES`) |
| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds (legacy: `LOCAL_DEMO_RUN_SECS`) |
| `LOGOS_BLOCKCHAIN_NODE_BIN` | — | Path to logos-blockchain-node binary (required) |
| `LOGOS_BLOCKCHAIN_LOG_DIR` | None | Directory for per-node log files |
| `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI) |
| `LOGOS_BLOCKCHAIN_TESTS_TRACING` | false | Enable debug tracing preset |
| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | info | Global log level: error, warn, info, debug, trace |
| `LOGOS_BLOCKCHAIN_LOG_FILTER` | None | Fine-grained module filtering (e.g., `cryptarchia=trace`) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |
**Note:** Requires circuit assets and host binaries. Use `scripts/run/run-examples.sh host` to handle setup automatically.
@ -134,11 +134,11 @@ scripts/build/build-bundle.sh --platform linux
# Creates .tmp/nomos-binaries-linux-v0.3.1.tar.gz
# 2. Build image (embeds bundle assets)
export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
scripts/build/build_test_image.sh
# 3. Run
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner
```
@ -146,15 +146,14 @@ cargo run -p runner-examples --bin compose_runner
### Option 2: Manual Circuit/Image Setup
```bash
# Fetch and copy circuits
scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
# Fetch circuits
scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits
# Build image
scripts/build/build_test_image.sh
# Run
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner
```
@ -162,36 +161,36 @@ cargo run -p runner-examples --bin compose_runner
### Platform Note (macOS / Apple Silicon)
- Docker Desktop runs a `linux/arm64` engine by default
- For native performance: `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` (recommended for local testing)
- For amd64 targets: `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/amd64` (slower via emulation)
- For native performance: `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` (recommended for local testing)
- For amd64 targets: `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/amd64` (slower via emulation)
### Compose Runner Environment Variables
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_TESTNET_IMAGE` | — | Image tag (required, must match built image) |
| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | — | Image tag (required, must match built image) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "validators" (e.g., `3`) |
| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query |
| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |
| `NOMOS_GRAFANA_URL` | None | Grafana base URL for printing/logging |
| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes |
| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "nodes" (e.g., `3`) |
| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query |
| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |
| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | None | Grafana base URL for printing/logging |
| `COMPOSE_RUNNER_HOST` | 127.0.0.1 | Host address for port mappings |
| `COMPOSE_RUNNER_PRESERVE` | 0 | Keep containers running after test |
| `NOMOS_LOG_LEVEL` | info | Node log level (stdout/stderr) |
| `NOMOS_LOG_FILTER` | None | Fine-grained module filtering |
| `LOGOS_BLOCKCHAIN_LOG_LEVEL` | info | Node log level (stdout/stderr) |
| `LOGOS_BLOCKCHAIN_LOG_FILTER` | None | Fine-grained module filtering |
**Config file option:** `testing-framework/assets/stack/cfgsync.yaml` (`tracing_settings.logger`) — Switch node logs between stdout/stderr and file output
### Compose-Specific Features
- **Node control support**: Only runner that supports chaos testing (`.enable_node_control()` + chaos workloads)
- **External observability**: Set `NOMOS_METRICS_*` / `NOMOS_GRAFANA_URL` to enable telemetry links and querying
- **External observability**: Set `LOGOS_BLOCKCHAIN_METRICS_*` / `LOGOS_BLOCKCHAIN_GRAFANA_URL` to enable telemetry links and querying
- Quickstart: `scripts/setup/setup-observability.sh compose up` then `scripts/setup/setup-observability.sh compose env`
**Important:**
- Containers expect KZG parameters at `/kzgrs_test_params/kzgrs_test_params` (note the repeated filename)
- Containers expect circuits at `/opt/circuits` (set by the image build)
- Use `scripts/run/run-examples.sh compose` to handle all setup automatically
---
@ -211,11 +210,11 @@ For manual control, run the `k8s_runner` binary directly. K8s requires the same
```bash
# 1. Build image with bundle (recommended)
scripts/build/build-bundle.sh --platform linux
export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz
scripts/build/build_test_image.sh
# 2. Load into cluster (choose one)
export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local
# For kind:
kind load docker-image logos-blockchain-testing:local
@ -226,13 +225,13 @@ minikube image load logos-blockchain-testing:local
# For remote cluster (push to registry):
docker tag logos-blockchain-testing:local your-registry/logos-blockchain-testing:latest
docker push your-registry/logos-blockchain-testing:latest
export NOMOS_TESTNET_IMAGE=your-registry/logos-blockchain-testing:latest
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=your-registry/logos-blockchain-testing:latest
```
### Run the Example
```bash
export NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local
export POL_PROOF_DEV_MODE=true
cargo run -p runner-examples --bin k8s_runner
```
@ -241,13 +240,13 @@ cargo run -p runner-examples --bin k8s_runner
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_TESTNET_IMAGE` | — | Image tag (required) |
| `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` | — | Image tag (required) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query (PromQL) |
| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |
| `NOMOS_GRAFANA_URL` | None | Grafana base URL for printing/logging |
| `LOGOS_BLOCKCHAIN_DEMO_NODES` | 1 | Number of nodes |
| `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query (PromQL) |
| `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |
| `LOGOS_BLOCKCHAIN_GRAFANA_URL` | None | Grafana base URL for printing/logging |
| `K8S_RUNNER_NAMESPACE` | Random | Kubernetes namespace (pin for debugging) |
| `K8S_RUNNER_RELEASE` | Random | Helm release name (pin for debugging) |
| `K8S_RUNNER_NODE_HOST` | — | NodePort host resolution for non-local clusters |
@ -257,24 +256,24 @@ cargo run -p runner-examples --bin k8s_runner
### K8s + Observability (Optional)
```bash
export NOMOS_METRICS_QUERY_URL=http://your-prometheus:9090
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://your-prometheus:9090
# Prometheus OTLP receiver example:
export NOMOS_METRICS_OTLP_INGEST_URL=http://your-prometheus:9090/api/v1/otlp/v1/metrics
export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://your-prometheus:9090/api/v1/otlp/v1/metrics
# Optional: print Grafana link in TESTNET_ENDPOINTS
export NOMOS_GRAFANA_URL=http://your-grafana:3000
export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://your-grafana:3000
cargo run -p runner-examples --bin k8s_runner
```
**Notes:**
- `NOMOS_METRICS_QUERY_URL` must be reachable from the runner process (often via `kubectl port-forward`)
- `NOMOS_METRICS_OTLP_INGEST_URL` must be reachable from nodes (pods/containers) and is backend-specific
- `LOGOS_BLOCKCHAIN_METRICS_QUERY_URL` must be reachable from the runner process (often via `kubectl port-forward`)
- `LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL` must be reachable from nodes (pods/containers) and is backend-specific
- Quickstart installer: `scripts/setup/setup-observability.sh k8s install` then `scripts/setup/setup-observability.sh k8s env`
- Optional dashboards: `scripts/setup/setup-observability.sh k8s dashboards`
### Via `scripts/run/run-examples.sh` (Recommended)
```bash
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s \
scripts/run/run-examples.sh -t 60 -n 3 k8s \
--metrics-query-url http://your-prometheus:9090 \
--metrics-otlp-ingest-url http://your-prometheus:9090/api/v1/otlp/v1/metrics
```
@ -285,7 +284,7 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s \
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ObservabilityBuilderExt as _;
let plan = ScenarioBuilder::with_node_counts(1, 1)
let plan = ScenarioBuilder::with_node_counts(1)
.with_metrics_query_url_str("http://your-prometheus:9090")
.with_metrics_otlp_ingest_url_str("http://your-prometheus:9090/api/v1/otlp/v1/metrics")
.build();
@ -293,8 +292,8 @@ let plan = ScenarioBuilder::with_node_counts(1, 1)
### Important K8s Notes
- K8s runner mounts `testing-framework/assets/stack/kzgrs_test_params` as a hostPath volume
- File path inside pods: `/kzgrs_test_params/kzgrs_test_params`
- K8s runner uses circuits baked into the image
- File path inside pods: `/opt/circuits`
- **No node control support yet**: Chaos workloads (`.enable_node_control()`) will fail
- Optimized for local clusters (Docker Desktop K8s / minikube / kind)
- Remote clusters require additional setup (registry push, PV/CSI for assets, etc.)

View File

@ -37,7 +37,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
async fn run_once() -> anyhow::Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(20)
.transactions_with(|tx| tx.rate(1).users(5))
.expect_consensus_liveness()
@ -63,14 +63,14 @@ Notes:
### Local (Host) Runner
- **Best for**: fast iteration and debugging
- **Logs/state**: stored under a temporary run directory unless you set `NOMOS_TESTS_KEEP_LOGS=1` and/or `NOMOS_LOG_DIR=...`
- **Logs/state**: stored under a temporary run directory unless you set `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` and/or `LOGOS_BLOCKCHAIN_LOG_DIR=...`
- **Limitations**: no node-control capability (chaos workflows that require node control wont work here)
Run the built-in local examples:
```bash
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -n 3 host
```
### Compose Runner
@ -83,7 +83,7 @@ Run the built-in compose examples:
```bash
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 3 compose
```
### K8s Runner
@ -96,16 +96,16 @@ Run the built-in k8s examples:
```bash
POL_PROOF_DEV_MODE=true \
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -n 3 k8s
```
---
## Artifacts & Where to Look
- **Node logs**: configure via `NOMOS_LOG_DIR`, `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (see [Logging & Observability](logging-observability.md))
- **Node logs**: configure via `LOGOS_BLOCKCHAIN_LOG_DIR`, `LOGOS_BLOCKCHAIN_LOG_LEVEL`, `LOGOS_BLOCKCHAIN_LOG_FILTER` (see [Logging & Observability](logging-observability.md))
- **Runner logs**: controlled by `RUST_LOG` (runner process only)
- **Keep run directories**: set `NOMOS_TESTS_KEEP_LOGS=1`
- **Keep run directories**: set `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`
- **Compose environment preservation**: set `COMPOSE_RUNNER_PRESERVE=1`
- **K8s environment preservation**: set `K8S_RUNNER_PRESERVE=1`

View File

@ -24,7 +24,7 @@ flowchart TB
subgraph Phase4["4. Execution Phase"]
Execute[Drive Workloads]
ExecuteDetails["• Submit transactions<br/>Disperse DA blobs<br/>Trigger chaos events<br/>• Run for duration"]
ExecuteDetails["• Submit transactions<br/>• Trigger chaos events<br/>• Run for duration"]
Execute --> ExecuteDetails
end
@ -61,8 +61,8 @@ flowchart TB
Declare a topology, attach workloads and expectations, and set the run window. The plan is the single source of truth for what will happen.
**Key actions:**
- Define cluster shape (validators, network topology)
- Configure workloads (transaction rate, DA traffic, chaos patterns)
- Define cluster shape (nodes, network topology)
- Configure workloads (transaction rate, chaos patterns)
- Attach expectations (liveness, inclusion, custom checks)
- Set timing parameters (run duration, cooldown period)
@ -74,7 +74,7 @@ Hand the plan to a deployer. It provisions the environment on the chosen backend
**Key actions:**
- Provision infrastructure (processes, containers, or pods)
- Launch validator nodes
- Launch nodes
- Wait for readiness probes (HTTP endpoints respond)
- Establish node connectivity and metrics endpoints
- Spawn BlockFeed for real-time block observation
@ -99,7 +99,6 @@ The runner starts traffic and behaviors for the planned duration.
**Key actions:**
- Submit transactions at configured rates
- Disperse and sample DA blobs
- Trigger chaos events (node restarts)
- Run concurrently for the specified duration
- Observe blocks and metrics in real-time
@ -115,7 +114,6 @@ Once activity stops (and optional cooldown completes), the runner checks livenes
**Key actions:**
- Verify consensus liveness (minimum block production)
- Check transaction inclusion rates
- Validate DA dispersal and sampling
- Assess system recovery after chaos events
- Aggregate pass/fail results
@ -128,7 +126,7 @@ Tear down resources so successive runs start fresh and do not inherit leaked sta
**Key actions:**
- Stop all node processes/containers/pods
- Remove temporary directories and volumes
- Collect and archive logs (if `NOMOS_TESTS_KEEP_LOGS=1`)
- Collect and archive logs (if `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`)
- Release ports and network resources
- Cleanup observability stack (if spawned)

View File

@ -14,7 +14,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn declarative_over_imperative() {
// Good: declarative
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -22,7 +22,7 @@ pub fn declarative_over_imperative() {
.build();
// Bad: imperative (framework doesn't work this way)
// spawn_validator();
// spawn_node();
// loop { submit_tx(); check_block(); }
}
```
@ -30,6 +30,8 @@ pub fn declarative_over_imperative() {
**Why it matters:** The framework handles deployment, readiness, and cleanup.
You focus on test intent, not infrastructure orchestration.
**Exception:** For advanced network scenarios (split-brain, late joins, network healing) that can't be expressed declaratively, see [Manual Clusters](manual-cluster.md) for imperative control.
## Protocol Time, Not Wall Time
Reason in **blocks** and **consensus intervals**, not wall-clock seconds.
@ -47,7 +49,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn protocol_time_not_wall_time() {
// Good: protocol-oriented thinking
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -84,7 +86,7 @@ use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub fn determinism_first() {
// Separate: functional test (deterministic)
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -93,7 +95,7 @@ pub fn determinism_first() {
// Separate: chaos test (introduces randomness)
let _chaos_plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control()
.chaos_with(|c| {
c.restart()
@ -120,7 +122,7 @@ Prefer **user-facing signals** over internal state:
**Good checks:**
- Blocks progressing at expected rate (liveness)
- Transactions included within N blocks (inclusion)
- DA blobs retrievable (availability)
- Transactions included within N blocks (inclusion)
**Avoid internal checks:**
- Memory pool size
@ -143,14 +145,14 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn minimum_run_windows() {
// Bad: too short (~2 blocks with default 2s slots, 0.9 coeff)
let _too_short = ScenarioBuilder::with_node_counts(1, 0)
let _too_short = ScenarioBuilder::with_node_counts(1)
.with_run_duration(Duration::from_secs(5))
.expect_consensus_liveness()
.build();
// Good: enough blocks for assertions (~27 blocks with default 2s slots, 0.9
// coeff)
let _good = ScenarioBuilder::with_node_counts(1, 0)
let _good = ScenarioBuilder::with_node_counts(1)
.with_run_duration(Duration::from_secs(60))
.expect_consensus_liveness()
.build();

View File

@ -15,22 +15,22 @@ See also: [RunContext: BlockFeed & Node Control](node-control.md) for the curren
- **Restarts**: random restarts with minimum delay/cooldown to test recovery.
- **Partitions (planned)**: block/unblock peers to simulate partial isolation, then assert
height convergence after healing.
- **Validator churn (planned)**: stop one validator and start another (new key) mid-run to
- **Node churn (planned)**: stop one node and start another (new key) mid-run to
test membership changes; expect convergence.
- **Load SLOs**: push tx/DA rates and assert inclusion/availability budgets
- **Load SLOs**: push transaction rates and assert inclusion/latency budgets
instead of only liveness.
- **API probes**: poll HTTP/RPC endpoints during chaos to ensure external
contracts stay healthy (shape + latency).
## Expectations to pair
- **Liveness/height convergence** after chaos windows.
- **SLO checks**: inclusion latency, DA responsiveness, API latency/shape.
- **SLO checks**: inclusion latency, API latency/shape.
- **Recovery checks**: ensure nodes that were isolated or restarted catch up to
cluster height within a timeout.
## Guidance
- Keep chaos realistic: avoid flapping or patterns you wouldn't operate in prod.
- Scope chaos: choose validators intentionally; don't restart all
- Scope chaos: choose nodes intentionally; don't restart all
nodes at once unless you're testing full outages.
- Combine chaos with observability: capture block feed/metrics and API health so
failures are diagnosable.

View File

@ -3,12 +3,12 @@
**Prerequisites for All Runners:**
- **`versions.env` file** at repository root (required by helper scripts)
- **`POL_PROOF_DEV_MODE=true`** MUST be set for all runners (host, compose, k8s) to avoid expensive Groth16 proof generation that causes timeouts
- **KZG circuit assets** must be present at `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note the repeated filename) for DA workloads
- **Circuit assets** must be present and `LOGOS_BLOCKCHAIN_CIRCUITS` must point to a directory that contains them
**Platform/Environment Notes:**
- **macOS + Docker Desktop (Apple silicon):** prefer `NOMOS_BUNDLE_DOCKER_PLATFORM=linux/arm64` for local compose/k8s runs to avoid slow/fragile amd64 emulation builds.
- **macOS + Docker Desktop (Apple silicon):** prefer `LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=linux/arm64` for local compose/k8s runs to avoid slow/fragile amd64 emulation builds.
- **Disk space:** bundle/image builds are storage-heavy. If you see I/O errors or Docker build failures, check free space and prune old artifacts (`.tmp/`, `target/`, and Docker build cache) before retrying.
- **K8s runner scope:** the default Helm chart mounts KZG params via `hostPath` and uses a local image tag (`logos-blockchain-testing:local`). This is intended for local clusters (Docker Desktop / minikube / kind), not remote managed clusters without additional setup.
- **K8s runner scope:** the default Helm chart mounts circuit assets via `hostPath` and uses a local image tag (`logos-blockchain-testing:local`). This is intended for local clusters (Docker Desktop / minikube / kind), not remote managed clusters without additional setup.
- Quick cleanup: `scripts/ops/clean.sh` (and `scripts/ops/clean.sh --docker` if needed).
- Destructive cleanup (last resort): `scripts/ops/clean.sh --docker-system --dangerous` (add `--volumes` if you also want to prune Docker volumes).
@ -18,7 +18,7 @@
Common symptoms and likely causes:
- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing KZG circuit assets (`/kzgrs_test_params/kzgrs_test_params` file) for DA workloads, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets exist, extend duration, check node logs for startup errors.
- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing circuit assets, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets exist, extend duration, check node logs for startup errors.
- **Transactions not included**: unfunded or misconfigured wallets (check `.wallets(N)` vs `.users(M)`), transaction rate exceeding block capacity, or rates exceeding block production speed—reduce rate, increase wallet count, verify wallet setup in logs.
- **Chaos stalls the run**: chaos (node control) only works with ComposeDeployer; host runner (LocalDeployer) and K8sDeployer don't support it (won't "stall", just can't execute chaos workloads). With compose, aggressive restart cadence can prevent consensus recovery—widen restart intervals.
- **Observability gaps**: metrics or logs unreachable because ports clash or services are not exposed—adjust observability ports and confirm runner wiring.
@ -43,7 +43,7 @@ $ cargo run -p runner-examples --bin local_runner
Finished dev [unoptimized + debuginfo] target(s) in 0.48s
Running `target/debug/local_runner`
[INFO runner_examples::local_runner] Starting local runner scenario
[INFO testing_framework_runner_local] Launching 3 validators
[INFO testing_framework_runner_local] Launching 3 nodes
[INFO testing_framework_runner_local] Waiting for node readiness...
(hangs here for 5+ minutes, CPU at 100%)
thread 'main' panicked at 'readiness timeout expired'
@ -71,12 +71,12 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
**What you'll see:**
```text
$ scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
$ scripts/run/run-examples.sh -t 60 -n 1 host
ERROR: versions.env not found at repository root
This file is required and should define:
VERSION=<circuit release tag>
NOMOS_NODE_REV=<nomos-node git revision>
NOMOS_BUNDLE_VERSION=<bundle schema version>
LOGOS_BLOCKCHAIN_NODE_REV=<logos-blockchain-node git revision>
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=<bundle schema version>
```
**Root Cause:** Helper scripts need `versions.env` to know which versions to build/fetch.
@ -87,50 +87,44 @@ This file is required and should define:
cat versions.env
# Should show:
# VERSION=v0.3.1
# NOMOS_NODE_REV=abc123def456
# NOMOS_BUNDLE_VERSION=v1
# LOGOS_BLOCKCHAIN_NODE_REV=abc123def456
# LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v1
```
---
### 3. Missing KZG Circuit Assets (DA Workloads)
### 3. Missing Circuit Assets
**Symptoms:**
- DA workload tests fail
- Node startup fails early
- Error messages about missing circuit files
- Nodes crash during DA operations
**What you'll see:**
```text
$ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
[INFO testing_framework_runner_local] Starting DA workload
[ERROR nomos_da_dispersal] Failed to load KZG parameters
Error: Custom { kind: NotFound, error: "Circuit file not found at: testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params" }
[INFO testing_framework_runner_local] Starting local runner scenario
Error: circuit assets directory missing or invalid
thread 'main' panicked at 'workload init failed'
```
**Root Cause:** DA (Data Availability) workloads require KZG cryptographic parameters. The file must exist at: `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note the repeated filename).
**Root Cause:** Circuit assets are required for proof-related paths. The runner expects `LOGOS_BLOCKCHAIN_CIRCUITS` to point to a directory containing the assets.
**Fix (recommended):**
```bash
# Use run-examples.sh which handles setup automatically
scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
scripts/run/run-examples.sh -t 60 -n 1 host
```
**Fix (manual):**
```bash
# Fetch circuits
scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits
# Copy to expected location
mkdir -p testing-framework/assets/stack/kzgrs_test_params
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
# Verify (should be ~120MB)
ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params
# Set the environment variable
export LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits
```
---
@ -138,37 +132,37 @@ ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params
### 4. Node Binaries Not Found
**Symptoms:**
- Error about missing `nomos-node` binary
- Error about missing `logos-blockchain-node` binary
- "file not found" or "no such file or directory"
- Environment variables `NOMOS_NODE_BIN` not set
- Environment variables `LOGOS_BLOCKCHAIN_NODE_BIN` not set
**What you'll see:**
```text
$ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
[INFO testing_framework_runner_local] Spawning validator 0
[INFO testing_framework_runner_local] Spawning node 0
Error: Os { code: 2, kind: NotFound, message: "No such file or directory" }
thread 'main' panicked at 'failed to spawn nomos-node process'
thread 'main' panicked at 'failed to spawn logos-blockchain-node process'
```
**Root Cause:** The local runner needs compiled `nomos-node` binaries, but doesn't know where they are.
**Root Cause:** The local runner needs compiled `logos-blockchain-node` binaries, but doesn't know where they are.
**Fix (recommended):**
```bash
# Use run-examples.sh which builds binaries automatically
scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
scripts/run/run-examples.sh -t 60 -n 1 host
```
**Fix (manual - set paths explicitly):**
```bash
# Build binaries first
cd ../nomos-node # or wherever your nomos-node checkout is
cargo build --release --bin nomos-node
cd ../logos-blockchain-node # or wherever your logos-blockchain-node checkout is
cargo build --release --bin logos-blockchain-node
# Set environment variables
export NOMOS_NODE_BIN=$PWD/target/release/nomos-node
export LOGOS_BLOCKCHAIN_NODE_BIN=$PWD/target/release/logos-blockchain-node
# Return to testing framework
cd ../nomos-testing
@ -187,7 +181,7 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
**What you'll see:**
```text
$ scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
$ scripts/run/run-examples.sh -t 60 -n 1 compose
[INFO runner_examples::compose_runner] Starting compose deployment
Error: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
thread 'main' panicked at 'compose deployment failed'
@ -236,7 +230,7 @@ thread 'main' panicked at 'compose deployment failed'
```bash
# Use run-examples.sh which builds the image automatically
scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 1 compose
```
**Fix (manual):**
@ -246,7 +240,7 @@ scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
scripts/build/build-bundle.sh --platform linux
# 2. Set bundle path
export NOMOS_BINARIES_TAR=$(ls -t .tmp/nomos-binaries-linux-*.tar.gz | head -1)
export LOGOS_BLOCKCHAIN_BINARIES_TAR=$(ls -t .tmp/nomos-binaries-linux-*.tar.gz | head -1)
# 3. Build Docker image
scripts/build/build_test_image.sh
@ -272,7 +266,7 @@ kind load docker-image logos-blockchain-testing:local
```text
$ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
[INFO testing_framework_runner_local] Launching validator 0 on port 18080
[INFO testing_framework_runner_local] Launching node 0 on port 18080
Error: Os { code: 48, kind: AddrInUse, message: "Address already in use" }
thread 'main' panicked at 'failed to bind port 18080'
```
@ -287,7 +281,7 @@ lsof -i :18080 # macOS/Linux
netstat -ano | findstr :18080 # Windows
# Kill orphaned nomos processes
pkill nomos-node
pkill logos-blockchain-node
# For compose: ensure containers are stopped
docker compose down
@ -335,7 +329,7 @@ thread 'main' panicked at 'workload init failed: insufficient wallets'
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(20) // ← Increase wallet count
.transactions_with(|tx| {
tx.users(10) // ← Must be ≤ wallets(20)
@ -362,7 +356,7 @@ CONTAINER ID STATUS
abc123def456 Restarting (137) 30 seconds ago # 137 = OOM killed
$ docker logs abc123def456
[INFO nomos_node] Starting validator
[INFO nomos_node] Starting node
[INFO consensus] Processing block
Killed # ← OOM killer terminated the process
```
@ -414,15 +408,15 @@ $ ls .tmp/
```bash
# Persist logs to a specific directory
NOMOS_LOG_DIR=/tmp/test-logs \
NOMOS_TESTS_KEEP_LOGS=1 \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/test-logs \
LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1 \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
# Logs persist after run
ls /tmp/test-logs/
# nomos-node-0.2024-12-18T14-30-00.log
# nomos-node-1.2024-12-18T14-30-00.log
# logos-blockchain-node-0.2024-12-18T14-30-00.log
# logos-blockchain-node-1.2024-12-18T14-30-00.log
# ...
```
@ -457,7 +451,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
// Increase run duration to allow more blocks.
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(120)) // ← Give more time
.build();
@ -481,15 +475,15 @@ When a test fails, check these in order:
1. **`POL_PROOF_DEV_MODE=true` is set** (REQUIRED for all runners)
2. **`versions.env` exists at repo root**
3. **KZG circuit assets present** (for DA workloads): `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
4. **Node binaries available** (`NOMOS_NODE_BIN` set, or using `run-examples.sh`)
3. **Circuit assets present** (`LOGOS_BLOCKCHAIN_CIRCUITS` points to a valid directory)
4. **Node binaries available** (`LOGOS_BLOCKCHAIN_NODE_BIN` set, or using `run-examples.sh`)
5. **Docker daemon running** (for compose/k8s)
6. **Docker image built** (`logos-blockchain-testing:local` exists for compose/k8s)
7. **No port conflicts** (`lsof -i :18080`, kill orphaned processes)
8. **Sufficient wallets** (`.wallets(N)``.users(M)`)
9. **Enough resources** (Docker memory 8GB+, ulimit -n 4096)
10. **Run duration appropriate** (long enough for consensus timing)
11. **Logs persisted** (`NOMOS_LOG_DIR` + `NOMOS_TESTS_KEEP_LOGS=1` if needed)
11. **Logs persisted** (`LOGOS_BLOCKCHAIN_LOG_DIR` + `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1` if needed)
**Still stuck?** Check node logs (see [Where to Find Logs](#where-to-find-logs)) for the actual error.
@ -497,17 +491,17 @@ When a test fails, check these in order:
### Log Location Quick Reference
| Runner | Default Output | With `NOMOS_LOG_DIR` + Flags | Access Command |
| Runner | Default Output | With `LOGOS_BLOCKCHAIN_LOG_DIR` + Flags | Access Command |
|--------|---------------|------------------------------|----------------|
| **Host** (local) | Per-run temporary directories under the current working directory (removed unless `NOMOS_TESTS_KEEP_LOGS=1`) | Per-node files with prefix `nomos-node-{index}` (set `NOMOS_LOG_DIR`) | `cat $NOMOS_LOG_DIR/nomos-node-0*` |
| **Host** (local) | Per-run temporary directories under the current working directory (removed unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`) | Per-node files with prefix `logos-blockchain-node-{index}` (set `LOGOS_BLOCKCHAIN_LOG_DIR`) | `cat $LOGOS_BLOCKCHAIN_LOG_DIR/logos-blockchain-node-0*` |
| **Compose** | Docker container stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `docker ps` then `docker logs <container-id>` |
| **K8s** | Pod stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `kubectl logs -l nomos/logical-role=validator` |
| **K8s** | Pod stdout/stderr | Set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory) | `kubectl logs -l nomos/logical-role=node` |
**Important Notes:**
- **Host runner** (local processes): Per-run temporary directories are created under the current working directory and removed after the run unless `NOMOS_TESTS_KEEP_LOGS=1`. To write per-node log files to a stable location, set `NOMOS_LOG_DIR=/path/to/logs`.
- **Host runner** (local processes): Per-run temporary directories are created under the current working directory and removed after the run unless `LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1`. To write per-node log files to a stable location, set `LOGOS_BLOCKCHAIN_LOG_DIR=/path/to/logs`.
- **Compose/K8s**: Node log destination is controlled by `testing-framework/assets/stack/cfgsync.yaml` (`tracing_settings.logger`). By default, rely on `docker logs` or `kubectl logs`.
- **File naming**: Log files use prefix `nomos-node-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix).
- **Container names**: Compose containers include project UUID, e.g., `nomos-compose-<uuid>-validator-0-1` where `<uuid>` is randomly generated per run
- **File naming**: Log files use prefix `logos-blockchain-node-{index}*` with timestamps, e.g., `logos-blockchain-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix).
- **Container names**: Compose containers include project UUID, e.g., `nomos-compose-<uuid>-node-0-1` where `<uuid>` is randomly generated per run
### Accessing Node Logs by Runner
@ -520,15 +514,15 @@ POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner 2>&1 | t
**Persistent file output:**
```bash
NOMOS_LOG_DIR=/tmp/debug-logs \
NOMOS_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/debug-logs \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
# Inspect logs (note: filenames include timestamps):
ls /tmp/debug-logs/
# Example: nomos-node-0.2024-12-01T10-30-45.log
tail -f /tmp/debug-logs/nomos-node-0* # Use wildcard to match timestamp
# Example: logos-blockchain-node-0.2024-12-01T10-30-45.log
tail -f /tmp/debug-logs/logos-blockchain-node-0* # Use wildcard to match timestamp
```
#### Compose Runner
@ -542,7 +536,7 @@ docker ps --filter "name=nomos-compose-"
docker logs -f <container-id>
# Or filter by name pattern:
docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
docker logs -f $(docker ps --filter "name=nomos-compose-.*-node-0" -q | head -1)
# Show last 100 lines
docker logs --tail 100 <container-id>
@ -551,12 +545,12 @@ docker logs --tail 100 <container-id>
**Keep containers for post-mortem debugging:**
```bash
COMPOSE_RUNNER_PRESERVE=1 \
NOMOS_TESTNET_IMAGE=logos-blockchain-testing:local \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin compose_runner
# OR: Use run-examples.sh (handles setup automatically)
COMPOSE_RUNNER_PRESERVE=1 scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
COMPOSE_RUNNER_PRESERVE=1 scripts/run/run-examples.sh -t 60 -n 1 compose
# After test failure, containers remain running:
docker ps --filter "name=nomos-compose-"
@ -564,7 +558,7 @@ docker exec -it <container-id> /bin/sh
docker logs <container-id> > debug.log
```
**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1`, where `{uuid}` is randomly generated per run.
**Note:** Container names follow the pattern `nomos-compose-{uuid}-node-{index}-1`, where `{uuid}` is randomly generated per run.
#### K8s Runner
@ -576,26 +570,26 @@ docker logs <container-id> > debug.log
# Check your namespace first
kubectl config view --minify | grep namespace
# All validator pods (add -n <namespace> if not using default)
kubectl logs -l nomos/logical-role=validator -f
# All node pods (add -n <namespace> if not using default)
kubectl logs -l nomos/logical-role=node -f
# Specific pod by name (find exact name first)
kubectl get pods -l nomos/logical-role=validator # Find the exact pod name
kubectl get pods -l nomos/logical-role=node # Find the exact pod name
kubectl logs -f <actual-pod-name> # Then use it
# With explicit namespace
kubectl logs -n my-namespace -l nomos/logical-role=validator -f
kubectl logs -n my-namespace -l nomos/logical-role=node -f
```
**Download logs from crashed pods:**
```bash
# Previous logs from crashed pod
kubectl get pods -l nomos/logical-role=validator # Find crashed pod name first
kubectl logs --previous <actual-pod-name> > crashed-validator.log
kubectl get pods -l nomos/logical-role=node # Find crashed pod name first
kubectl logs --previous <actual-pod-name> > crashed-node.log
# Or use label selector for all crashed validators
for pod in $(kubectl get pods -l nomos/logical-role=validator -o name); do
# Or use label selector for all crashed nodes
for pod in $(kubectl get pods -l nomos/logical-role=node -o name); do
kubectl logs --previous $pod > $(basename $pod)-previous.log 2>&1
done
```
@ -610,10 +604,10 @@ for pod in $(kubectl get pods -o name); do
done > all-logs.txt
# Or use label selectors (recommended)
kubectl logs -l nomos/logical-role=validator --tail=500 > validators.log
kubectl logs -l nomos/logical-role=node --tail=500 > nodes.log
# With explicit namespace
kubectl logs -n my-namespace -l nomos/logical-role=validator --tail=500 > validators.log
kubectl logs -n my-namespace -l nomos/logical-role=node --tail=500 > nodes.log
```
## Debugging Workflow
@ -644,7 +638,7 @@ ps aux | grep nomos
docker ps -a --filter "name=nomos-compose-"
# K8s: check pod status (use label selectors, add -n <namespace> if needed)
kubectl get pods -l nomos/logical-role=validator
kubectl get pods -l nomos/logical-role=node
kubectl describe pod <actual-pod-name> # Get name from above first
```
@ -658,7 +652,7 @@ Focus on the first node that exhibited problems or the node with the highest ind
- "Failed to bind address" → port conflict
- "Connection refused" → peer not ready or network issue
- "Proof verification failed" or "Proof generation timeout" → missing `POL_PROOF_DEV_MODE=true` (REQUIRED for all runners)
- "Failed to load KZG parameters" or "Circuit file not found" → missing KZG circuit assets at `testing-framework/assets/stack/kzgrs_test_params/`
- "Circuit file not found" → missing circuit assets at the path in `LOGOS_BLOCKCHAIN_CIRCUITS`
- "Insufficient funds" → wallet seeding issue (increase `.wallets(N)` or reduce `.users(M)`)
### 4. Check Log Levels
@ -666,12 +660,12 @@ Focus on the first node that exhibited problems or the node with the highest ind
If logs are too sparse, increase verbosity:
```bash
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="cryptarchia=trace,nomos_da_sampling=debug" \
LOGOS_BLOCKCHAIN_LOG_LEVEL=debug \
LOGOS_BLOCKCHAIN_LOG_FILTER="cryptarchia=trace" \
cargo run -p runner-examples --bin local_runner
```
If metric updates are polluting your logs (fields like `counter.*` / `gauge.*`), move those events to a dedicated `tracing` target (e.g. `target: "nomos_metrics"`) and set `NOMOS_LOG_FILTER="nomos_metrics=off,..."` so they dont get formatted into log output.
If metric updates are polluting your logs (fields like `counter.*` / `gauge.*`), move those events to a dedicated `tracing` target (e.g. `target: "nomos_metrics"`) and set `LOGOS_BLOCKCHAIN_LOG_FILTER="nomos_metrics=off,..."` so they don't get formatted into log output.
### 5. Verify Observability Endpoints
@ -689,22 +683,22 @@ curl http://localhost:18080/consensus/info # Adjust port per node
### 6. Compare with Known-Good Scenario
Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it passes, the issue is in your workload or topology configuration.
Run a minimal baseline test (e.g., 2 nodes, consensus liveness only). If it passes, the issue is in your workload or topology configuration.
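As a starting point, a baseline along these lines (a sketch using the same builder calls as the examples above; tune node count and duration as needed) isolates consensus behavior from workload configuration:
```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;

// Known-good baseline: two nodes, no workloads, liveness expectation only.
let baseline = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(60))
    .build();
```
If this baseline also fails, suspect the environment (env vars, binaries, circuit assets) rather than the scenario definition.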
## Common Error Messages
### "Consensus liveness expectation failed"
- **Cause**: Not enough blocks produced during the run window, missing
`POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing KZG
assets for DA workloads.
`POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing circuit
assets.
- **Fix**:
1. Verify `POL_PROOF_DEV_MODE=true` is set (REQUIRED for all runners).
2. Verify KZG assets exist at
`testing-framework/assets/stack/kzgrs_test_params/` (for DA workloads).
2. Verify circuit assets exist at the path referenced by
`LOGOS_BLOCKCHAIN_CIRCUITS`.
3. Extend `with_run_duration()` to allow more blocks.
4. Check node logs for proof generation or DA errors.
5. Reduce transaction/DA rate if nodes are overwhelmed.
4. Check node logs for proof generation or circuit asset errors.
5. Reduce transaction rate if nodes are overwhelmed.
### "Wallet seeding failed"
@ -730,50 +724,50 @@ Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it
it, proof generation is too slow).
2. Check node logs for startup errors (port conflicts, missing assets).
3. Verify network connectivity between nodes.
4. For DA workloads, ensure KZG circuit assets are present.
4. Ensure circuit assets are present and `LOGOS_BLOCKCHAIN_CIRCUITS` points to them.
### "ERROR: versions.env missing"
- **Cause**: Helper scripts (`run-examples.sh`, `build-bundle.sh`, `setup-circuits-stack.sh`) require `versions.env` file at repository root.
- **Cause**: Helper scripts (`run-examples.sh`, `build-bundle.sh`, `setup-logos-blockchain-circuits.sh`) require `versions.env` file at repository root.
- **Fix**: Ensure you're running from the repository root directory. The `versions.env` file should already exist and contains:
```text
VERSION=<circuit release tag>
NOMOS_NODE_REV=<nomos-node git revision>
NOMOS_BUNDLE_VERSION=<bundle schema version>
LOGOS_BLOCKCHAIN_NODE_REV=<logos-blockchain-node git revision>
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=<bundle schema version>
```
Use the checked-in `versions.env` at the repository root as the source of truth.
### "Port already in use"
- **Cause**: Previous test didn't clean up, or another process holds the port.
- **Fix**: Kill orphaned processes (`pkill nomos-node`), wait for Docker cleanup
- **Fix**: Kill orphaned processes (`pkill logos-blockchain-node`), wait for Docker cleanup
(`docker compose down`), or restart Docker.
### "Image not found: logos-blockchain-testing:local"
- **Cause**: Docker image not built for Compose/K8s runners, or KZG assets not
- **Cause**: Docker image not built for Compose/K8s runners, or circuit assets not
baked into the image.
- **Fix (recommended)**: Use run-examples.sh which handles everything:
```bash
scripts/run/run-examples.sh -t 60 -v 1 -e 1 compose
scripts/run/run-examples.sh -t 60 -n 1 compose
```
- **Fix (manual)**:
1. Build bundle: `scripts/build/build-bundle.sh --platform linux`
2. Set bundle path: `export NOMOS_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz`
2. Set bundle path: `export LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.1.tar.gz`
3. Build image: `scripts/build/build_test_image.sh`
4. **kind/minikube:** load the image into the cluster nodes (e.g. `kind load docker-image logos-blockchain-testing:local`, or `minikube image load ...`), or push to a registry and set `NOMOS_TESTNET_IMAGE` accordingly.
4. **kind/minikube:** load the image into the cluster nodes (e.g. `kind load docker-image logos-blockchain-testing:local`, or `minikube image load ...`), or push to a registry and set `LOGOS_BLOCKCHAIN_TESTNET_IMAGE` accordingly.
### "Failed to load KZG parameters" or "Circuit file not found"
### "Circuit file not found"
- **Cause**: DA workload requires KZG circuit assets. The file `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params` (note repeated filename) must exist. Inside containers, it's at `/kzgrs_test_params/kzgrs_test_params`.
- **Cause**: Circuit assets are missing or `LOGOS_BLOCKCHAIN_CIRCUITS` points to a non-existent directory. Inside containers, assets are expected at `/opt/circuits`.
- **Fix (recommended)**: Use run-examples.sh which handles setup:
```bash
scripts/run/run-examples.sh -t 60 -v 1 -e 1 <mode>
scripts/run/run-examples.sh -t 60 -n 1 <mode>
```
- **Fix (manual)**:
1. Fetch assets: `scripts/setup/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits`
2. Copy to expected path: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/`
3. Verify file exists: `ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
1. Fetch assets: `scripts/setup/setup-logos-blockchain-circuits.sh v0.3.1 ~/.logos-blockchain-circuits`
2. Set `LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits`
3. Verify directory exists: `ls -lh $LOGOS_BLOCKCHAIN_CIRCUITS`
4. For Compose/K8s: rebuild image with assets baked in
For detailed logging configuration and observability setup, see [Logging & Observability](logging-observability.md).

View File

@ -14,8 +14,8 @@ without changing the plan.
- Understand when to use each runner (Host, Compose, Kubernetes)
**Author and Run Scenarios**
- Define multi-node topologies with validators
- Configure transaction and DA workloads with appropriate rates
- Define multi-node topologies
- Configure transaction workloads with appropriate rates
- Add consensus liveness and inclusion expectations
- Run scenarios across all three deployment modes
- Use BlockFeed to monitor block production in real-time
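Concretely, those steps come together in a scenario definition along these lines (a sketch only; the exact DSL calls are introduced in later chapters):
```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;

// Multi-node topology, a transaction workload, and expectations in one plan.
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .wallets(20)
    .transactions_with(|tx| tx.rate(5).users(10))
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(60))
    .build();
```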
@ -56,8 +56,8 @@ without changing the plan.
## What This Book Does NOT Cover
- **Logos node internals** — This book focuses on testing infrastructure, not the blockchain protocol implementation. See the Logos node repository (`nomos-node`) for protocol documentation.
- **Consensus algorithm theory** — We assume familiarity with basic blockchain concepts (validators, blocks, transactions, data availability).
- **Logos node internals** — This book focuses on testing infrastructure, not the blockchain protocol implementation. See the Logos node repository (`logos-blockchain-node`) for protocol documentation.
- **Consensus algorithm theory** — We assume familiarity with basic blockchain concepts (nodes, blocks, transactions).
- **Rust language basics** — Examples use Rust, but we don't teach the language. See [The Rust Book](https://doc.rust-lang.org/book/) if you're new to Rust.
- **Kubernetes administration** — We show how to use the K8s runner, but don't cover cluster setup, networking, or operations.
- **Docker fundamentals** — We assume basic Docker/Compose knowledge for the Compose runner.

View File

@ -45,7 +45,7 @@ use testing_framework_workflows::workloads::transaction::Workload;
```rust,ignore
use testing_framework_workflows::ScenarioBuilderExt;
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(20) // Seed 20 wallet accounts
.transactions_with(|tx| {
tx.rate(10) // 10 transactions per block
@ -63,7 +63,7 @@ use testing_framework_workflows::workloads::transaction;
let tx_workload = transaction::Workload::with_rate(10)
.expect("transaction rate must be non-zero");
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(20)
.with_workload(tx_workload)
.with_run_duration(Duration::from_secs(60))
@ -86,7 +86,7 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
3. **Circuit artifacts must be available:**
- Automatically staged by `scripts/run/run-examples.sh`
- Or manually via `scripts/setup/setup-circuits-stack.sh` (recommended) / `scripts/setup/setup-nomos-circuits.sh`
- Or manually via `scripts/setup/setup-logos-blockchain-circuits.sh`
#### Attached Expectation
@ -117,7 +117,7 @@ Error: Expectation failed: TxInclusionExpectation
**How to debug:**
1. Check logs for proof generation timing:
```bash
grep "proof generation" $NOMOS_LOG_DIR/*/*.log
grep "proof generation" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log
```
2. Verify `POL_PROOF_DEV_MODE=true` was set
3. Increase duration: `.with_run_duration(Duration::from_secs(120))`
@ -125,97 +125,7 @@ Error: Expectation failed: TxInclusionExpectation
---
### 2. Data Availability (DA) Workload
Drives blob and channel activity to exercise data availability paths and storage.
**Import:**
```rust,ignore
use testing_framework_workflows::workloads::da::Workload;
```
#### Configuration
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `blob_rate_per_block` | `NonZeroU64` | **Required** | Blobs to publish per block |
| `channel_rate_per_block` | `NonZeroU64` | **Required** | Channels to create per block |
| `headroom_percent` | `u64` | `20` | Extra capacity for channel planning (avoids saturation) |
#### DSL Usage
```rust,ignore
use testing_framework_workflows::ScenarioBuilderExt;
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.da_with(|da| {
da.channel_rate(2) // 2 channels per block
.blob_rate(4) // 4 blobs per block
})
.with_run_duration(Duration::from_secs(120))
.build();
```
#### Direct Instantiation
```rust,ignore
use std::num::NonZeroU64;
use testing_framework_workflows::workloads::da;
let da_workload = da::Workload::with_rate(
NonZeroU64::new(4).unwrap(), // blob_rate_per_block
NonZeroU64::new(2).unwrap(), // channel_rate_per_block
20, // headroom_percent
);
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.with_workload(da_workload)
.with_run_duration(Duration::from_secs(120))
.build();
```
#### Prerequisites
1. **Sufficient duration:**
Channel creation and blob publishing are slower than transaction submission. Allow 120+ seconds.
2. **Circuit artifacts:**
Same as transaction workload (POL_PROOF_DEV_MODE, circuits staged).
#### Attached Expectation
**DaWorkloadExpectation** — Verifies blobs and channels were created and published.
**What it checks:**
- At least `N` channels were created (where N = channel_rate × expected blocks)
- At least `M` blobs were published (where M = blob_rate × expected blocks × headroom)
- Uses BlockFeed API to verify
**Failure modes:**
- "Expected >= X channels, observed Y" (Y < X)
- "Expected >= X blobs, observed Y" (Y < X)
- Common causes: insufficient duration, DA saturation
#### What Failure Looks Like
```text
Error: Expectation failed: DaWorkloadExpectation
Expected: >= 60 channels (2 channels/block × 30 blocks)
Observed: 23 channels
Possible causes:
- Duration too short (channels still being created)
- Blob publishing failed (check API errors)
- Network issues (check validator connectivity)
```
**How to debug:**
1. Increase duration: `.with_run_duration(Duration::from_secs(180))`
2. Reduce rates: `.channel_rate(1).blob_rate(2)`
---
### 3. Chaos Workload (Random Restart)
### 2. Chaos Workload (Random Restart)
Triggers controlled node restarts to test resilience and recovery behaviors.
@ -231,7 +141,7 @@ use testing_framework_workflows::workloads::chaos::RandomRestartWorkload;
| `min_delay` | `Duration` | **Required** | Minimum time between restart attempts |
| `max_delay` | `Duration` | **Required** | Maximum time between restart attempts |
| `target_cooldown` | `Duration` | **Required** | Minimum time before restarting same node again |
| `include_validators` | `bool` | **Required** | Whether to restart validators |
| `include_nodes` | `bool` | **Required** | Whether to restart nodes |
#### Usage
@ -242,14 +152,14 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRestartWorkload};
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(3)
t.network_star().nodes(3)
})
.enable_node_control() // REQUIRED for chaos
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min_delay
Duration::from_secs(75), // max_delay
Duration::from_secs(120), // target_cooldown
true, // include_validators
true, // include_nodes
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(180))
@ -270,7 +180,7 @@ let scenario = ScenarioBuilder::topology_with(|t| {
- **K8s runner:** Not yet implemented
3. **Sufficient topology:**
- For validators: Need >1 validator (workload skips if only 1)
- For nodes: Need >1 node (workload skips if only 1)
4. **Realistic timing:**
- Total duration should be 2-3× the max_delay + cooldown
@ -306,18 +216,18 @@ Error: Expectation failed: ConsensusLiveness
Possible causes:
- Restart frequency too high (nodes can't recover)
- Consensus timing too slow (increase duration)
- Too many validators restarted simultaneously
- Too many nodes restarted simultaneously
- Nodes crashed after restart (check logs)
```
**How to debug:**
1. Check restart events in logs:
```bash
grep "restarting\|restart complete" $NOMOS_LOG_DIR/*/*.log
grep "restarting\|restart complete" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log
```
2. Verify node control is enabled:
```bash
grep "NodeControlHandle" $NOMOS_LOG_DIR/*/*.log
grep "NodeControlHandle" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log
```
3. Increase cooldown: `Duration::from_secs(180)`
4. Increase duration: `.with_run_duration(Duration::from_secs(300))`
@ -338,7 +248,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
#### DSL Usage
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build();
@ -360,7 +270,7 @@ Error: Expectation failed: ConsensusLiveness
Possible causes:
- Nodes crashed or never started (check logs)
- Consensus timing misconfigured (CONSENSUS_SLOT_TIME too high)
- Insufficient validators (need >= 2 for BFT consensus)
- Insufficient nodes (need >= 2 for BFT consensus)
- Duration too short (nodes still syncing)
```
@ -368,15 +278,15 @@ Error: Expectation failed: ConsensusLiveness
1. Check if nodes started:
```bash
grep "node started\|listening on" $NOMOS_LOG_DIR/*/*.log
grep "node started\|listening on" $LOGOS_BLOCKCHAIN_LOG_DIR/*/*.log
```
2. Check block production:
```bash
grep "block.*height" $NOMOS_LOG_DIR/validator-*/*.log
grep "block.*height" $LOGOS_BLOCKCHAIN_LOG_DIR/node-*/*.log
```
3. Check consensus participation:
```bash
grep "consensus.*slot\|proposal" $NOMOS_LOG_DIR/validator-*/*.log
grep "consensus.*slot\|proposal" $LOGOS_BLOCKCHAIN_LOG_DIR/node-*/*.log
```
4. Increase duration: `.with_run_duration(Duration::from_secs(120))`
5. Check env vars: `echo $CONSENSUS_SLOT_TIME $CONSENSUS_ACTIVE_SLOT_COEFF`
@ -390,10 +300,9 @@ Each workload automatically attaches its own expectation:
| Workload | Expectation | What It Checks |
|----------|-------------|----------------|
| Transaction | `TxInclusionExpectation` | Transactions were included in blocks |
| DA | `DaWorkloadExpectation` | Blobs and channels were created/published |
| Chaos | (None) | Add `.expect_consensus_liveness()` explicitly |
These expectations are added automatically when using the DSL (`.transactions_with()`, `.da_with()`).
These expectations are added automatically when using the DSL (`.transactions_with()`).
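In practice this means a transaction scenario needs no extra call, while a chaos scenario should request liveness explicitly; a brief sketch (names as in the sections above):
```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRestartWorkload};

// Transactions: TxInclusionExpectation is attached for you by the DSL.
let tx_plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .wallets(20)
    .transactions_with(|tx| tx.rate(5).users(10))
    .with_run_duration(Duration::from_secs(60))
    .build();

// Chaos: nothing is attached automatically, so ask for liveness yourself.
let chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
    .enable_node_control()
    .with_workload(RandomRestartWorkload::new(
        Duration::from_secs(45),
        Duration::from_secs(75),
        Duration::from_secs(120),
        true,
    ))
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(180))
    .build();
```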
---
@ -412,18 +321,6 @@ These expectations are added automatically when using the DSL (`.transactions_wi
| Users | 5 | wallet accounts |
| Wallets | 20 | total seeded |
### DA Workload
```rust,ignore
.da_with(|da| da.channel_rate(2).blob_rate(4))
```
| What | Value | Unit |
|------|-------|------|
| Channel rate | 2 | channels/block |
| Blob rate | 4 | blobs/block |
| Headroom | 20 | percent |
### Chaos Workload
```rust,ignore
@ -432,7 +329,7 @@ These expectations are added automatically when using the DSL (`.transactions_wi
Duration::from_secs(45), // min
Duration::from_secs(75), // max
Duration::from_secs(120), // cooldown
true, // validators
true, // nodes
))
```
@ -443,10 +340,9 @@ These expectations are added automatically when using the DSL (`.transactions_wi
### Pattern 1: Multiple Workloads
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(20)
.transactions_with(|tx| tx.rate(5).users(10))
.da_with(|da| da.channel_rate(2).blob_rate(2))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(120))
.build();
@ -473,7 +369,7 @@ impl Expectation for MyCustomExpectation {
}
}
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.with_expectation(MyCustomExpectation)
.with_run_duration(Duration::from_secs(60))
.build();
@ -485,12 +381,12 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
When a workload or expectation fails:
1. Check logs: `$NOMOS_LOG_DIR/*/` or `docker compose logs` or `kubectl logs`
2. Verify environment variables: `POL_PROOF_DEV_MODE`, `NOMOS_NODE_BIN`, etc.
1. Check logs: `$LOGOS_BLOCKCHAIN_LOG_DIR/*/` or `docker compose logs` or `kubectl logs`
2. Verify environment variables: `POL_PROOF_DEV_MODE`, `LOGOS_BLOCKCHAIN_NODE_BIN`, etc.
3. Check prerequisites: wallets, node control, circuits
4. Increase duration: Double the run duration and retry
5. Reduce rates: Half the traffic rates and retry
6. Check metrics: Prometheus queries for block height, tx count, DA stats
6. Check metrics: Prometheus queries for block height and tx count
7. Reproduce locally: Use local runner for faster iteration
---

View File

@ -1,7 +1,7 @@
# Workspace Layout
The workspace focuses on multi-node integration testing and sits alongside a
`nomos-node` checkout. Its crates separate concerns to keep scenarios
`logos-blockchain-node` checkout. Its crates separate concerns to keep scenarios
repeatable and portable:
- **Configs**: prepares high-level node, network, tracing, and wallet settings

View File

@ -0,0 +1,59 @@
use std::time::Duration;
use anyhow::Result;
use testing_framework_core::{
scenario::{PeerSelection, StartNodeOptions},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tokio::time::sleep;
#[allow(dead_code)]
async fn external_driver_example() -> Result<()> {
// Step 1: Create cluster with capacity for 3 nodes
let config = TopologyConfig::with_node_numbers(3);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(config)?;
// Step 2: External driver decides to start 2 nodes initially
println!("Starting initial topology...");
let node_a = cluster.start_node("a").await?.api;
let node_b = cluster
.start_node_with(
"b",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
},
)
.await?
.api;
cluster.wait_network_ready().await?;
// Step 3: External driver runs some protocol operations
let info = node_a.consensus_info().await?;
println!("Initial cluster height: {}", info.height);
// Step 4: Later, external driver decides to add third node
println!("External driver adding third node...");
let node_c = cluster
.start_node_with(
"c",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
},
)
.await?
.api;
cluster.wait_network_ready().await?;
// Step 5: External driver validates final state
let heights = vec![
node_a.consensus_info().await?.height,
node_b.consensus_info().await?.height,
node_c.consensus_info().await?.height,
];
println!("Final heights: {:?}", heights);
Ok(())
}

View File

@ -0,0 +1,60 @@
use std::time::Duration;
use testing_framework_core::nodes::ApiClient;
use tokio::time::sleep;
#[allow(dead_code)]
async fn height_convergence(
node_a: &ApiClient,
node_b: &ApiClient,
node_c: &ApiClient,
) -> anyhow::Result<()> {
let start = tokio::time::Instant::now();
loop {
let heights: Vec<u64> = vec![
node_a.consensus_info().await?.height,
node_b.consensus_info().await?.height,
node_c.consensus_info().await?.height,
];
let max_diff = heights.iter().max().unwrap() - heights.iter().min().unwrap();
if max_diff <= 5 {
println!("Converged: heights={:?}", heights);
break;
}
if start.elapsed() > Duration::from_secs(60) {
return Err(anyhow::anyhow!("Convergence timeout: heights={:?}", heights));
}
sleep(Duration::from_secs(2)).await;
}
Ok(())
}
#[allow(dead_code)]
async fn peer_count_verification(node: &ApiClient) -> anyhow::Result<()> {
let info = node.network_info().await?;
assert_eq!(
info.n_peers, 3,
"Expected 3 peers, found {}",
info.n_peers
);
Ok(())
}
#[allow(dead_code)]
async fn block_production(node_a: &ApiClient) -> anyhow::Result<()> {
// Verify node is producing blocks
let initial_height = node_a.consensus_info().await?.height;
sleep(Duration::from_secs(10)).await;
let current_height = node_a.consensus_info().await?.height;
assert!(
current_height > initial_height,
"Node should have produced blocks: initial={}, current={}",
initial_height,
current_height
);
Ok(())
}

View File

@ -3,7 +3,8 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
pub async fn run_with_env_overrides() -> Result<()> {
// Uses NOMOS_DEMO_* env vars (for example NOMOS_DEMO_NODES)
// Uses LOGOS_BLOCKCHAIN_DEMO_* env vars (for example
// LOGOS_BLOCKCHAIN_DEMO_NODES)
let mut plan = ScenarioBuilder::with_node_counts(3)
.with_run_duration(std::time::Duration::from_secs(120))
.build()?;

View File

@ -24,9 +24,9 @@ async fn main() {
tracing_subscriber::fmt::init();
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);
let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(nodes, run_secs, "starting compose runner demo");
@ -80,7 +80,9 @@ async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
};
if !runner.context().telemetry().is_configured() {
warn!("metrics querying is disabled; set NOMOS_METRICS_QUERY_URL to enable PromQL queries");
warn!(
"metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
);
}
info!("running scenario");

View File

@ -17,8 +17,8 @@ const TRANSACTION_WALLETS: usize = 50;
async fn main() {
tracing_subscriber::fmt::init();
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(nodes, run_secs, "starting k8s runner demo");
if let Err(err) = run_k8s_case(nodes, Duration::from_secs(run_secs)).await {
@ -41,13 +41,13 @@ async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
.with_run_duration(run_duration)
.expect_consensus_liveness();
if let Ok(url) = env::var("NOMOS_METRICS_QUERY_URL") {
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_QUERY_URL") {
if !url.trim().is_empty() {
scenario = scenario.with_metrics_query_url_str(url.trim());
}
}
if let Ok(url) = env::var("NOMOS_METRICS_OTLP_INGEST_URL") {
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL") {
if !url.trim().is_empty() {
scenario = scenario.with_metrics_otlp_ingest_url_str(url.trim());
}
@ -68,7 +68,9 @@ async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
};
if !runner.context().telemetry().is_configured() {
warn!("metrics querying is disabled; set NOMOS_METRICS_QUERY_URL to enable PromQL queries");
warn!(
"metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
);
}
info!("running scenario");

View File

@ -22,8 +22,8 @@ async fn main() {
process::exit(1);
}
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(nodes, run_secs, "starting local runner demo");

View File

@ -22,13 +22,13 @@ fn set_default_env(key: &str, value: &str) {
pub fn init_logging_defaults() {
set_default_env("POL_PROOF_DEV_MODE", "true");
set_default_env("NOMOS_TESTS_KEEP_LOGS", "1");
set_default_env("NOMOS_LOG_LEVEL", "info");
set_default_env("LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS", "1");
set_default_env("LOGOS_BLOCKCHAIN_LOG_LEVEL", "info");
set_default_env("RUST_LOG", "info");
}
pub fn init_node_log_dir_defaults(deployer: DeployerKind) {
if env::var_os("NOMOS_LOG_DIR").is_some() {
if env::var_os("LOGOS_BLOCKCHAIN_LOG_DIR").is_some() {
return;
}
@ -36,8 +36,12 @@ pub fn init_node_log_dir_defaults(deployer: DeployerKind) {
let _ = fs::create_dir_all(&host_dir);
match deployer {
DeployerKind::Local => set_default_env("NOMOS_LOG_DIR", &host_dir.display().to_string()),
DeployerKind::Compose => set_default_env("NOMOS_LOG_DIR", DEFAULT_CONTAINER_NODE_LOG_DIR),
DeployerKind::Local => {
set_default_env("LOGOS_BLOCKCHAIN_LOG_DIR", &host_dir.display().to_string())
}
DeployerKind::Compose => {
set_default_env("LOGOS_BLOCKCHAIN_LOG_DIR", DEFAULT_CONTAINER_NODE_LOG_DIR)
}
}
}

View File

@ -2,5 +2,5 @@
# Relative paths are resolved from the repo root.
# Host-side circuit bundle locations used by helper scripts.
NOMOS_CIRCUITS_HOST_DIR_REL=".tmp/logos-blockchain-circuits-host"
NOMOS_CIRCUITS_LINUX_DIR_REL=".tmp/logos-blockchain-circuits-linux"
LOGOS_BLOCKCHAIN_CIRCUITS_HOST_DIR_REL=".tmp/logos-blockchain-circuits-host"
LOGOS_BLOCKCHAIN_CIRCUITS_LINUX_DIR_REL=".tmp/logos-blockchain-circuits-linux"

View File

@ -30,7 +30,7 @@ Usage: scripts/build/build-bundle.sh [--platform host|linux] [--output PATH]
Options:
--platform Target platform for binaries (default: host)
--output Output path for the tarball (default: .tmp/nomos-binaries-<platform>-<version>.tar.gz)
--rev logos-blockchain-node git revision to build (overrides NOMOS_NODE_REV)
--rev logos-blockchain-node git revision to build (overrides LOGOS_BLOCKCHAIN_NODE_REV)
--path Use local logos-blockchain-node checkout at DIR (skip fetch/checkout)
--features Extra cargo features to enable (comma-separated); base always includes "testing"
--docker-platform Docker platform for Linux bundle when running on non-Linux host (default: auto; linux/arm64 on Apple silicon Docker Desktop, else linux/amd64)
@ -40,7 +40,7 @@ Notes:
run inside a Linux Docker container to produce Linux binaries.
- On Apple silicon, Docker defaults to linux/arm64; for compose/k8s you likely
want linux/amd64 (the default here). Override with --docker-platform.
- VERSION, NOMOS_NODE_REV, and optional NOMOS_NODE_PATH env vars are honored (defaults align with run-examples.sh).
- VERSION, LOGOS_BLOCKCHAIN_NODE_REV, and optional LOGOS_BLOCKCHAIN_NODE_PATH env vars are honored (defaults align with run-examples.sh).
USAGE
}
@ -52,17 +52,17 @@ build_bundle::fail() {
build_bundle::apply_nomos_node_patches() {
local node_src="$1"
local apply="${NOMOS_NODE_APPLY_PATCHES:-1}"
local apply="${LOGOS_BLOCKCHAIN_NODE_APPLY_PATCHES:-1}"
if [ "${apply}" = "0" ]; then
return 0
fi
local patch_dir="${NOMOS_NODE_PATCH_DIR:-${ROOT_DIR}/patches/logos-blockchain-node}"
local patch_dir="${LOGOS_BLOCKCHAIN_NODE_PATCH_DIR:-${ROOT_DIR}/patches/logos-blockchain-node}"
if [ ! -d "${patch_dir}" ]; then
return 0
fi
local level="${NOMOS_NODE_PATCH_LEVEL:-}"
local level="${LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL:-}"
if [ -z "${level}" ]; then
level="all"
fi
@ -84,7 +84,7 @@ build_bundle::apply_nomos_node_patches() {
fi
if [ "${level}" != "all" ] && [ "${level}" != "ALL" ]; then
if ! [[ "${level}" =~ ^[0-9]+$ ]]; then
build_bundle::fail "Invalid NOMOS_NODE_PATCH_LEVEL: ${level} (expected integer or 'all')"
build_bundle::fail "Invalid LOGOS_BLOCKCHAIN_NODE_PATCH_LEVEL: ${level} (expected integer or 'all')"
fi
if [ -n "${phase}" ] && [ "${phase}" -gt "${level}" ]; then
continue
@ -104,11 +104,11 @@ build_bundle::load_env() {
. "${ROOT_DIR}/versions.env"
DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}"
DEFAULT_NODE_REV="${NOMOS_NODE_REV:-}"
DEFAULT_NODE_PATH="${NOMOS_NODE_PATH:-}"
DEFAULT_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:-}"
DEFAULT_NODE_PATH="${LOGOS_BLOCKCHAIN_NODE_PATH:-}"
NOMOS_EXTRA_FEATURES="${NOMOS_EXTRA_FEATURES:-}"
DOCKER_PLATFORM="${NOMOS_BUNDLE_DOCKER_PLATFORM:-${NOMOS_BIN_PLATFORM:-}}"
LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}"
DOCKER_PLATFORM="${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-${LOGOS_BLOCKCHAIN_BIN_PLATFORM:-}}"
BUNDLE_RUSTUP_TOOLCHAIN="${BUNDLE_RUSTUP_TOOLCHAIN:-}"
if [ -z "${BUNDLE_RUSTUP_TOOLCHAIN}" ] && command -v rustup >/dev/null 2>&1 && [ -f "${ROOT_DIR}/rust-toolchain.toml" ]; then
@ -153,8 +153,8 @@ build_bundle::parse_args() {
--rev) REV_OVERRIDE="${2:-}"; shift 2 ;;
--path=*) PATH_OVERRIDE="${1#*=}"; shift ;;
--path) PATH_OVERRIDE="${2:-}"; shift 2 ;;
--features=*) NOMOS_EXTRA_FEATURES="${1#*=}"; shift ;;
--features) NOMOS_EXTRA_FEATURES="${2:-}"; shift 2 ;;
--features=*) LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${1#*=}"; shift ;;
--features) LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${2:-}"; shift 2 ;;
--docker-platform=*) DOCKER_PLATFORM="${1#*=}"; shift ;;
--docker-platform) DOCKER_PLATFORM="${2:-}"; shift 2 ;;
*) build_bundle::fail "Unknown argument: $1" ;;
@ -174,11 +174,11 @@ build_bundle::validate_and_finalize() {
build_bundle::fail "Use either --rev or --path, not both"
fi
if [ -z "${REV_OVERRIDE}" ] && [ -z "${PATH_OVERRIDE}" ] && [ -z "${DEFAULT_NODE_REV}" ] && [ -z "${DEFAULT_NODE_PATH}" ]; then
build_bundle::fail "Provide --rev, --path, or set NOMOS_NODE_REV/NOMOS_NODE_PATH in versions.env"
build_bundle::fail "Provide --rev, --path, or set LOGOS_BLOCKCHAIN_NODE_REV/LOGOS_BLOCKCHAIN_NODE_PATH in versions.env"
fi
NOMOS_NODE_REV="${REV_OVERRIDE:-${DEFAULT_NODE_REV}}"
NOMOS_NODE_PATH="${PATH_OVERRIDE:-${DEFAULT_NODE_PATH}}"
export NOMOS_NODE_REV NOMOS_NODE_PATH
LOGOS_BLOCKCHAIN_NODE_REV="${REV_OVERRIDE:-${DEFAULT_NODE_REV}}"
LOGOS_BLOCKCHAIN_NODE_PATH="${PATH_OVERRIDE:-${DEFAULT_NODE_PATH}}"
export LOGOS_BLOCKCHAIN_NODE_REV LOGOS_BLOCKCHAIN_NODE_PATH
build_bundle::default_docker_platform
DOCKER_PLATFORM="${DOCKER_PLATFORM:-linux/amd64}"
@ -223,16 +223,16 @@ build_bundle::maybe_run_linux_build_in_docker() {
command -v docker >/dev/null 2>&1 || build_bundle::fail "Docker is required to build a Linux bundle from non-Linux host"
[ -n "${DOCKER_PLATFORM}" ] || build_bundle::fail "--docker-platform must not be empty"
local node_path_env="${NOMOS_NODE_PATH}"
local node_path_env="${LOGOS_BLOCKCHAIN_NODE_PATH}"
local -a extra_mounts=()
if [ -n "${NOMOS_NODE_PATH}" ]; then
case "${NOMOS_NODE_PATH}" in
if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
case "${LOGOS_BLOCKCHAIN_NODE_PATH}" in
"${ROOT_DIR}"/*)
node_path_env="/workspace${NOMOS_NODE_PATH#"${ROOT_DIR}"}"
node_path_env="/workspace${LOGOS_BLOCKCHAIN_NODE_PATH#"${ROOT_DIR}"}"
;;
/*)
node_path_env="/external/logos-blockchain-node"
extra_mounts+=("-v" "${NOMOS_NODE_PATH}:${node_path_env}")
extra_mounts+=("-v" "${LOGOS_BLOCKCHAIN_NODE_PATH}:${node_path_env}")
;;
*)
build_bundle::fail "--path must be absolute when cross-building in Docker"
@ -248,23 +248,23 @@ build_bundle::maybe_run_linux_build_in_docker() {
mkdir -p "${ROOT_DIR}/.tmp/cargo-linux" "${host_target_dir}"
local -a features_args=()
if [ -n "${NOMOS_EXTRA_FEATURES:-}" ]; then
features_args+=(--features "${NOMOS_EXTRA_FEATURES}")
if [ -n "${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}" ]; then
features_args+=(--features "${LOGOS_BLOCKCHAIN_EXTRA_FEATURES}")
fi
local -a src_args=()
if [ -n "${node_path_env}" ]; then
src_args+=(--path "${node_path_env}")
else
src_args+=(--rev "${NOMOS_NODE_REV}")
src_args+=(--rev "${LOGOS_BLOCKCHAIN_NODE_REV}")
fi
docker run --rm --platform "${DOCKER_PLATFORM}" \
-e VERSION="${VERSION}" \
-e NOMOS_NODE_REV="${NOMOS_NODE_REV}" \
-e NOMOS_NODE_PATH="${node_path_env}" \
-e NOMOS_BUNDLE_DOCKER_PLATFORM="${DOCKER_PLATFORM}" \
-e NOMOS_EXTRA_FEATURES="${NOMOS_EXTRA_FEATURES:-}" \
-e LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV}" \
-e LOGOS_BLOCKCHAIN_NODE_PATH="${node_path_env}" \
-e LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM="${DOCKER_PLATFORM}" \
-e LOGOS_BLOCKCHAIN_EXTRA_FEATURES="${LOGOS_BLOCKCHAIN_EXTRA_FEATURES:-}" \
-e BUNDLE_IN_CONTAINER=1 \
-e CARGO_HOME=/workspace/.tmp/cargo-linux \
-e CARGO_TARGET_DIR="/workspace/.tmp/logos-blockchain-node-linux-target${target_suffix}" \
@ -289,15 +289,15 @@ build_bundle::prepare_circuits() {
# as the native-host `target/debug` layout would otherwise get mixed.
local target_suffix=""
if [ -n "${BUNDLE_IN_CONTAINER:-}" ]; then
target_suffix="$(build_bundle::docker_platform_suffix "${NOMOS_BUNDLE_DOCKER_PLATFORM:-}")"
target_suffix="$(build_bundle::docker_platform_suffix "${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-}")"
fi
NODE_TARGET="${ROOT_DIR}/.tmp/logos-blockchain-node-linux-target${target_suffix}"
fi
NODE_SRC_DEFAULT="${ROOT_DIR}/.tmp/logos-blockchain-node-${PLATFORM}-src"
NODE_SRC="${NOMOS_NODE_PATH:-${NODE_SRC_DEFAULT}}"
if [ -n "${NOMOS_NODE_PATH}" ]; then
[ -d "${NODE_SRC}" ] || build_bundle::fail "NOMOS_NODE_PATH does not exist: ${NODE_SRC}"
NODE_SRC="${LOGOS_BLOCKCHAIN_NODE_PATH:-${NODE_SRC_DEFAULT}}"
if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
[ -d "${NODE_SRC}" ] || build_bundle::fail "LOGOS_BLOCKCHAIN_NODE_PATH does not exist: ${NODE_SRC}"
rm -rf "${NODE_SRC_DEFAULT}"
if [ -d "${NODE_TARGET}" ]; then
find "${NODE_TARGET}" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
@ -314,19 +314,19 @@ build_bundle::build_binaries() {
mkdir -p "${NODE_SRC}"
(
cd "${NODE_SRC}"
if [ -n "${NOMOS_NODE_PATH}" ]; then
if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
echo "Using local logos-blockchain-node checkout at ${NODE_SRC} (no fetch/checkout)"
else
if [ ! -d "${NODE_SRC}/.git" ]; then
git clone https://github.com/logos-co/nomos-node.git "${NODE_SRC}"
fi
git fetch --depth 1 origin "${NOMOS_NODE_REV}"
git checkout "${NOMOS_NODE_REV}"
git fetch --depth 1 origin "${LOGOS_BLOCKCHAIN_NODE_REV}"
git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"
git reset --hard
git clean -fdx
fi
if [ -z "${NOMOS_NODE_PATH}" ]; then
if [ -z "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
build_bundle::apply_nomos_node_patches "${NODE_SRC}"
fi
unset CARGO_FEATURE_BUILD_VERIFICATION_KEY
@ -352,8 +352,8 @@ build_bundle::package_bundle() {
mkdir -p "${bundle_dir}/artifacts"
cp "${NODE_BIN}" "${bundle_dir}/artifacts/logos-blockchain-node"
{
echo "nomos_node_path=${NOMOS_NODE_PATH:-}"
echo "nomos_node_rev=${NOMOS_NODE_REV:-}"
echo "nomos_node_path=${LOGOS_BLOCKCHAIN_NODE_PATH:-}"
echo "nomos_node_rev=${LOGOS_BLOCKCHAIN_NODE_REV:-}"
if [ -d "${NODE_SRC}/.git" ] && command -v git >/dev/null 2>&1; then
echo "nomos_node_git_head=$(git -C "${NODE_SRC}" rev-parse HEAD 2>/dev/null || true)"
fi

View File

@ -16,7 +16,7 @@ Builds a Linux bundle via scripts/build/build-bundle.sh, then stages artifacts i
- testing-framework/assets/stack/bin
Options:
--rev REV logos-blockchain-node git revision to build (overrides NOMOS_NODE_REV)
--rev REV logos-blockchain-node git revision to build (overrides LOGOS_BLOCKCHAIN_NODE_REV)
--path DIR use local logos-blockchain-node checkout (skip fetch/checkout)
--features LIST extra cargo features (comma-separated); base includes "testing"
--docker-platform PLAT docker platform for the Linux build (e.g. linux/amd64, linux/arm64)
@ -26,8 +26,8 @@ Options:
Environment:
VERSION bundle version (default from versions.env)
NOMOS_CIRCUITS_VERSION legacy alias for VERSION (supported)
NOMOS_NODE_REV default logos-blockchain-node revision (from versions.env)
LOGOS_BLOCKCHAIN_CIRCUITS_VERSION legacy alias for VERSION (supported)
LOGOS_BLOCKCHAIN_NODE_REV default logos-blockchain-node revision (from versions.env)
EOF
}
@ -48,8 +48,8 @@ build_linux_binaries::load_env() {
DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}"
VERSION="${VERSION:-${DEFAULT_VERSION}}"
if [ -n "${NOMOS_CIRCUITS_VERSION:-}" ]; then
VERSION="${NOMOS_CIRCUITS_VERSION}"
if [ -n "${LOGOS_BLOCKCHAIN_CIRCUITS_VERSION:-}" ]; then
VERSION="${LOGOS_BLOCKCHAIN_CIRCUITS_VERSION}"
fi
}

View File

@ -19,13 +19,13 @@ Options:
--version VERSION Bundle version tag (default: versions.env VERSION)
--dockerfile PATH Dockerfile path (default: testing-framework/assets/stack/Dockerfile.runtime)
--base-tag TAG Base image tag (default: logos-blockchain-testing:base)
--bundle-tar PATH Bundle tar containing artifacts/{nomos-*} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env NOMOS_BINARIES_TAR)
--bundle-tar PATH Bundle tar containing artifacts/{nomos-*} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env LOGOS_BLOCKCHAIN_BINARIES_TAR)
--no-restore Do not restore binaries from bundle tar (forces Dockerfile to build/download as needed)
--print-config Print resolved configuration and exit
-h, --help Show this help and exit
Env (legacy/compatible):
IMAGE_TAG, VERSION, NOMOS_BINARIES_TAR
IMAGE_TAG, VERSION, LOGOS_BLOCKCHAIN_BINARIES_TAR
USAGE
}
@ -52,7 +52,7 @@ build_test_image::load_env() {
BASE_IMAGE_TAG_DEFAULT="logos-blockchain-testing:base"
VERSION_DEFAULT="${VERSION:?Missing VERSION in versions.env}"
NOMOS_NODE_REV="${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV in versions.env}"
LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV in versions.env}"
}
build_test_image::parse_args() {
@ -61,7 +61,7 @@ build_test_image::parse_args() {
DOCKERFILE_PATH="${DOCKERFILE_PATH_DEFAULT}"
BASE_DOCKERFILE_PATH="${BASE_DOCKERFILE_PATH_DEFAULT}"
BASE_IMAGE_TAG="${BASE_IMAGE_TAG:-${BASE_IMAGE_TAG_DEFAULT}}"
BUNDLE_TAR_PATH="${NOMOS_BINARIES_TAR:-}"
BUNDLE_TAR_PATH="${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}"
NO_RESTORE=0
PRINT_CONFIG=0
@ -102,7 +102,7 @@ build_test_image::print_config() {
echo "Dockerfile: ${DOCKERFILE_PATH}"
echo "Base image tag: ${BASE_IMAGE_TAG}"
echo "Base Dockerfile: ${BASE_DOCKERFILE_PATH}"
echo "Logos node rev: ${NOMOS_NODE_REV}"
echo "Logos node rev: ${LOGOS_BLOCKCHAIN_NODE_REV}"
echo "Binaries dir: ${BIN_DST}"
echo "Bundle tar (if used): ${TAR_PATH}"
echo "Restore from tar: $([ "${NO_RESTORE}" -eq 1 ] && echo "disabled" || echo "enabled")"
@ -168,7 +168,7 @@ build_test_image::docker_build() {
local -a base_build_args=(
-f "${BASE_DOCKERFILE_PATH}"
-t "${BASE_IMAGE_TAG}"
--build-arg "NOMOS_NODE_REV=${NOMOS_NODE_REV}"
--build-arg "LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV}"
--build-arg "VERSION=${VERSION}"
"${ROOT_DIR}"
)
@ -212,7 +212,7 @@ build_test_image::main() {
cat <<EOF
Build complete.
- Use this image in k8s/compose by exporting NOMOS_TESTNET_IMAGE=${IMAGE_TAG}
- Use this image in k8s/compose by exporting LOGOS_BLOCKCHAIN_TESTNET_IMAGE=${IMAGE_TAG}
EOF
}
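A sketch of a typical local flow under the renamed variables, assuming the defaults described in this help text (the bundle path and version are illustrative; the script prints the exact image tag to export):

  LOGOS_BLOCKCHAIN_BINARIES_TAR=.tmp/nomos-binaries-linux-v0.3.2.tar.gz \
    scripts/build/build_test_image.sh --version v0.3.2
  export LOGOS_BLOCKCHAIN_TESTNET_IMAGE=logos-blockchain-testing:local   # or the tag printed by the script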

View File

@ -16,8 +16,8 @@ Usage:
scripts/ops/update-nomos-rev.sh --unskip-worktree
Notes:
--rev sets NOMOS_NODE_REV and updates Cargo.toml revs
--path sets NOMOS_NODE_PATH (clears NOMOS_NODE_REV) and patches Cargo.toml to use a local logos-blockchain-node checkout
--rev sets LOGOS_BLOCKCHAIN_NODE_REV and updates Cargo.toml revs
--path sets LOGOS_BLOCKCHAIN_NODE_PATH (clears LOGOS_BLOCKCHAIN_NODE_REV) and patches Cargo.toml to use a local logos-blockchain-node checkout
--unskip-worktree clears any skip-worktree flag for Cargo.toml
Only one may be used at a time.
EOF
@ -88,8 +88,8 @@ update_nomos_rev::update_to_rev() {
echo "Updating logos-blockchain-node rev to ${rev}"
sed -i.bak -E \
-e "s/^#?[[:space:]]*NOMOS_NODE_REV=.*/NOMOS_NODE_REV=${rev}/" \
-e "s/^#?[[:space:]]*NOMOS_NODE_PATH=.*/# NOMOS_NODE_PATH=/" \
-e "s/^#?[[:space:]]*LOGOS_BLOCKCHAIN_NODE_REV=.*/LOGOS_BLOCKCHAIN_NODE_REV=${rev}/" \
-e "s/^#?[[:space:]]*LOGOS_BLOCKCHAIN_NODE_PATH=.*/# LOGOS_BLOCKCHAIN_NODE_PATH=/" \
"${ROOT_DIR}/versions.env"
rm -f "${ROOT_DIR}/versions.env.bak"
@ -123,12 +123,12 @@ update_nomos_rev::update_to_path() {
[ -d "${node_path}" ] || common::die "path does not exist: ${node_path}"
local current_rev escaped_path
current_rev="$(grep -E '^[#[:space:]]*NOMOS_NODE_REV=' "${ROOT_DIR}/versions.env" | head -n1 | sed -E 's/^#?[[:space:]]*NOMOS_NODE_REV=//')"
current_rev="$(grep -E '^[#[:space:]]*LOGOS_BLOCKCHAIN_NODE_REV=' "${ROOT_DIR}/versions.env" | head -n1 | sed -E 's/^#?[[:space:]]*LOGOS_BLOCKCHAIN_NODE_REV=//')"
escaped_path="${node_path//\//\\/}"
sed -i.bak -E \
-e "s/^#?[[:space:]]*NOMOS_NODE_PATH=.*/NOMOS_NODE_PATH=${escaped_path}/" \
-e "s/^#?[[:space:]]*NOMOS_NODE_REV=.*/# NOMOS_NODE_REV=${current_rev}/" \
-e "s/^#?[[:space:]]*LOGOS_BLOCKCHAIN_NODE_PATH=.*/LOGOS_BLOCKCHAIN_NODE_PATH=${escaped_path}/" \
-e "s/^#?[[:space:]]*LOGOS_BLOCKCHAIN_NODE_REV=.*/# LOGOS_BLOCKCHAIN_NODE_REV=${current_rev}/" \
"${ROOT_DIR}/versions.env"
rm -f "${ROOT_DIR}/versions.env.bak"
@ -226,8 +226,8 @@ update_nomos_rev::main() {
update_nomos_rev::load_env
update_nomos_rev::parse_args "$@"
update_nomos_rev::ensure_env_key "NOMOS_NODE_REV" "# NOMOS_NODE_REV="
update_nomos_rev::ensure_env_key "NOMOS_NODE_PATH" "# NOMOS_NODE_PATH="
update_nomos_rev::ensure_env_key "LOGOS_BLOCKCHAIN_NODE_REV" "# LOGOS_BLOCKCHAIN_NODE_REV="
update_nomos_rev::ensure_env_key "LOGOS_BLOCKCHAIN_NODE_PATH" "# LOGOS_BLOCKCHAIN_NODE_PATH="
if [ "${UNSKIP_WORKTREE}" -eq 1 ]; then
update_nomos_rev::maybe_unskip_worktree "Cargo.toml"
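Example invocations under the renamed keys (the revision is the one pinned in versions.env; the local checkout path is purely illustrative):

  # Pin a new logos-blockchain-node revision (updates LOGOS_BLOCKCHAIN_NODE_REV and Cargo.toml revs)
  scripts/ops/update-nomos-rev.sh --rev 47ae18e95f643bde563b4769212b37f6f018fed3

  # Switch to a local checkout (sets LOGOS_BLOCKCHAIN_NODE_PATH and comments out the rev)
  scripts/ops/update-nomos-rev.sh --path /path/to/nomos-node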

View File

@ -47,9 +47,9 @@ checks::print_workspace() {
if [ -f "${ROOT_DIR}/versions.env" ]; then
checks::ok "versions.env present"
checks::say "VERSION=${VERSION:-<unset>}"
checks::say "NOMOS_NODE_REV=${NOMOS_NODE_REV:-<unset>}"
if [ -n "${NOMOS_NODE_PATH:-}" ]; then
checks::say "NOMOS_NODE_PATH=${NOMOS_NODE_PATH}"
checks::say "LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV:-<unset>}"
if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH:-}" ]; then
checks::say "LOGOS_BLOCKCHAIN_NODE_PATH=${LOGOS_BLOCKCHAIN_NODE_PATH}"
fi
else
checks::warn "versions.env missing (scripts depend on it)"
@ -118,9 +118,9 @@ checks::print_docker() {
checks::warn "could not query docker engine arch (is Docker running?)"
fi
local bundle_platform="${NOMOS_BUNDLE_DOCKER_PLATFORM:-${NOMOS_BIN_PLATFORM:-}}"
local bundle_platform="${LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM:-${LOGOS_BLOCKCHAIN_BIN_PLATFORM:-}}"
if [ -z "${bundle_platform}" ]; then
checks::say "NOMOS_BUNDLE_DOCKER_PLATFORM=<auto>"
checks::say "LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=<auto>"
if [[ "${server_arch}" == *"linux/arm64"* ]]; then
checks::say "bundle docker platform (auto): ${default_bundle_platform_arm64}"
else
@ -128,19 +128,19 @@ checks::print_docker() {
fi
bundle_platform="auto"
else
checks::say "NOMOS_BUNDLE_DOCKER_PLATFORM=${bundle_platform}"
checks::say "LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=${bundle_platform}"
fi
if [[ "${server_arch}" == *"linux/arm64"* ]] && [ "${bundle_platform}" = "${default_bundle_platform_amd64}" ]; then
checks::warn "Docker engine is linux/arm64 but bundle platform is ${default_bundle_platform_amd64} (emulation). If builds are slow/flaky, set: NOMOS_BUNDLE_DOCKER_PLATFORM=${default_bundle_platform_arm64}"
checks::warn "Docker engine is linux/arm64 but bundle platform is ${default_bundle_platform_amd64} (emulation). If builds are slow/flaky, set: LOGOS_BLOCKCHAIN_BUNDLE_DOCKER_PLATFORM=${default_bundle_platform_arm64}"
fi
local image="${NOMOS_TESTNET_IMAGE:-${default_local_image}}"
checks::say "NOMOS_TESTNET_IMAGE=${image}"
local image="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${default_local_image}}"
checks::say "LOGOS_BLOCKCHAIN_TESTNET_IMAGE=${image}"
if docker image inspect "${image}" >/dev/null 2>&1; then
checks::ok "testnet image present locally"
else
checks::warn "testnet image not present locally (compose/k8s runs will rebuild or fail if NOMOS_SKIP_IMAGE_BUILD=1)"
checks::warn "testnet image not present locally (compose/k8s runs will rebuild or fail if LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)"
fi
}
@ -186,7 +186,7 @@ checks::print_k8s_image_visibility() {
checks::section "K8s Image Visibility"
local default_local_image="logos-blockchain-testing:local"
local image="${NOMOS_TESTNET_IMAGE:-${default_local_image}}"
local image="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${default_local_image}}"
if [ -z "${KUBE_CONTEXT:-}" ]; then
return 0
@ -211,7 +211,7 @@ checks::print_k8s_image_visibility() {
*)
if [[ "${image}" == *":local" ]]; then
checks::warn "current context is ${KUBE_CONTEXT}; a :local image tag may not be reachable by cluster nodes"
checks::say "Suggested: push to a registry and set NOMOS_TESTNET_IMAGE, or load into the cluster if supported"
checks::say "Suggested: push to a registry and set LOGOS_BLOCKCHAIN_TESTNET_IMAGE, or load into the cluster if supported"
fi
;;
esac
@ -248,7 +248,7 @@ checks::print_docker_desktop_kubernetes_health() {
checks::print_debug_flags() {
checks::section "Runner Debug Flags (optional)"
checks::say "SLOW_TEST_ENV=${SLOW_TEST_ENV:-<unset>} (if true: doubles readiness timeouts)"
checks::say "NOMOS_SKIP_IMAGE_BUILD=${NOMOS_SKIP_IMAGE_BUILD:-<unset>} (compose/k8s)"
checks::say "LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-<unset>} (compose/k8s)"
checks::say "COMPOSE_RUNNER_PRESERVE=${COMPOSE_RUNNER_PRESERVE:-<unset>} (compose)"
checks::say "K8S_RUNNER_PRESERVE=${K8S_RUNNER_PRESERVE:-<unset>} (k8s)"
checks::say "K8S_RUNNER_DEBUG=${K8S_RUNNER_DEBUG:-<unset>} (k8s helm debug)"
@ -274,7 +274,7 @@ checks::main() {
checks::print_debug_flags
checks::section "Done"
checks::say "If something looks off, start with: scripts/run/run-examples.sh <mode> -t 60 -v 1 -e 1"
checks::say "If something looks off, start with: scripts/run/run-examples.sh <mode> -t 60 -n 1"
}
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then

View File

@ -40,37 +40,37 @@ Modes:
Options:
-t, --run-seconds N Duration to run the demo (required)
-n, --nodes N Number of nodes (required)
--bundle PATH Convenience alias for setting NOMOS_BINARIES_TAR=PATH
--bundle PATH Convenience alias for setting LOGOS_BLOCKCHAIN_BINARIES_TAR=PATH
--metrics-query-url URL PromQL base URL the runner process can query (optional)
--metrics-otlp-ingest-url URL Full OTLP HTTP ingest URL for node metrics export (optional)
--external-prometheus URL Alias for --metrics-query-url
--external-otlp-metrics-endpoint URL Alias for --metrics-otlp-ingest-url
--local Use a local Docker image tag (default for docker-desktop k8s)
--no-image-build Skip rebuilding the compose/k8s image (sets NOMOS_SKIP_IMAGE_BUILD=1)
--no-image-build Skip rebuilding the compose/k8s image (sets LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)
Environment:
VERSION Bundle version (default from versions.env)
CONSENSUS_SLOT_TIME Consensus slot duration in seconds (default 2)
CONSENSUS_ACTIVE_SLOT_COEFF Probability a slot is active (default 0.9); expected block interval ≈ slot_time / coeff
NOMOS_TESTNET_IMAGE Image reference (overridden by --local/--ecr selection)
LOGOS_BLOCKCHAIN_TESTNET_IMAGE Image reference (overridden by --local/--ecr selection)
ECR_IMAGE Full image reference for --ecr (overrides ECR_REGISTRY/ECR_REPO/TAG)
ECR_REGISTRY Registry hostname for --ecr (default ${DEFAULT_PUBLIC_ECR_REGISTRY})
ECR_REPO Repository path for --ecr (default ${DEFAULT_PUBLIC_ECR_REPO})
TAG Tag for --ecr (default ${DEFAULT_ECR_TAG})
NOMOS_TESTNET_IMAGE_PULL_POLICY K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
NOMOS_BINARIES_TAR Path to prebuilt binaries tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
LOGOS_BLOCKCHAIN_BINARIES_TAR Path to prebuilt binaries tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
LOGOS_BLOCKCHAIN_CIRCUITS Directory containing circuits assets (defaults to ~/.logos-blockchain-circuits)
NOMOS_SKIP_IMAGE_BUILD Set to 1 to skip rebuilding the compose/k8s image
NOMOS_FORCE_IMAGE_BUILD Set to 1 to force image rebuild even for k8s ECR mode
NOMOS_METRICS_QUERY_URL PromQL base URL for the runner process (optional)
NOMOS_METRICS_OTLP_INGEST_URL Full OTLP HTTP ingest URL for node metrics export (optional)
NOMOS_GRAFANA_URL Grafana base URL for printing/logging (optional)
LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD Set to 1 to skip rebuilding the compose/k8s image
LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD Set to 1 to force image rebuild even for k8s ECR mode
LOGOS_BLOCKCHAIN_METRICS_QUERY_URL PromQL base URL for the runner process (optional)
LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL Full OTLP HTTP ingest URL for node metrics export (optional)
LOGOS_BLOCKCHAIN_GRAFANA_URL Grafana base URL for printing/logging (optional)
Notes:
- For k8s runs on non-docker-desktop clusters (e.g. EKS), a locally built Docker image is not
visible to the cluster. By default, this script skips local image rebuilds in that case.
If you need a custom image, run scripts/build/build_test_image.sh and push it to a registry the
cluster can pull from, then set NOMOS_TESTNET_IMAGE accordingly.
cluster can pull from, then set LOGOS_BLOCKCHAIN_TESTNET_IMAGE accordingly.
EOF
}
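A minimal invocation that exercises the options above (the bundle path is the default for VERSION=v0.3.2 and is only illustrative):

  # 60-second compose demo with 3 nodes, reusing a prebuilt Linux bundle and the existing image
  scripts/run/run-examples.sh compose -t 60 -n 3 \
    --bundle .tmp/nomos-binaries-linux-v0.3.2.tar.gz \
    --no-image-build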
@ -138,13 +138,13 @@ run_examples::parse_args() {
shift
;;
--bundle)
NOMOS_BINARIES_TAR="${2:-}"
export NOMOS_BINARIES_TAR
LOGOS_BLOCKCHAIN_BINARIES_TAR="${2:-}"
export LOGOS_BLOCKCHAIN_BINARIES_TAR
shift 2
;;
--bundle=*)
NOMOS_BINARIES_TAR="${1#*=}"
export NOMOS_BINARIES_TAR
LOGOS_BLOCKCHAIN_BINARIES_TAR="${1#*=}"
export LOGOS_BLOCKCHAIN_BINARIES_TAR
shift
;;
--metrics-query-url)
@ -184,8 +184,8 @@ run_examples::parse_args() {
shift
;;
--no-image-build)
NOMOS_SKIP_IMAGE_BUILD=1
export NOMOS_SKIP_IMAGE_BUILD
LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1
export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD
shift
;;
compose|host|k8s)
@ -204,8 +204,8 @@ run_examples::parse_args() {
esac
done
if [ -n "${NOMOS_BINARIES_TAR:-}" ] && [ ! -f "${NOMOS_BINARIES_TAR}" ]; then
run_examples::fail_with_usage "NOMOS_BINARIES_TAR is set but missing: ${NOMOS_BINARIES_TAR}"
if [ -n "${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}" ] && [ ! -f "${LOGOS_BLOCKCHAIN_BINARIES_TAR}" ]; then
run_examples::fail_with_usage "LOGOS_BLOCKCHAIN_BINARIES_TAR is set but missing: ${LOGOS_BLOCKCHAIN_BINARIES_TAR}"
fi
if ! common::is_uint "${RUN_SECS_RAW}" || [ "${RUN_SECS_RAW}" -le 0 ]; then
@ -239,8 +239,8 @@ run_examples::select_image() {
fi
if [ "${selection}" = "local" ]; then
IMAGE="${NOMOS_TESTNET_IMAGE:-${DEFAULT_LOCAL_IMAGE}}"
export NOMOS_TESTNET_IMAGE_PULL_POLICY="${NOMOS_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_LOCAL}}"
IMAGE="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE:-${DEFAULT_LOCAL_IMAGE}}"
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_LOCAL}}"
elif [ "${selection}" = "ecr" ]; then
local tag="${TAG:-${DEFAULT_ECR_TAG}}"
if [ -n "${ECR_IMAGE:-}" ]; then
@ -259,35 +259,35 @@ run_examples::select_image() {
local repo="${ECR_REPO:-${DEFAULT_PUBLIC_ECR_REPO}}"
IMAGE="${registry}/${repo}:${tag}"
fi
export NOMOS_TESTNET_IMAGE_PULL_POLICY="${NOMOS_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_ECR}}"
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY="${LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY:-${DEFAULT_PULL_POLICY_ECR}}"
else
run_examples::fail_with_usage "Unknown image selection mode: ${selection}"
fi
export NOMOS_IMAGE_SELECTION="${selection}"
export LOGOS_BLOCKCHAIN_IMAGE_SELECTION="${selection}"
export IMAGE_TAG="${IMAGE}"
export NOMOS_TESTNET_IMAGE="${IMAGE}"
export LOGOS_BLOCKCHAIN_TESTNET_IMAGE="${IMAGE}"
if [ "${MODE}" = "k8s" ] && [ "${selection}" = "ecr" ]; then
# A locally built Docker image isn't visible to remote clusters (e.g. EKS). Default to
# skipping the local rebuild, unless the user explicitly set NOMOS_SKIP_IMAGE_BUILD or
# overrides via NOMOS_FORCE_IMAGE_BUILD=1.
if [ "${NOMOS_FORCE_IMAGE_BUILD:-0}" != "1" ]; then
NOMOS_SKIP_IMAGE_BUILD="${NOMOS_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}"
export NOMOS_SKIP_IMAGE_BUILD
# skipping the local rebuild, unless the user explicitly set LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD or
# overrides via LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD=1.
if [ "${LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD:-0}" != "1" ]; then
LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD="${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}"
export LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD
fi
fi
}
run_examples::default_tar_path() {
if [ -n "${NOMOS_BINARIES_TAR:-}" ]; then
echo "${NOMOS_BINARIES_TAR}"
if [ -n "${LOGOS_BLOCKCHAIN_BINARIES_TAR:-}" ]; then
echo "${LOGOS_BLOCKCHAIN_BINARIES_TAR}"
return
fi
case "${MODE}" in
host) echo "${ROOT_DIR}/.tmp/nomos-binaries-host-${VERSION}.tar.gz" ;;
compose|k8s)
if [ "${NOMOS_SKIP_IMAGE_BUILD:-}" = "1" ]; then
if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-}" = "1" ]; then
echo "${ROOT_DIR}/.tmp/nomos-binaries-host-${VERSION}.tar.gz"
else
echo "${ROOT_DIR}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz"
@ -300,7 +300,7 @@ run_examples::default_tar_path() {
run_examples::bundle_matches_expected() {
local tar_path="$1"
[ -f "${tar_path}" ] || return 1
[ -z "${NOMOS_NODE_REV:-}" ] && return 0
[ -z "${LOGOS_BLOCKCHAIN_NODE_REV:-}" ] && return 0
local meta tar_rev tar_head
meta="$(tar -xOzf "${tar_path}" artifacts/nomos-bundle-meta.env 2>/dev/null || true)"
@ -310,13 +310,13 @@ run_examples::bundle_matches_expected() {
fi
tar_rev="$(echo "${meta}" | sed -n 's/^nomos_node_rev=//p' | head -n 1)"
tar_head="$(echo "${meta}" | sed -n 's/^nomos_node_git_head=//p' | head -n 1)"
if [ -n "${tar_rev}" ] && [ "${tar_rev}" != "${NOMOS_NODE_REV}" ]; then
echo "Bundle ${tar_path} is for logos-blockchain-node rev ${tar_rev}, expected ${NOMOS_NODE_REV}; rebuilding." >&2
if [ -n "${tar_rev}" ] && [ "${tar_rev}" != "${LOGOS_BLOCKCHAIN_NODE_REV}" ]; then
echo "Bundle ${tar_path} is for logos-blockchain-node rev ${tar_rev}, expected ${LOGOS_BLOCKCHAIN_NODE_REV}; rebuilding." >&2
return 1
fi
if [ -n "${tar_head}" ] && echo "${NOMOS_NODE_REV}" | grep -Eq '^[0-9a-f]{7,40}$'; then
if [ "${tar_head}" != "${NOMOS_NODE_REV}" ]; then
echo "Bundle ${tar_path} is for logos-blockchain-node git head ${tar_head}, expected ${NOMOS_NODE_REV}; rebuilding." >&2
if [ -n "${tar_head}" ] && echo "${LOGOS_BLOCKCHAIN_NODE_REV}" | grep -Eq '^[0-9a-f]{7,40}$'; then
if [ "${tar_head}" != "${LOGOS_BLOCKCHAIN_NODE_REV}" ]; then
echo "Bundle ${tar_path} is for logos-blockchain-node git head ${tar_head}, expected ${LOGOS_BLOCKCHAIN_NODE_REV}; rebuilding." >&2
return 1
fi
fi
@ -381,7 +381,7 @@ run_examples::ensure_binaries_tar() {
local platform="$1"
local tar_path="$2"
echo "==> Building fresh binaries bundle (${platform}) at ${tar_path}"
"${ROOT_DIR}/scripts/build/build-bundle.sh" --platform "${platform}" --output "${tar_path}" --rev "${NOMOS_NODE_REV}"
"${ROOT_DIR}/scripts/build/build-bundle.sh" --platform "${platform}" --output "${tar_path}" --rev "${LOGOS_BLOCKCHAIN_NODE_REV}"
}
run_examples::prepare_bundles() {
@ -397,7 +397,7 @@ run_examples::prepare_bundles() {
fi
# On non-Linux compose/k8s runs, use the Linux bundle for image build, then restore host bundle for the runner.
if [ "${MODE}" != "host" ] && [ "$(uname -s)" != "Linux" ] && [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "0" ] && [ -f "${LINUX_TAR}" ]; then
if [ "${MODE}" != "host" ] && [ "$(uname -s)" != "Linux" ] && [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "0" ] && [ -f "${LINUX_TAR}" ]; then
NEED_HOST_RESTORE_AFTER_IMAGE=1
run_examples::restore_binaries_from_tar "${LINUX_TAR}" || {
run_examples::ensure_binaries_tar linux "${LINUX_TAR}"
@ -411,7 +411,7 @@ run_examples::prepare_bundles() {
case "${MODE}" in
host) run_examples::ensure_binaries_tar host "${tar_path}" ;;
compose|k8s)
if [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "1" ]; then
if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "1" ]; then
run_examples::ensure_binaries_tar host "${tar_path}"
else
run_examples::ensure_binaries_tar linux "${tar_path}"
@ -421,7 +421,7 @@ run_examples::prepare_bundles() {
esac
run_examples::restore_binaries_from_tar "${tar_path}" || common::die \
"Missing or invalid binaries tarball. Provide it via --bundle/NOMOS_BINARIES_TAR or place it at $(run_examples::default_tar_path)."
"Missing or invalid binaries tarball. Provide it via --bundle/LOGOS_BLOCKCHAIN_BINARIES_TAR or place it at $(run_examples::default_tar_path)."
fi
}
@ -430,8 +430,8 @@ run_examples::maybe_rebuild_image() {
return 0
fi
if [ "${NOMOS_SKIP_IMAGE_BUILD:-0}" = "1" ]; then
echo "==> Skipping testnet image rebuild (NOMOS_SKIP_IMAGE_BUILD=1)"
if [ "${LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD:-0}" = "1" ]; then
echo "==> Skipping testnet image rebuild (LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD=1)"
return 0
fi
@ -489,14 +489,14 @@ run_examples::ensure_circuits() {
}
run_examples::run() {
export NOMOS_DEMO_RUN_SECS="${RUN_SECS}"
export NOMOS_DEMO_NODES="${DEMO_NODES}"
export LOGOS_BLOCKCHAIN_DEMO_RUN_SECS="${RUN_SECS}"
export LOGOS_BLOCKCHAIN_DEMO_NODES="${DEMO_NODES}"
if [ -n "${METRICS_QUERY_URL}" ]; then
export NOMOS_METRICS_QUERY_URL="${METRICS_QUERY_URL}"
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL="${METRICS_QUERY_URL}"
fi
if [ -n "${METRICS_OTLP_INGEST_URL}" ]; then
export NOMOS_METRICS_OTLP_INGEST_URL="${METRICS_OTLP_INGEST_URL}"
export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL="${METRICS_OTLP_INGEST_URL}"
fi
if [ "${MODE}" = "host" ]; then
@ -508,7 +508,7 @@ run_examples::run() {
POL_PROOF_DEV_MODE=true \
TESTNET_PRINT_ENDPOINTS=1 \
NOMOS_TESTNET_IMAGE="${IMAGE}" \
LOGOS_BLOCKCHAIN_TESTNET_IMAGE="${IMAGE}" \
LOGOS_BLOCKCHAIN_NODE_BIN="${LOGOS_BLOCKCHAIN_NODE_BIN:-}" \
cargo run -p runner-examples --bin "${BIN}"
}

View File

@ -250,14 +250,14 @@ matrix::main() {
# On non-docker-desktop clusters, run-examples.sh defaults to skipping local image builds
# since the cluster can't see them. Honor the matrix "force" option by overriding.
if [ "${ctx}" != "docker-desktop" ] && [ "${FORCE_K8S_IMAGE_BUILD}" -eq 1 ]; then
export NOMOS_FORCE_IMAGE_BUILD=1
export LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD=1
fi
matrix::run_case "k8s.image_build" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
-t "${RUN_SECS}" -n "${NODES}" \
"${forward[@]}" \
k8s
unset NOMOS_FORCE_IMAGE_BUILD || true
unset LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD || true
else
echo "==> [k8s] Detected context '${ctx}'; skipping image-build variant (use --force-k8s-image-build to override)"
fi
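The same override can be applied by hand outside the matrix script, since run-examples.sh honors the variable directly:

  LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD=1 scripts/run/run-examples.sh k8s -t 60 -n 1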

View File

@ -17,12 +17,12 @@ Usage:
Compose:
- Runs Prometheus (+ OTLP receiver) and Grafana via docker compose.
- Prints NOMOS_METRICS_* / NOMOS_GRAFANA_URL exports to wire into runs.
- Prints LOGOS_BLOCKCHAIN_METRICS_* / LOGOS_BLOCKCHAIN_GRAFANA_URL exports to wire into runs.
Kubernetes:
- Installs prometheus-community/kube-prometheus-stack into namespace
"logos-observability" and optionally loads Logos Grafana dashboards.
- Prints port-forward commands + NOMOS_METRICS_* / NOMOS_GRAFANA_URL exports.
- Prints port-forward commands + LOGOS_BLOCKCHAIN_METRICS_* / LOGOS_BLOCKCHAIN_GRAFANA_URL exports.
USAGE
}
@ -43,14 +43,14 @@ compose_run() {
compose_env() {
cat <<'EOF'
export NOMOS_METRICS_QUERY_URL=http://localhost:9090
export NOMOS_METRICS_OTLP_INGEST_URL=http://host.docker.internal:9090/api/v1/otlp/v1/metrics
export NOMOS_GRAFANA_URL=http://localhost:3000
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090
export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://host.docker.internal:9090/api/v1/otlp/v1/metrics
export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000
EOF
}
k8s_namespace() { echo "${LOGOS_OBSERVABILITY_NAMESPACE:-${NOMOS_OBSERVABILITY_NAMESPACE:-logos-observability}}"; }
k8s_release() { echo "${LOGOS_OBSERVABILITY_RELEASE:-${NOMOS_OBSERVABILITY_RELEASE:-logos-observability}}"; }
k8s_namespace() { echo "${LOGOS_OBSERVABILITY_NAMESPACE:-${LOGOS_BLOCKCHAIN_OBSERVABILITY_NAMESPACE:-logos-observability}}"; }
k8s_release() { echo "${LOGOS_OBSERVABILITY_RELEASE:-${LOGOS_BLOCKCHAIN_OBSERVABILITY_RELEASE:-logos-observability}}"; }
k8s_values() { echo "${ROOT}/scripts/observability/k8s/kube-prometheus-stack.values.yaml"; }
k8s_install() {
@ -119,14 +119,14 @@ k8s_env() {
cat <<EOF
# Prometheus (runner-side): port-forward then set:
kubectl -n ${ns} port-forward svc/${release}-kube-p-prometheus 9090:9090
export NOMOS_METRICS_QUERY_URL=http://localhost:9090
export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090
# Grafana (runner-side): port-forward then set:
kubectl -n ${ns} port-forward svc/${release}-grafana 3000:80
export NOMOS_GRAFANA_URL=http://localhost:3000
export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000
# Prometheus OTLP ingest (node-side inside the cluster):
export NOMOS_METRICS_OTLP_INGEST_URL=http://${release}-kube-p-prometheus.${ns}:9090/api/v1/otlp/v1/metrics
export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://${release}-kube-p-prometheus.${ns}:9090/api/v1/otlp/v1/metrics
EOF
}
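Wiring the compose observability stack into a run then amounts to applying the printed exports before starting the demo (URLs are the compose defaults shown above):

  export LOGOS_BLOCKCHAIN_METRICS_QUERY_URL=http://localhost:9090
  export LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL=http://host.docker.internal:9090/api/v1/otlp/v1/metrics
  export LOGOS_BLOCKCHAIN_GRAFANA_URL=http://localhost:3000
  scripts/run/run-examples.sh compose -t 60 -n 1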

View File

@ -3,7 +3,7 @@
# Ignore warnings about sensitive information as this is test data.
ARG VERSION
ARG NOMOS_NODE_REV
ARG LOGOS_BLOCKCHAIN_NODE_REV
# ===========================
# BUILD IMAGE
@ -12,7 +12,7 @@ ARG NOMOS_NODE_REV
FROM rust:1.91.0-slim-bookworm AS builder
ARG VERSION
ARG NOMOS_NODE_REV
ARG LOGOS_BLOCKCHAIN_NODE_REV
LABEL maintainer="augustinas@status.im" \
source="https://github.com/logos-co/nomos-node" \
@ -23,7 +23,7 @@ COPY . .
# Reduce debug artifact size.
ENV CARGO_PROFILE_DEV_DEBUG=0
ENV NOMOS_NODE_REV=${NOMOS_NODE_REV}
ENV LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV}
# Install dependencies needed for building RocksDB and for circuit tooling.
RUN apt-get update && apt-get install -yq \

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
NOMOS_NODE_REV="${NOMOS_NODE_REV:?NOMOS_NODE_REV build arg missing}"
LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:?LOGOS_BLOCKCHAIN_NODE_REV build arg missing}"
mkdir -p /workspace/artifacts
@ -42,11 +42,11 @@ else
echo "Prebuilt logos-blockchain binaries missing; building from source"
fi
echo "Building logos-blockchain binaries from source (rev ${NOMOS_NODE_REV})"
echo "Building logos-blockchain binaries from source (rev ${LOGOS_BLOCKCHAIN_NODE_REV})"
git clone https://github.com/logos-co/nomos-node.git /tmp/nomos-node
cd /tmp/nomos-node
git fetch --depth 1 origin "${NOMOS_NODE_REV}"
git checkout "${NOMOS_NODE_REV}"
git fetch --depth 1 origin "${LOGOS_BLOCKCHAIN_NODE_REV}"
git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"
git reset --hard
git clean -fdx

View File

@ -42,11 +42,11 @@ check_binary_arch "$bin_path" "logos-blockchain-${role}"
host_identifier_default="${role}-$(hostname -i)"
export CFG_FILE_PATH="/config.yaml" \
CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:${NOMOS_CFGSYNC_PORT:-4400}}" \
CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:${LOGOS_BLOCKCHAIN_CFGSYNC_PORT:-4400}}" \
CFG_HOST_IP=$(hostname -i) \
CFG_HOST_KIND="${CFG_HOST_KIND:-$role}" \
CFG_HOST_IDENTIFIER="${CFG_HOST_IDENTIFIER:-$host_identifier_default}" \
NOMOS_TIME_BACKEND="${NOMOS_TIME_BACKEND:-monotonic}" \
LOGOS_BLOCKCHAIN_TIME_BACKEND="${LOGOS_BLOCKCHAIN_TIME_BACKEND:-monotonic}" \
LOG_LEVEL="${LOG_LEVEL:-INFO}" \
POL_PROOF_DEV_MODE="${POL_PROOF_DEV_MODE:-true}"

View File

@ -32,7 +32,8 @@ pub const DEFAULT_DA_NETWORK_PORT: u16 = 3300;
/// Default blend network port.
pub const DEFAULT_BLEND_NETWORK_PORT: u16 = 3400; //4401;
/// Resolve cfgsync port from `NOMOS_CFGSYNC_PORT`, falling back to the default.
/// Resolve cfgsync port from `LOGOS_BLOCKCHAIN_CFGSYNC_PORT`, falling back to
/// the default.
pub fn cfgsync_port() -> u16 {
tf_env::nomos_cfgsync_port().unwrap_or(DEFAULT_CFGSYNC_PORT)
}
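Both this helper and the container entrypoint read the same variable (the entrypoint falls back to 4400), so a single export moves cfgsync everywhere; the port value here is illustrative:

  export LOGOS_BLOCKCHAIN_CFGSYNC_PORT=4401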

View File

@ -18,33 +18,42 @@ fn env_duration(key: &str, default: u64) -> Duration {
}
pub fn dispersal_timeout() -> Duration {
env_duration("NOMOS_DISPERSAL_TIMEOUT_SECS", DISPERSAL_TIMEOUT_SECS)
env_duration(
"LOGOS_BLOCKCHAIN_DISPERSAL_TIMEOUT_SECS",
DISPERSAL_TIMEOUT_SECS,
)
}
pub fn retry_cooldown() -> Duration {
env_duration("NOMOS_RETRY_COOLDOWN_SECS", RETRY_COOLDOWN_SECS)
env_duration("LOGOS_BLOCKCHAIN_RETRY_COOLDOWN_SECS", RETRY_COOLDOWN_SECS)
}
pub fn grace_period() -> Duration {
env_duration("NOMOS_GRACE_PERIOD_SECS", GRACE_PERIOD_SECS)
env_duration("LOGOS_BLOCKCHAIN_GRACE_PERIOD_SECS", GRACE_PERIOD_SECS)
}
pub fn prune_duration() -> Duration {
env_duration("NOMOS_PRUNE_DURATION_SECS", PRUNE_DURATION_SECS)
env_duration("LOGOS_BLOCKCHAIN_PRUNE_DURATION_SECS", PRUNE_DURATION_SECS)
}
pub fn prune_interval() -> Duration {
env_duration("NOMOS_PRUNE_INTERVAL_SECS", PRUNE_INTERVAL_SECS)
env_duration("LOGOS_BLOCKCHAIN_PRUNE_INTERVAL_SECS", PRUNE_INTERVAL_SECS)
}
pub fn share_duration() -> Duration {
env_duration("NOMOS_SHARE_DURATION_SECS", SHARE_DURATION_SECS)
env_duration("LOGOS_BLOCKCHAIN_SHARE_DURATION_SECS", SHARE_DURATION_SECS)
}
pub fn commitments_wait() -> Duration {
env_duration("NOMOS_COMMITMENTS_WAIT_SECS", COMMITMENTS_WAIT_SECS)
env_duration(
"LOGOS_BLOCKCHAIN_COMMITMENTS_WAIT_SECS",
COMMITMENTS_WAIT_SECS,
)
}
pub fn sdp_trigger_delay() -> Duration {
env_duration("NOMOS_SDP_TRIGGER_DELAY_SECS", SDP_TRIGGER_DELAY_SECS)
env_duration(
"LOGOS_BLOCKCHAIN_SDP_TRIGGER_DELAY_SECS",
SDP_TRIGGER_DELAY_SECS,
)
}
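Each helper follows the same pattern: an optional *_SECS environment override parsed into a Duration, falling back to the compiled default. For example (values illustrative, not the defaults):

  export LOGOS_BLOCKCHAIN_DISPERSAL_TIMEOUT_SECS=120
  export LOGOS_BLOCKCHAIN_RETRY_COOLDOWN_SECS=5
  export LOGOS_BLOCKCHAIN_PRUNE_INTERVAL_SECS=30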

View File

@ -12,8 +12,8 @@ use tracing::debug;
use crate::nodes::common::config::injection::normalize_ed25519_sigs;
/// Configure tracing logger to write into `NOMOS_LOG_DIR` if set, else into the
/// provided base dir.
/// Configure tracing logger to write into `LOGOS_BLOCKCHAIN_LOG_DIR` if set,
/// else into the provided base dir.
pub fn configure_logging<F>(base_dir: &Path, prefix: &str, set_logger: F)
where
F: FnOnce(FileConfig),

View File

@ -2,7 +2,7 @@ use async_trait::async_trait;
use reqwest::Url;
use super::DynError;
use crate::{nodes::ApiClient, topology::generation::NodeKind};
use crate::nodes::ApiClient;
/// Marker type used by scenario builders to request node control support.
#[derive(Clone, Copy, Debug, Default)]
@ -91,6 +91,5 @@ pub trait NodeControlHandle: Send + Sync {
#[derive(Clone)]
pub struct StartedNode {
pub name: String,
pub kind: NodeKind,
pub api: ApiClient,
}

View File

@ -295,13 +295,6 @@ impl<Caps> TopologyConfigurator<Caps> {
self
}
#[must_use]
/// Legacy alias for node count (kept for downstream compatibility; remove
/// later).
pub fn validators(self, count: usize) -> Self {
self.nodes(count)
}
/// Use a star libp2p network layout.
#[must_use]
pub fn network_star(mut self) -> Self {

View File

@ -7,33 +7,21 @@ use thiserror::Error;
use tokio::time::{Instant, sleep};
use tracing::{debug, info};
/// Kind used for labelling readiness probes.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NodeKind {
Node,
}
impl NodeKind {
#[must_use]
pub const fn label(self) -> &'static str {
match self {
Self::Node => "node",
}
}
}
/// Label used for readiness probes.
pub const NODE_ROLE: &str = "node";
/// Error raised when HTTP readiness checks time out.
#[derive(Clone, Copy, Debug, Error)]
#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", role = role.label())]
#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")]
pub struct HttpReadinessError {
role: NodeKind,
role: &'static str,
port: u16,
timeout: Duration,
}
impl HttpReadinessError {
#[must_use]
pub const fn new(role: NodeKind, port: u16, timeout: Duration) -> Self {
pub const fn new(role: &'static str, port: u16, timeout: Duration) -> Self {
Self {
role,
port,
@ -42,7 +30,7 @@ impl HttpReadinessError {
}
#[must_use]
pub const fn role(&self) -> NodeKind {
pub const fn role(&self) -> &'static str {
self.role
}
@ -60,7 +48,7 @@ impl HttpReadinessError {
/// Wait for HTTP readiness on the provided ports against localhost.
pub async fn wait_for_http_ports(
ports: &[u16],
role: NodeKind,
role: &'static str,
timeout_duration: Duration,
poll_interval: Duration,
) -> Result<(), HttpReadinessError> {
@ -70,7 +58,7 @@ pub async fn wait_for_http_ports(
/// Wait for HTTP readiness on the provided ports against a specific host.
pub async fn wait_for_http_ports_with_host(
ports: &[u16],
role: NodeKind,
role: &'static str,
host: &str,
timeout_duration: Duration,
poll_interval: Duration,
@ -80,7 +68,7 @@ pub async fn wait_for_http_ports_with_host(
}
info!(
role = role.label(),
role,
?ports,
host,
timeout_secs = timeout_duration.as_secs_f32(),
@ -106,13 +94,13 @@ pub async fn wait_for_http_ports_with_host(
async fn wait_for_single_port(
client: ReqwestClient,
port: u16,
role: NodeKind,
role: &'static str,
host: &str,
timeout_duration: Duration,
poll_interval: Duration,
) -> Result<(), HttpReadinessError> {
let url = format!("http://{host}:{port}{}", paths::CRYPTARCHIA_INFO);
debug!(role = role.label(), %url, "probing HTTP endpoint");
debug!(role, %url, "probing HTTP endpoint");
let start = Instant::now();
let deadline = start + timeout_duration;
let mut attempts: u64 = 0;
@ -123,7 +111,7 @@ async fn wait_for_single_port(
let last_failure: Option<String> = match client.get(&url).send().await {
Ok(response) if response.status().is_success() => {
info!(
role = role.label(),
role,
port,
host,
%url,
@ -142,7 +130,7 @@ async fn wait_for_single_port(
if attempts == 1 || attempts % 10 == 0 {
debug!(
role = role.label(),
role,
port,
host,
%url,
@ -155,7 +143,7 @@ async fn wait_for_single_port(
if Instant::now() >= deadline {
info!(
role = role.label(),
role,
port,
host,
%url,

View File

@ -60,9 +60,9 @@ impl ObservabilityInputs {
/// vars are also accepted as aliases for backwards compatibility.
pub fn from_env() -> Result<Self, MetricsError> {
Ok(Self {
metrics_query_url: read_url_var(&["NOMOS_METRICS_QUERY_URL"])?,
metrics_otlp_ingest_url: read_url_var(&["NOMOS_METRICS_OTLP_INGEST_URL"])?,
grafana_url: read_url_var(&["NOMOS_GRAFANA_URL"])?,
metrics_query_url: read_url_var(&["LOGOS_BLOCKCHAIN_METRICS_QUERY_URL"])?,
metrics_otlp_ingest_url: read_url_var(&["LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL"])?,
grafana_url: read_url_var(&["LOGOS_BLOCKCHAIN_GRAFANA_URL"])?,
})
}

View File

@ -20,7 +20,7 @@ use thiserror::Error;
use crate::topology::{
configs::{GeneralConfig, time::default_time_config},
generation::{GeneratedNodeConfig, GeneratedTopology, NodeKind},
generation::{GeneratedNodeConfig, GeneratedTopology},
utils::{TopologyResolveError, create_kms_configs, resolve_ids, resolve_ports},
};
@ -319,10 +319,8 @@ fn build_node_descriptors(
kms_config,
};
let (kind, index) = (NodeKind::Node, i);
let descriptor = GeneratedNodeConfig {
kind,
index,
index: i,
id,
general,
blend_port,

View File

@ -9,16 +9,9 @@ use crate::topology::{
readiness::{HttpNetworkReadiness, ReadinessCheck, ReadinessError},
};
/// Node kind within the generated topology.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum NodeKind {
Node,
}
/// Fully generated configuration for an individual node.
#[derive(Clone)]
pub struct GeneratedNodeConfig {
pub kind: NodeKind,
pub index: usize,
pub id: [u8; 32],
pub general: GeneralConfig,
@ -27,13 +20,7 @@ pub struct GeneratedNodeConfig {
impl GeneratedNodeConfig {
#[must_use]
/// Logical kind of the node.
pub const fn kind(&self) -> NodeKind {
self.kind
}
#[must_use]
/// Zero-based index within its role group.
/// Zero-based index within the topology.
pub const fn index(&self) -> usize {
self.index
}
@ -75,7 +62,7 @@ impl GeneratedTopology {
&self.nodes
}
/// Iterator over all node configs in role order.
/// Iterator over all node configs in topology order.
pub fn iter(&self) -> impl Iterator<Item = &GeneratedNodeConfig> {
self.nodes.iter()
}

View File

@ -64,7 +64,6 @@ impl<'a> ComposeDescriptorBuilder<'a> {
let nodes = build_nodes(
self.topology.nodes(),
ComposeNodeKind::Node,
&image,
platform.as_deref(),
cfgsync_port,
@ -74,28 +73,14 @@ impl<'a> ComposeDescriptorBuilder<'a> {
}
}
#[derive(Clone, Copy)]
pub(crate) enum ComposeNodeKind {
Node,
}
const NODE_ENTRYPOINT: &str = "/etc/nomos/scripts/run_nomos_node.sh";
impl ComposeNodeKind {
fn instance_name(self, index: usize) -> String {
match self {
Self::Node => format!("node-{index}"),
}
}
const fn entrypoint(self) -> &'static str {
match self {
Self::Node => "/etc/nomos/scripts/run_nomos_node.sh",
}
}
pub(crate) fn node_instance_name(index: usize) -> String {
format!("node-{index}")
}
fn build_nodes(
nodes: &[GeneratedNodeConfig],
kind: ComposeNodeKind,
image: &str,
platform: Option<&str>,
cfgsync_port: u16,
@ -103,9 +88,7 @@ fn build_nodes(
nodes
.iter()
.enumerate()
.map(|(index, node)| {
NodeDescriptor::from_node(kind, index, node, image, platform, cfgsync_port)
})
.map(|(index, node)| NodeDescriptor::from_node(index, node, image, platform, cfgsync_port))
.collect()
}
@ -143,8 +126,8 @@ fn base_environment(cfgsync_port: u16) -> Vec<EnvEntry> {
vec![
EnvEntry::new("POL_PROOF_DEV_MODE", pol_mode),
EnvEntry::new("RUST_LOG", rust_log),
EnvEntry::new("NOMOS_LOG_LEVEL", nomos_log_level),
EnvEntry::new("NOMOS_TIME_BACKEND", time_backend),
EnvEntry::new("LOGOS_BLOCKCHAIN_LOG_LEVEL", nomos_log_level),
EnvEntry::new("LOGOS_BLOCKCHAIN_TIME_BACKEND", time_backend),
EnvEntry::new(
"CFG_SERVER_ADDR",
format!("http://host.docker.internal:{cfgsync_port}"),

View File

@ -1,7 +1,9 @@
use serde::Serialize;
use testing_framework_core::topology::generation::GeneratedNodeConfig;
use super::{ComposeNodeKind, base_environment, base_volumes, default_extra_hosts};
use super::{
NODE_ENTRYPOINT, base_environment, base_volumes, default_extra_hosts, node_instance_name,
};
/// Describes a node container in the compose stack.
#[derive(Clone, Debug, Serialize)]
@ -45,7 +47,6 @@ impl EnvEntry {
impl NodeDescriptor {
pub(crate) fn from_node(
kind: ComposeNodeKind,
index: usize,
node: &GeneratedNodeConfig,
image: &str,
@ -53,7 +54,7 @@ impl NodeDescriptor {
cfgsync_port: u16,
) -> Self {
let mut environment = base_environment(cfgsync_port);
let identifier = kind.instance_name(index);
let identifier = node_instance_name(index);
let api_port = node.general.api_config.address.port();
let testing_port = node.general.api_config.testing_http_address.port();
environment.extend([
@ -76,9 +77,9 @@ impl NodeDescriptor {
];
Self {
name: kind.instance_name(index),
name: node_instance_name(index),
image: image.to_owned(),
entrypoint: kind.entrypoint().to_owned(),
entrypoint: NODE_ENTRYPOINT.to_owned(),
volumes: base_volumes(),
extra_hosts: default_extra_hosts(),
ports,

View File

@ -4,7 +4,7 @@ use testing_framework_env as tf_env;
use tracing::debug;
/// Select the compose image and optional platform, honoring
/// NOMOS_TESTNET_IMAGE.
/// LOGOS_BLOCKCHAIN_TESTNET_IMAGE.
pub fn resolve_image() -> (String, Option<String>) {
let image = tf_env::nomos_testnet_image()
.unwrap_or_else(|| String::from("logos-blockchain-testing:local"));

View File

@ -1,10 +1,7 @@
use std::path::PathBuf;
use testing_framework_core::{
scenario::{
MetricsError,
http_probe::{HttpReadinessError, NodeKind},
},
scenario::{MetricsError, http_probe::HttpReadinessError},
topology::readiness::ReadinessError,
};
use url::ParseError;
@ -45,7 +42,7 @@ pub enum ComposeRunnerError {
source: anyhow::Error,
},
#[error(
"docker image '{image}' is not available; set NOMOS_TESTNET_IMAGE or build the image manually"
"docker image '{image}' is not available; set LOGOS_BLOCKCHAIN_TESTNET_IMAGE or build the image manually"
)]
MissingImage { image: String },
#[error("failed to prepare docker image: {source}")]
@ -103,9 +100,9 @@ pub enum ConfigError {
pub enum StackReadinessError {
#[error(transparent)]
Http(#[from] HttpReadinessError),
#[error("failed to build readiness URL for {role} port {port}: {source}", role = role.label())]
#[error("failed to build readiness URL for {role} port {port}: {source}")]
Endpoint {
role: NodeKind,
role: &'static str,
port: u16,
#[source]
source: ParseError,
@ -120,12 +117,9 @@ pub enum StackReadinessError {
#[derive(Debug, thiserror::Error)]
/// Node client construction failures.
pub enum NodeClientError {
#[error(
"failed to build {endpoint} client URL for {role} port {port}: {source}",
role = role.label()
)]
#[error("failed to build {endpoint} client URL for {role} port {port}: {source}")]
Endpoint {
role: NodeKind,
role: &'static str,
endpoint: &'static str,
port: u16,
#[source]

View File

@ -3,8 +3,7 @@ use std::time::Duration;
use anyhow::{Context as _, anyhow};
use reqwest::Url;
use testing_framework_core::{
adjust_timeout, scenario::http_probe::NodeKind as HttpNodeKind,
topology::generation::GeneratedTopology,
adjust_timeout, scenario::http_probe::NODE_ROLE, topology::generation::GeneratedTopology,
};
use tokio::{process::Command, time::timeout};
use tracing::{debug, info};
@ -132,7 +131,7 @@ pub async fn ensure_remote_readiness_with_ports(
let node_urls = mapping
.nodes
.iter()
.map(|ports| readiness_url(HttpNodeKind::Node, ports.api))
.map(|ports| readiness_url(NODE_ROLE, ports.api))
.collect::<Result<Vec<_>, _>>()?;
descriptors
@ -141,7 +140,7 @@ pub async fn ensure_remote_readiness_with_ports(
.map_err(|source| StackReadinessError::Remote { source })
}
fn readiness_url(role: HttpNodeKind, port: u16) -> Result<Url, StackReadinessError> {
fn readiness_url(role: &'static str, port: u16) -> Result<Url, StackReadinessError> {
localhost_url(port).map_err(|source| StackReadinessError::Endpoint { role, port, source })
}

View File

@ -3,7 +3,7 @@ use std::time::Duration;
use reqwest::Url;
use testing_framework_core::{
nodes::ApiClient,
scenario::{NodeClients, http_probe::NodeKind as HttpNodeKind},
scenario::{NodeClients, http_probe::NODE_ROLE},
topology::generation::GeneratedTopology,
};
use tokio::time::sleep;
@ -42,14 +42,14 @@ pub fn build_node_clients_with_ports(
.nodes()
.iter()
.zip(mapping.nodes.iter())
.map(|(_node, ports)| api_client_from_host_ports(HttpNodeKind::Node, ports, host))
.map(|(_node, ports)| api_client_from_host_ports(NODE_ROLE, ports, host))
.collect::<Result<Vec<_>, _>>()?;
Ok(NodeClients::new(nodes))
}
fn api_client_from_host_ports(
role: HttpNodeKind,
role: &'static str,
ports: &NodeHostPorts,
host: &str,
) -> Result<ApiClient, NodeClientError> {

View File

@ -2,7 +2,7 @@ use std::{env, time::Duration};
use testing_framework_core::{
adjust_timeout,
scenario::http_probe::{self, HttpReadinessError, NodeKind},
scenario::http_probe::{self, HttpReadinessError, NODE_ROLE},
};
use tracing::{debug, info};
@ -13,14 +13,14 @@ const DEFAULT_WAIT: Duration = Duration::from_secs(DEFAULT_WAIT_TIMEOUT_SECS);
const POLL_INTERVAL: Duration = Duration::from_millis(POLL_INTERVAL_MILLIS);
pub async fn wait_for_nodes(ports: &[u16]) -> Result<(), HttpReadinessError> {
wait_for_ports(ports, NodeKind::Node).await
wait_for_ports(ports, NODE_ROLE).await
}
async fn wait_for_ports(ports: &[u16], role: NodeKind) -> Result<(), HttpReadinessError> {
async fn wait_for_ports(ports: &[u16], role: &'static str) -> Result<(), HttpReadinessError> {
let host = compose_runner_host();
let timeout = compose_http_timeout();
info!(role = ?role, ports = ?ports, host, "waiting for compose HTTP readiness");
info!(role, ports = ?ports, host, "waiting for compose HTTP readiness");
http_probe::wait_for_http_ports_with_host(
ports,

View File

@ -31,7 +31,7 @@ spec:
env:
- name: CFG_SERVER_ADDR
value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }}
- name: NOMOS_TIME_BACKEND
- name: LOGOS_BLOCKCHAIN_TIME_BACKEND
value: {{ $root.Values.timeBackend | default "monotonic" | quote }}
{{- range $key, $value := $node.env }}
- name: {{ $key }}

View File

@ -4,7 +4,7 @@ use kube::Client;
use reqwest::Url;
use testing_framework_core::{
nodes::ApiClient,
scenario::{CleanupGuard, NodeClients, http_probe::NodeKind},
scenario::{CleanupGuard, NodeClients, http_probe::NODE_ROLE},
topology::{generation::GeneratedTopology, readiness::ReadinessError},
};
use tracing::{debug, info};
@ -107,12 +107,9 @@ impl ClusterEnvironment {
#[derive(Debug, thiserror::Error)]
/// Failures while building node clients against forwarded ports.
pub enum NodeClientError {
#[error(
"failed to build {endpoint} client URL for {role} port {port}: {source}",
role = role.label()
)]
#[error("failed to build {endpoint} client URL for {role} port {port}: {source}")]
Endpoint {
role: NodeKind,
role: &'static str,
endpoint: &'static str,
port: u16,
#[source]
@ -123,12 +120,9 @@ pub enum NodeClientError {
#[derive(Debug, thiserror::Error)]
/// Readiness check failures for the remote cluster endpoints.
pub enum RemoteReadinessError {
#[error(
"failed to build readiness URL for {role} port {port}: {source}",
role = role.label()
)]
#[error("failed to build readiness URL for {role} port {port}: {source}")]
Endpoint {
role: NodeKind,
role: &'static str,
port: u16,
#[source]
source: ParseError,
@ -164,7 +158,7 @@ pub fn build_node_clients(cluster: &ClusterEnvironment) -> Result<NodeClients, N
.copied()
.zip(cluster.node_testing_ports.iter().copied())
.map(|(api_port, testing_port)| {
api_client_from_ports(&cluster.node_host, NodeKind::Node, api_port, testing_port)
api_client_from_ports(&cluster.node_host, NODE_ROLE, api_port, testing_port)
})
.collect::<Result<Vec<_>, _>>()?;
@ -180,7 +174,7 @@ pub async fn ensure_cluster_readiness(
info!("waiting for remote readiness (API + membership)");
let (node_api, _node_testing) = cluster.node_ports();
let node_urls = readiness_urls(node_api, NodeKind::Node, &cluster.node_host)?;
let node_urls = readiness_urls(node_api, NODE_ROLE, &cluster.node_host)?;
descriptors
.wait_remote_readiness(&node_urls)
@ -279,7 +273,7 @@ async fn cleanup_pending(client: &Client, namespace: &str, guard: &mut Option<Ru
fn readiness_urls(
ports: &[u16],
role: NodeKind,
role: &'static str,
host: &str,
) -> Result<Vec<Url>, RemoteReadinessError> {
ports
@ -289,7 +283,7 @@ fn readiness_urls(
.collect()
}
fn readiness_url(host: &str, role: NodeKind, port: u16) -> Result<Url, RemoteReadinessError> {
fn readiness_url(host: &str, role: &'static str, port: u16) -> Result<Url, RemoteReadinessError> {
cluster_host_url(host, port).map_err(|source| RemoteReadinessError::Endpoint {
role,
port,
@ -303,7 +297,7 @@ fn cluster_host_url(host: &str, port: u16) -> Result<Url, ParseError> {
fn api_client_from_ports(
host: &str,
role: NodeKind,
role: &'static str,
api_port: u16,
testing_port: u16,
) -> Result<ApiClient, NodeClientError> {

View File

@ -1,11 +1,11 @@
use testing_framework_core::scenario::http_probe::{self, HttpReadinessError, NodeKind};
use testing_framework_core::scenario::http_probe::{self, HttpReadinessError};
use super::{ClusterWaitError, http_poll_interval, node_http_probe_timeout, node_http_timeout};
use crate::host::node_host;
pub async fn wait_for_node_http_nodeport(
ports: &[u16],
role: NodeKind,
role: &'static str,
) -> Result<(), ClusterWaitError> {
let host = node_host();
wait_for_node_http_on_host(ports, role, &host, node_http_probe_timeout()).await
@ -15,14 +15,14 @@ const LOCALHOST: &str = "127.0.0.1";
pub async fn wait_for_node_http_port_forward(
ports: &[u16],
role: NodeKind,
role: &'static str,
) -> Result<(), ClusterWaitError> {
wait_for_node_http_on_host(ports, role, LOCALHOST, node_http_timeout()).await
}
async fn wait_for_node_http_on_host(
ports: &[u16],
role: NodeKind,
role: &'static str,
host: &str,
timeout: std::time::Duration,
) -> Result<(), ClusterWaitError> {

View File

@ -1,7 +1,6 @@
use std::{env, sync::LazyLock, time::Duration};
use kube::Error as KubeError;
use testing_framework_core::scenario::http_probe::NodeKind;
use thiserror::Error;
mod deployment;
@ -77,12 +76,9 @@ pub enum ClusterWaitError {
NodePortUnavailable { service: String, port: u16 },
#[error("cluster must have at least one node")]
MissingNode,
#[error(
"timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}",
role = role.label()
)]
#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")]
NodeHttpTimeout {
role: NodeKind,
role: &'static str,
port: u16,
timeout: Duration,
},

View File

@ -1,5 +1,5 @@
use kube::Client;
use testing_framework_core::scenario::http_probe::NodeKind;
use testing_framework_core::scenario::http_probe::NODE_ROLE;
use super::{ClusterPorts, ClusterReady, ClusterWaitError, NodeConfigPorts};
use crate::lifecycle::wait::{
@ -32,7 +32,7 @@ pub async fn wait_for_cluster_ready(
let mut port_forwards: Vec<PortForwardHandle> = Vec::new();
let node_api_ports: Vec<u16> = node_allocations.iter().map(|ports| ports.api).collect();
if wait_for_node_http_nodeport(&node_api_ports, NodeKind::Node)
if wait_for_node_http_nodeport(&node_api_ports, NODE_ROLE)
.await
.is_err()
{
@ -54,7 +54,7 @@ pub async fn wait_for_cluster_ready(
port_forwards = forwards;
node_allocations = allocations;
let node_api_ports: Vec<u16> = node_allocations.iter().map(|ports| ports.api).collect();
if let Err(err) = wait_for_node_http_port_forward(&node_api_ports, NodeKind::Node).await {
if let Err(err) = wait_for_node_http_port_forward(&node_api_ports, NODE_ROLE).await {
kill_port_forwards(&mut port_forwards);
return Err(err);
}

View File

@ -12,7 +12,7 @@ use testing_framework_core::{
},
scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode},
topology::{
generation::{GeneratedTopology, NodeKind, find_expected_peer_counts},
generation::{GeneratedTopology, find_expected_peer_counts},
utils::multiaddr_port,
},
};
@ -247,7 +247,6 @@ impl LocalDynamicNodes {
Ok(StartedNode {
name: node_name,
kind: NodeKind::Node,
api: api_client,
})
}

View File

@ -7,49 +7,49 @@ pub fn slow_test_env() -> bool {
#[must_use]
pub fn debug_tracing() -> bool {
env::var("NOMOS_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true"))
env::var("LOGOS_BLOCKCHAIN_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true"))
}
#[must_use]
pub fn nomos_log_dir() -> Option<PathBuf> {
env::var("NOMOS_LOG_DIR").ok().map(PathBuf::from)
env::var("LOGOS_BLOCKCHAIN_LOG_DIR").ok().map(PathBuf::from)
}
#[must_use]
pub fn nomos_log_level() -> Option<String> {
env::var("NOMOS_LOG_LEVEL").ok()
env::var("LOGOS_BLOCKCHAIN_LOG_LEVEL").ok()
}
#[must_use]
pub fn nomos_log_filter() -> Option<String> {
env::var("NOMOS_LOG_FILTER").ok()
env::var("LOGOS_BLOCKCHAIN_LOG_FILTER").ok()
}
#[must_use]
pub fn nomos_use_autonat() -> bool {
env::var("NOMOS_USE_AUTONAT").is_ok()
env::var("LOGOS_BLOCKCHAIN_USE_AUTONAT").is_ok()
}
#[must_use]
pub fn nomos_cfgsync_port() -> Option<u16> {
env::var("NOMOS_CFGSYNC_PORT")
env::var("LOGOS_BLOCKCHAIN_CFGSYNC_PORT")
.ok()
.and_then(|v| v.parse::<u16>().ok())
}
#[must_use]
pub fn nomos_tests_keep_logs() -> bool {
env::var("NOMOS_TESTS_KEEP_LOGS").is_ok()
env::var("LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS").is_ok()
}
#[must_use]
pub fn nomos_testnet_image() -> Option<String> {
env::var("NOMOS_TESTNET_IMAGE").ok()
env::var("LOGOS_BLOCKCHAIN_TESTNET_IMAGE").ok()
}
#[must_use]
pub fn nomos_testnet_image_pull_policy() -> Option<String> {
env::var("NOMOS_TESTNET_IMAGE_PULL_POLICY").ok()
env::var("LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY").ok()
}
#[must_use]
@ -64,15 +64,15 @@ pub fn rust_log() -> Option<String> {
#[must_use]
pub fn nomos_time_backend() -> Option<String> {
env::var("NOMOS_TIME_BACKEND").ok()
env::var("LOGOS_BLOCKCHAIN_TIME_BACKEND").ok()
}
#[must_use]
pub fn nomos_otlp_endpoint() -> Option<String> {
env::var("NOMOS_OTLP_ENDPOINT").ok()
env::var("LOGOS_BLOCKCHAIN_OTLP_ENDPOINT").ok()
}
#[must_use]
pub fn nomos_otlp_metrics_endpoint() -> Option<String> {
env::var("NOMOS_OTLP_METRICS_ENDPOINT").ok()
env::var("LOGOS_BLOCKCHAIN_OTLP_METRICS_ENDPOINT").ok()
}
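Taken together, a typical debugging environment under the renamed accessors looks like this (values illustrative):

  export LOGOS_BLOCKCHAIN_LOG_LEVEL=DEBUG
  export LOGOS_BLOCKCHAIN_LOG_DIR=/tmp/logos-blockchain-logs
  export LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS=1
  export LOGOS_BLOCKCHAIN_TIME_BACKEND=monotonic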

View File

@ -1,7 +1,7 @@
VERSION=v0.3.2
NOMOS_BUNDLE_VERSION=v4
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v4
# Pinned logos-blockchain-node revision used for CI builds and binary bundles.
NOMOS_NODE_REV=47ae18e95f643bde563b4769212b37f6f018fed3
LOGOS_BLOCKCHAIN_NODE_REV=47ae18e95f643bde563b4769212b37f6f018fed3
# Optional: local logos-blockchain-node checkout override (do not commit absolute paths).
# NOMOS_NODE_PATH=
# LOGOS_BLOCKCHAIN_NODE_PATH=