Mirror of https://github.com/logos-blockchain/logos-blockchain-testing.git (synced 2026-01-02 05:13:09 +00:00)

Initial import of Nomos testing framework

This commit is contained in commit e1c2bb2b95.
54  .cargo-deny.toml  Normal file
@@ -0,0 +1,54 @@
# Config file reference can be found at https://embarkstudios.github.io/cargo-deny/checks/cfg.html.

[graph]
all-features = true
exclude-dev = true
no-default-features = true

[advisories]
ignore = [
  # Keep local ignores in sync with nomos-node if needed. Unused entries removed.
  "RUSTSEC-2024-0370", # proc-macro-error unmaintained; upstream dependency
  "RUSTSEC-2024-0384", # instant unmaintained; upstream dependency
  "RUSTSEC-2024-0388", # derivative unmaintained; no safe upgrade available upstream
  "RUSTSEC-2024-0436", # paste unmaintained; upstream dependency
  "RUSTSEC-2025-0012", # backoff unmaintained; upstream workspace still relies on it
  "RUSTSEC-2025-0055", # tracing-subscriber ansi escape issue; upstream dependency
]
yanked = "deny"

[bans]
allow-wildcard-paths = false
multiple-versions = "allow"

[licenses]
allow = [
  "Apache-2.0 WITH LLVM-exception",
  "Apache-2.0",
  "BSD-2-Clause",
  "BSD-3-Clause",
  "BSL-1.0",
  "CC0-1.0",
  "CDDL-1.0",
  "CDLA-Permissive-2.0",
  "ISC",
  "MIT",
  "MPL-2.0",
  "Unicode-3.0",
  "Zlib",
]
private = { ignore = false }
unused-allowed-license = "deny"

[[licenses.clarify]]
expression = "MIT AND ISC"
license-files = [{ hash = 0xbd0eed23, path = "LICENSE" }]
name = "ring"

[sources]
allow-git = ["https://github.com/EspressoSystems/jellyfish.git"]
unknown-git = "deny"
unknown-registry = "deny"

[sources.allow-org]
github = ["logos-co"]
4  .cargo/config.toml  Normal file
@@ -0,0 +1,4 @@
[target.'cfg(target_os = "macos")']
# On macOS we need to link against some Go libraries; the build only works with these extra flags.
# from: https://github.com/golang/go/issues/42459
rustflags = ["-C", "link-args=-framework CoreFoundation -framework Security -framework CoreServices -lresolv"]
9  .dockerignore  Normal file
@@ -0,0 +1,9 @@
# General trim for runner images and CI builds
.git
target
.tmp
tests/workflows/.tmp*
book
scripts/build-rapidsnark.sh~
rust-project-all-in-one.txt
**/*.log
355  .github/workflows/lint.yml  vendored  Normal file
@@ -0,0 +1,355 @@
name: Lint

on:
  push:
    branches: ["*"]
  pull_request:

env:
  CARGO_TERM_COLOR: always

concurrency:
  group: lint-${{ github.ref }}
  cancel-in-progress: true

jobs:
  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits"
          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
          components: rustfmt
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - uses: actions/cache@v4
        with:
          path: target
          key: ${{ runner.os }}-target-fmt-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14
          restore-keys: ${{ runner.os }}-target-fmt-
      - run: cargo +nightly-2025-09-14 fmt --all -- --check

  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits"
          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
          components: clippy
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - uses: actions/cache@v4
        with:
          path: target
          key: ${{ runner.os }}-target-clippy-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14
          restore-keys: ${{ runner.os }}-target-clippy-
      - run: cargo +nightly-2025-09-14 clippy --all --all-targets --all-features -- -D warnings

  deny:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits"
          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - name: Install cargo-deny
        run: cargo install cargo-deny --locked --version 0.18.2
      - run: cargo deny check --hide-inclusion-graph -c .cargo-deny.toml --show-stats -D warnings

  taplo:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
      - name: Install taplo
        run: |
          TAPLO_VERSION=0.9.3
          cargo install taplo-cli --locked --version ${TAPLO_VERSION}
      - run: taplo fmt --check
      - run: taplo lint

  machete:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits"
          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - name: Install cargo-machete
        run: cargo +nightly-2025-09-14 install --git https://github.com/bnjbvr/cargo-machete --locked cargo-machete
      - run: cargo machete

  local_smoke:
    runs-on: ubuntu-latest
    env:
      POL_PROOF_DEV_MODE: true
      LOCAL_DEMO_RUN_SECS: 120
      LOCAL_DEMO_VALIDATORS: 1
      LOCAL_DEMO_EXECUTORS: 1
      TMPDIR: ${{ runner.temp }}
      NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
    steps:
      - uses: actions/checkout@v4
      - name: Install system dependencies
        run: |
          set -euo pipefail
          if command -v sudo >/dev/null 2>&1; then
            sudo apt-get update
            sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
          else
            apt-get update
            apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
          fi
      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$NOMOS_CIRCUITS"
          echo "NOMOS_CIRCUITS=$NOMOS_CIRCUITS" >> "$GITHUB_ENV"
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-target-local-smoke-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14
          restore-keys: ${{ runner.os }}-target-local-smoke-
      - name: Build local binaries (nomos-node/executor)
        run: |
          SRC_DIR="${TMPDIR:-/tmp}/nomos-node-src"
          mkdir -p "$SRC_DIR"
          if [ ! -d "$SRC_DIR/.git" ]; then
            git clone https://github.com/logos-co/nomos-node.git "$SRC_DIR"
          else
            cd "$SRC_DIR"
            git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd
            git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd
            git reset --hard
            git clean -fdx
            cd -
          fi
          cd "$SRC_DIR"
          git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd
          git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd
          git reset --hard
          git clean -fdx
          cargo +nightly-2025-09-14 build --locked --all-features -p nomos-node -p nomos-executor
      - name: Run local runner smoke (ignored test)
        run: |
          cargo +nightly-2025-09-14 test -p runner-examples --test local_runner_bin_smoke -- --ignored --nocapture

  compose_smoke:
    runs-on: ubuntu-latest
    env:
      TMPDIR: ${{ github.workspace }}/.tmp
      NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
      NOMOS_TESTNET_IMAGE: nomos-testnet:local
      DOCKER_BUILDKIT: 1
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Prepare workspace tmpdir
        run: mkdir -p "$TMPDIR"

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14

      - name: Install system dependencies
        run: |
          set -euo pipefail
          if command -v sudo >/dev/null 2>&1; then
            sudo apt-get update
            sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
          else
            apt-get update
            apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm
          fi

      - name: Cache cargo registry
        if: env.ACT != 'true'
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Cache target directory
        if: env.ACT != 'true'
        uses: actions/cache@v4
        with:
          path: target
          key: ${{ runner.os }}-target-compose-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-target-compose-

      - name: Install circuits for host build
        env:
          NOMOS_CIRCUITS_PLATFORM: linux-x86_64
          NOMOS_CIRCUITS_REBUILD_RAPIDSNARK: "1"
          RAPIDSNARK_FORCE_REBUILD: "1"
          RAPIDSNARK_BUILD_GMP: "0"
          RAPIDSNARK_USE_ASM: "OFF"
        run: |
          CIRCUITS_DIR="${NOMOS_CIRCUITS}"
          chmod +x scripts/setup-nomos-circuits.sh
          scripts/setup-nomos-circuits.sh v0.3.1 "$CIRCUITS_DIR"
          # Copy into build context so Docker doesn't need network
          rm -rf testing-framework/assets/stack/kzgrs_test_params
          mkdir -p testing-framework/assets/stack/kzgrs_test_params
          if command -v rsync >/dev/null 2>&1; then
            rsync -a --delete "$CIRCUITS_DIR"/ testing-framework/assets/stack/kzgrs_test_params/
          else
            rm -rf testing-framework/assets/stack/kzgrs_test_params/*
            cp -a "$CIRCUITS_DIR"/. testing-framework/assets/stack/kzgrs_test_params/
          fi
          echo "NOMOS_CIRCUITS=$CIRCUITS_DIR" >> "$GITHUB_ENV"
          echo "CIRCUITS_OVERRIDE=testing-framework/assets/stack/kzgrs_test_params" >> "$GITHUB_ENV"

      - name: Build compose test image
        env:
          DOCKER_CLI_HINTS: "false"
          IMAGE_TAG: ${{ env.NOMOS_TESTNET_IMAGE }}
          CIRCUITS_OVERRIDE: ${{ env.CIRCUITS_OVERRIDE }}
        run: |
          chmod +x testing-framework/assets/stack/scripts/build_test_image.sh
          testing-framework/assets/stack/scripts/build_test_image.sh

      - name: Run compose mixed workload binary
        env:
          POL_PROOF_DEV_MODE: "true"
          COMPOSE_NODE_PAIRS: "1x1"
          NOMOS_TESTNET_IMAGE: ${{ env.NOMOS_TESTNET_IMAGE }}
          COMPOSE_RUNNER_HOST: ${{ env.ACT == 'true' && 'host.docker.internal' || '127.0.0.1' }}
          RUST_BACKTRACE: "1"
          NOMOS_TESTS_TRACING: "true"
          NOMOS_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs"
          NOMOS_LOG_LEVEL: "info"
        run: |
          mkdir -p "$TMPDIR"
          if [ "${{ env.ACT }}" = "true" ]; then
            export COMPOSE_RUNNER_PRESERVE=1
          fi
          cargo run -p runner-examples --bin compose_runner -- --nocapture

      - name: Collect compose logs
        if: failure()
        run: |
          mkdir -p ci-artifacts/compose
          if [ -d "${TMPDIR}/compose-logs" ]; then
            tar -czf ci-artifacts/compose/node-logs.tgz -C "${TMPDIR}/compose-logs" .
          fi
          docker ps -a --filter "name=nomos-compose-" --format '{{.ID}} {{.Names}} {{.Status}}' > ci-artifacts/compose/containers.txt || true
          for id in $(docker ps -a --filter "name=nomos-compose-" -q); do
            docker logs "$id" > "ci-artifacts/compose/${id}.log" 2>&1 || true
          done

      - name: Upload compose artifacts
        if: failure() && env.ACT != 'true'
        uses: actions/upload-artifact@v4
        with:
          name: compose-mixed-workload-logs
          path: ci-artifacts

      - name: Cleanup compose containers
        if: always() && env.ACT != 'true'
        run: |
          ids=$(docker ps -a --filter "name=nomos-compose-" -q)
          if [ -n "$ids" ]; then
            docker rm -f $ids
          fi

  book:
    runs-on: ubuntu-latest
    env:
      RUSTUP_TOOLCHAIN: nightly-2025-09-14
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - name: Install mdBook toolchain
        run: |
          MDBOOK_VERSION=0.4.40
          LINKCHECK_VERSION=0.7.7
          MERMAID_VERSION=0.12.6
          cargo +nightly-2025-09-14 install --locked mdbook --version ${MDBOOK_VERSION}
          cargo +nightly-2025-09-14 install mdbook-linkcheck --version ${LINKCHECK_VERSION}
          cargo +nightly-2025-09-14 install --locked mdbook-mermaid --version ${MERMAID_VERSION}
          cargo +nightly-2025-09-14 install --locked typos-cli --version 1.20.11
      - name: Spell check (typos)
        run: typos --format brief book/src
      - name: Markdown lint
        run: npx -y markdownlint-cli2 "book/src/**/*.md"
      - name: Build book
        run: mdbook build book
      - name: Check links
        run: mdbook-linkcheck book
56  .github/workflows/pre-commit.yml  vendored  Normal file
@@ -0,0 +1,56 @@
name: Pre-commit

on:
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

concurrency:
  group: pre-commit-${{ github.ref }}
  cancel-in-progress: true

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install nomos circuits
        run: |
          ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits"
          echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV"

      - name: Set up Rust toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-09-14
          components: rustfmt, clippy

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'

      - name: Cache cargo registry
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-

      - name: Cache pre-commit
        uses: actions/cache@v4
        with:
          path: ~/.cache/pre-commit
          key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
          restore-keys: ${{ runner.os }}-pre-commit-

      - name: Install pre-commit
        run: pip install pre-commit

      - name: Run pre-commit
        run: pre-commit run --all-files
18  .gitignore  vendored  Normal file
@@ -0,0 +1,18 @@
/target
**/target
.tmp/
# IDE / OS cruft
.idea/
.DS_Store

# Builder/test artifacts
.tmp_check/
.tmp_docker/
ci-artifacts/
tests/kzgrs/circuits_bundle/
NOMOS_RUST_SOURCES_ONLY.txt
dump.zsh

# Local test artifacts (kept when NOMOS_TESTS_KEEP_LOGS=1)
tests/workflows/.tmp*
tests/workflows/.tmp*/
40  .pre-commit-config.yaml  Normal file
@@ -0,0 +1,40 @@
repos:
  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
    hooks:
      - id: fmt
        # We're running `fmt` with `--all` and `pass_filenames: false` to format the entire workspace at once.
        # Otherwise, `pre-commit` passes staged files one by one, which can lead to inconsistent results
        # due to, presumably, the lack of full workspace context.
        entry: cargo +nightly-2025-09-14 fmt
        pass_filenames: false
      - id: clippy
        args: ["--all", "--all-targets", "--all-features", "--", "-D", "warnings"]
  - repo: https://github.com/EmbarkStudios/cargo-deny
    rev: 0.18.2
    hooks:
      - id: cargo-deny
        args:
          - check
          - --hide-inclusion-graph
          - -c
          - .cargo-deny.toml
          - --show-stats
          - -D
          - warnings
  - repo: https://github.com/ComPWA/taplo-pre-commit
    rev: v0.9.3
    hooks:
      - id: taplo-format
      - id: taplo-lint
  - repo: https://github.com/bnjbvr/cargo-machete
    rev: ba1bcd4 # No tag yet with .pre-commit-hooks.yml
    hooks:
      - id: cargo-machete
  - repo: local
    hooks:
      - id: cargo-hack-check
        language: script
        name: cargo hack check
        entry: ./hooks/cargo-hack.sh
        stages: [manual]
10  .taplo.toml  Normal file
@@ -0,0 +1,10 @@
exclude = ["target/**"]

[formatting]
align_entries = true
allowed_blank_lines = 1
column_width = 120
keys = ["build-dependencies", "dependencies", "dev-dependencies"]
reorder_arrays = true
reorder_inline_tables = true
reorder_keys = true
8903  Cargo.lock  generated  Normal file
(File diff suppressed because it is too large.)
101  Cargo.toml  Normal file
@@ -0,0 +1,101 @@
[workspace]
members = [
  "examples",
  "testing-framework/configs",
  "testing-framework/core",
  "testing-framework/runners/compose",
  "testing-framework/runners/k8s",
  "testing-framework/runners/local",
  "testing-framework/tools/cfgsync",
  "testing-framework/workflows",
]
resolver = "2"

[workspace.package]
categories = []
description = "Nomos testing framework workspace (split out from nomos-node)"
edition = "2024"
keywords = ["framework", "nomos", "testing"]
license = "MIT OR Apache-2.0"
readme = "README.md"
repository = "https://example.invalid/nomos-testing-local"
version = "0.1.0"

[workspace.lints.rust]
unsafe_code = "allow"

[workspace.lints.clippy]
all = "allow"

[workspace.dependencies]
# Local testing framework crates
testing-framework-config = { default-features = false, path = "testing-framework/configs" }
testing-framework-core = { default-features = false, path = "testing-framework/core" }
testing-framework-runner-compose = { default-features = false, path = "testing-framework/runners/compose" }
testing-framework-runner-k8s = { default-features = false, path = "testing-framework/runners/k8s" }
testing-framework-runner-local = { default-features = false, path = "testing-framework/runners/local" }
testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" }

# Nomos git dependencies (pinned to latest master)
broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
cfgsync = { default-features = false, path = "testing-framework/tools/cfgsync" }
chain-leader = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd", features = [
  "pol-dev-mode",
] }
chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
executor-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
key-management-system = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
kzgrs-backend = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-da-dispersal = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-da-network-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-da-network-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-da-sampling = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-da-verifier = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-executor = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
subnetworks-assignations = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }
zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" }

# External crates
async-trait = { default-features = false, version = "0.1" }
bytes = { default-features = false, version = "1.3" }
hex = { default-features = false, version = "0.4.3" }
libp2p = { default-features = false, version = "0.55" }
overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
rand = { default-features = false, version = "0.8" }
reqwest = { default-features = false, version = "0.12" }
serde = { default-features = true, version = "1.0", features = ["derive"] }
serde_json = { default-features = false, version = "1.0" }
serde_with = { default-features = false, version = "3.14.0" }
serde_yaml = { default-features = false, version = "0.9.33" }
tempfile = { default-features = false, version = "3" }
thiserror = { default-features = false, version = "2.0" }
tokio = { default-features = false, version = "1" }
tracing = { default-features = false, version = "0.1" }
8  README.md  Normal file
@@ -0,0 +1,8 @@
# Nomos Testing

This repo is the standalone Nomos testing framework. For docs, quick start, and examples, read the mdBook at https://logos-co.github.io/nomos-testing/ (sources in `book/`) — start with:

- What you’ll learn: https://logos-co.github.io/nomos-testing/what-you-will-learn.html
- Quick examples: https://logos-co.github.io/nomos-testing/examples.html and https://logos-co.github.io/nomos-testing/examples-advanced.html
- Runners (compose/k8s/local): https://logos-co.github.io/nomos-testing/runners.html

Key crates live under `testing-framework/` (core, runners, workflows, configs) with integration tests in `tests/workflows/`. Compose/k8s assets sit in `testing-framework/assets/stack/`.
13  book/book.toml  Normal file
@@ -0,0 +1,13 @@
[book]
authors = ["Nomos Testing"]
language = "en"
src = "src"
title = "Nomos Testing Book"

[build]
# Keep book output in target/ to avoid polluting the workspace root.
build-dir = "../target/book"

[output.html]
additional-js = ["theme/mermaid-init.js"]
default-theme = "light"
31  book/src/SUMMARY.md  Normal file
@@ -0,0 +1,31 @@
# Summary

- [Project Context Primer](project-context-primer.md)
- [What You Will Learn](what-you-will-learn.md)
- [Part I — Foundations](part-i.md)
  - [Introduction](introduction.md)
  - [Architecture Overview](architecture-overview.md)
  - [Testing Philosophy](testing-philosophy.md)
  - [Scenario Lifecycle](scenario-lifecycle.md)
  - [Design Rationale](design-rationale.md)
- [Part II — User Guide](part-ii.md)
  - [Workspace Layout](workspace-layout.md)
  - [Annotated Tree](annotated-tree.md)
  - [Authoring Scenarios](authoring-scenarios.md)
  - [Core Content: Workloads & Expectations](workloads.md)
  - [Core Content: ScenarioBuilderExt Patterns](scenario-builder-ext-patterns.md)
  - [Best Practices](best-practices.md)
  - [Examples](examples.md)
  - [Advanced & Artificial Examples](examples-advanced.md)
  - [Running Scenarios](running-scenarios.md)
  - [Runners](runners.md)
  - [Operations](operations.md)
- [Part III — Developer Reference](part-iii.md)
  - [Scenario Model (Developer Level)](scenario-model.md)
  - [Extending the Framework](extending.md)
  - [Example: New Workload & Expectation (Rust)](custom-workload-example.md)
  - [Internal Crate Reference](internal-crate-reference.md)
- [Part IV — Appendix](part-iv.md)
  - [Builder API Quick Reference](dsl-cheat-sheet.md)
  - [Troubleshooting Scenarios](troubleshooting.md)
  - [FAQ](faq.md)
  - [Glossary](glossary.md)
96  book/src/annotated-tree.md  Normal file
@@ -0,0 +1,96 @@
# Annotated Tree

Directory structure with key paths annotated:

```
nomos-testing/
├─ testing-framework/          # Core library crates
│  ├─ configs/                 # Node config builders, topology generation, tracing/logging config
│  ├─ core/                    # Scenario model (ScenarioBuilder), runtime (Runner, Deployer), topology, node spawning
│  ├─ workflows/               # Workloads (transactions, DA, chaos), expectations (liveness), builder DSL extensions
│  ├─ runners/                 # Deployment backends
│  │  ├─ local/                # LocalDeployer (spawns local processes)
│  │  ├─ compose/              # ComposeDeployer (Docker Compose + Prometheus)
│  │  └─ k8s/                  # K8sDeployer (Kubernetes Helm)
│  └─ assets/                  # Docker/K8s stack assets
│     └─ stack/
│        ├─ kzgrs_test_params/ # KZG circuit parameters (fetch via setup-nomos-circuits.sh)
│        ├─ monitoring/        # Prometheus config
│        ├─ scripts/           # Container entrypoints, image builder
│        └─ cfgsync.yaml       # Config sync server template
│
├─ examples/                   # PRIMARY ENTRY POINT: runnable binaries
│  └─ src/bin/
│     ├─ local_runner.rs       # Local processes demo (POL_PROOF_DEV_MODE=true)
│     ├─ compose_runner.rs     # Docker Compose demo (requires image)
│     └─ k8s_runner.rs         # Kubernetes demo (requires cluster + image)
│
├─ scripts/                    # Helper utilities
│  └─ setup-nomos-circuits.sh  # Fetch KZG circuit parameters
│
└─ book/                       # This documentation (mdBook)
```

## Key Directories Explained

### `testing-framework/`

Core library crates providing the testing API.

| Crate | Purpose | Key Exports |
|-------|---------|-------------|
| `configs` | Node configuration builders | Topology generation, tracing config |
| `core` | Scenario model & runtime | `ScenarioBuilder`, `Deployer`, `Runner` |
| `workflows` | Workloads & expectations | `ScenarioBuilderExt`, `ChaosBuilderExt` |
| `runners/local` | Local process deployer | `LocalDeployer` |
| `runners/compose` | Docker Compose deployer | `ComposeDeployer` |
| `runners/k8s` | Kubernetes deployer | `K8sDeployer` |

### `testing-framework/assets/stack/`

Docker/K8s deployment assets:

- **`kzgrs_test_params/`**: Circuit parameters (override via `NOMOS_KZGRS_PARAMS_PATH`)
- **`monitoring/`**: Prometheus config
- **`scripts/`**: Container entrypoints and image builder
- **`cfgsync.yaml`**: Configuration sync server template

### `examples/` (Start Here!)

**Runnable binaries** demonstrating framework usage:

- `local_runner.rs` — Local processes
- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built)
- `k8s_runner.rs` — Kubernetes (requires cluster + image)

**Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`

**All runners require `POL_PROOF_DEV_MODE=true`** to avoid expensive proof generation.

### `scripts/`

Helper utilities:

- **`setup-nomos-circuits.sh`**: Fetch KZG parameters from releases

## Observability

**Compose runner** includes:

- **Prometheus** at `http://localhost:9090` (metrics scraping)
- Node metrics exposed per validator/executor
- Access in expectations: `ctx.telemetry().prometheus_endpoint()`

**Logging** controlled by:

- `NOMOS_LOG_DIR` — Write per-node log files
- `NOMOS_LOG_LEVEL` — Global log level (error/warn/info/debug/trace)
- `NOMOS_LOG_FILTER` — Target-specific filtering (e.g., `consensus=trace,da=debug`)
- `NOMOS_TESTS_TRACING` — Enable file logging for local runner

See [Logging and Observability](operations.md#logging-and-observability) for details.

## Navigation Guide

| To Do This | Go Here |
|------------|---------|
| **Run an example** | `examples/src/bin/` → `cargo run -p runner-examples --bin <name>` |
| **Write a custom scenario** | `testing-framework/core/` → Implement using `ScenarioBuilder` |
| **Add a new workload** | `testing-framework/workflows/src/workloads/` → Implement `Workload` trait |
| **Add a new expectation** | `testing-framework/workflows/src/expectations/` → Implement `Expectation` trait |
| **Modify node configs** | `testing-framework/configs/src/topology/configs/` |
| **Extend builder DSL** | `testing-framework/workflows/src/builder/` → Add trait methods |
| **Add a new deployer** | `testing-framework/runners/` → Implement `Deployer` trait |

For detailed guidance, see [Internal Crate Reference](internal-crate-reference.md).
139  book/src/architecture-overview.md  Normal file
@@ -0,0 +1,139 @@
# Architecture Overview

The framework follows a clear flow: **Topology → Scenario → Deployer → Runner → Workloads → Expectations**.

## Core Flow

```mermaid
flowchart LR
    A(Topology<br/>shape cluster) --> B(Scenario<br/>plan)
    B --> C(Deployer<br/>provision & readiness)
    C --> D(Runner<br/>orchestrate execution)
    D --> E(Workloads<br/>drive traffic)
    E --> F(Expectations<br/>verify outcomes)
```

### Components

- **Topology** describes the cluster: how many nodes, their roles, and the high-level network and data-availability parameters they should follow.
- **Scenario** combines that topology with the activities to run and the checks to perform, forming a single plan.
- **Deployer** provisions infrastructure on the chosen backend (local processes, Docker Compose, or Kubernetes), waits for readiness, and returns a Runner.
- **Runner** orchestrates scenario execution: starts workloads, observes signals, evaluates expectations, and triggers cleanup.
- **Workloads** generate traffic and conditions that exercise the system.
- **Expectations** observe the run and judge success or failure once activity completes.

Each layer has a narrow responsibility so that cluster shape, deployment choice, traffic generation, and health checks can evolve independently while fitting together predictably.

## Entry Points

The framework is consumed via **runnable example binaries** in `examples/src/bin/`:

- `local_runner.rs` — Spawns nodes as local processes
- `compose_runner.rs` — Deploys via Docker Compose (requires `NOMOS_TESTNET_IMAGE` built)
- `k8s_runner.rs` — Deploys via Kubernetes Helm (requires cluster + image)

**Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`

**Important:** All runners require `POL_PROOF_DEV_MODE=true` to avoid expensive Groth16 proof generation that causes timeouts.

These binaries use the framework API (`ScenarioBuilder`) to construct and execute scenarios.

## Builder API

Scenarios are defined using a fluent builder pattern:

```rust
let mut plan = ScenarioBuilder::topology()
    .network_star()              // Topology configuration
    .validators(3)
    .executors(2)
    .apply()
    .wallets(50)                 // Wallet seeding
    .transactions()              // Transaction workload
    .rate(5)
    .users(20)
    .apply()
    .da()                        // DA workload
    .channel_rate(1)
    .blob_rate(2)
    .apply()
    .expect_consensus_liveness() // Expectations
    .with_run_duration(Duration::from_secs(90))
    .build();
```

**Key API Points:**

- Topology uses `.topology().validators(N).executors(M).apply()` pattern (not `with_node_counts`)
- Workloads are configured via extension traits (`ScenarioBuilderExt`, `ChaosBuilderExt`)
- Chaos workloads require `.enable_node_control()` and a compatible runner

## Deployers

Three deployer implementations:

| Deployer | Backend | Prerequisites | Node Control |
|----------|---------|---------------|--------------|
| `LocalDeployer` | Local processes | Binaries in sibling checkout | No |
| `ComposeDeployer` | Docker Compose | `NOMOS_TESTNET_IMAGE` built | Yes |
| `K8sDeployer` | Kubernetes Helm | Cluster + image loaded | Not yet |

**Compose-specific features:**

- Includes Prometheus at `http://localhost:9090` (override via `TEST_FRAMEWORK_PROMETHEUS_PORT`)
- Optional OTLP trace/metrics endpoints (`NOMOS_OTLP_ENDPOINT`, `NOMOS_OTLP_METRICS_ENDPOINT`)
- Node control for chaos testing (restart validators/executors)
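The sketch below shows how a built plan reaches a backend. The `default()` constructors and the `deploy`/`run` method names are placeholders standing in for the actual deployer and runner entry points (see the developer reference); swapping `LocalDeployer` for `ComposeDeployer` or `K8sDeployer` should be the only change needed to retarget the same plan.

```rust
use testing_framework_core::scenario::Deployer;
use testing_framework_runner_local::LocalDeployer;
// use testing_framework_runner_compose::ComposeDeployer; // same plan, different backend

// Placeholder entry points: provision the cluster, then let the runner
// drive workloads and evaluate expectations against the deployed nodes.
let deployer = LocalDeployer::default();
let runner = deployer.deploy(&plan).await?;
runner.run().await?;
```
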
## Assets and Images

### Docker Image

Built via `testing-framework/assets/stack/scripts/build_test_image.sh`:

- Embeds KZG circuit parameters from `testing-framework/assets/stack/kzgrs_test_params/`
- Includes runner scripts: `run_nomos_node.sh`, `run_nomos_executor.sh`
- Tagged as `NOMOS_TESTNET_IMAGE` (default: `nomos-testnet:local`)

### Circuit Assets

KZG parameters required for DA workloads:

- **Default path:** `testing-framework/assets/stack/kzgrs_test_params/`
- **Override:** `NOMOS_KZGRS_PARAMS_PATH=/custom/path`
- **Fetch via:** `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/circuits`

### Compose Stack

Templates and configs in `testing-framework/runners/compose/assets/`:

- `docker-compose.yml.tera` — Stack template (validators, executors, Prometheus)
- Cfgsync config: `testing-framework/assets/stack/cfgsync.yaml`
- Monitoring: `testing-framework/assets/stack/monitoring/prometheus.yml`

## Logging Architecture

**Two separate logging pipelines:**

| Component | Configuration | Output |
|-----------|--------------|--------|
| **Runner binaries** | `RUST_LOG` | Framework orchestration logs |
| **Node processes** | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER`, `NOMOS_LOG_DIR` | Consensus, DA, mempool logs |

**Node logging:**

- **Local runner:** Writes to temporary directories by default (cleaned up). Set `NOMOS_TESTS_TRACING=true` + `NOMOS_LOG_DIR` for persistent files.
- **Compose runner:** Default logs to container stdout/stderr (`docker logs`). Optional per-node files if `NOMOS_LOG_DIR` is set and mounted.
- **K8s runner:** Logs to pod stdout/stderr (`kubectl logs`). Optional per-node files if `NOMOS_LOG_DIR` is set and mounted.

**File naming:** Per-node files use prefix `nomos-node-{index}` or `nomos-executor-{index}` (may include timestamps).

## Observability

**Prometheus (Compose only):**

- Exposed at `http://localhost:9090` (configurable)
- Scrapes all validator and executor metrics
- Accessible in expectations: `ctx.telemetry().prometheus_endpoint()`
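As a rough illustration of consuming the telemetry handle from an expectation, the sketch below pings the Prometheus HTTP API once the run completes. The `/api/v1/targets` query, the `reqwest` call, and the exact return type of `prometheus_endpoint()` are assumptions made for illustration, not guarantees of the framework API.

```rust
use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, Expectation, RunContext};

// Hypothetical expectation: fails the scenario if Prometheus is unreachable.
pub struct PrometheusReachable;

#[async_trait]
impl Expectation for PrometheusReachable {
    fn name(&self) -> &str {
        "prometheus_reachable"
    }

    async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
        // Assumed to yield a base URL such as "http://localhost:9090".
        let endpoint = ctx.telemetry().prometheus_endpoint();
        let response = reqwest::get(format!("{endpoint}/api/v1/targets")).await?;
        if response.status().is_success() {
            Ok(())
        } else {
            Err(format!("prometheus returned {}", response.status()).into())
        }
    }
}
```
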
**Node APIs:**

- HTTP endpoints per node for consensus info, network status, DA membership
- Accessible in expectations: `ctx.node_clients().validators().get(0)`

**OTLP (optional):**

- Trace endpoint: `NOMOS_OTLP_ENDPOINT=http://localhost:4317`
- Metrics endpoint: `NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318`
- Disabled by default (no noise if unset)

For detailed logging configuration, see [Logging and Observability](operations.md#logging-and-observability).
20  book/src/authoring-scenarios.md  Normal file
@@ -0,0 +1,20 @@
# Authoring Scenarios

Creating a scenario is a declarative exercise:

1. **Shape the topology**: decide how many validators and executors to run, and what high-level network and data-availability characteristics matter for the test.
2. **Attach workloads**: pick traffic generators that align with your goals (transactions, data-availability blobs, or chaos for resilience probes).
3. **Define expectations**: specify the health signals that must hold when the run finishes (e.g., consensus liveness, inclusion of submitted activity; see [Core Content: Workloads & Expectations](workloads.md)).
4. **Set duration**: choose a run window long enough to observe meaningful block progression and the effects of your workloads.
5. **Choose a runner**: target local processes for fast iteration, Docker Compose for reproducible multi-node stacks, or Kubernetes for cluster-grade validation. For environment considerations, see [Operations](operations.md).

Keep scenarios small and explicit: make the intended behavior and the success criteria clear so failures are easy to interpret and act upon. The sketch below maps these steps onto the builder API.
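A minimal sketch of those five steps using the builder calls shown in the [Architecture Overview](architecture-overview.md); the node counts, rates, and duration are illustrative, and the runner is chosen separately at deploy time.

```rust
use std::time::Duration;

use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;

let plan = ScenarioBuilder::topology() // 1. shape the topology
    .network_star()
    .validators(3)
    .executors(1)
    .apply()
    .wallets(20)                       // seed accounts used by the workload
    .transactions()                    // 2. attach a workload
    .rate(2)
    .users(10)
    .apply()
    .expect_consensus_liveness()       // 3. define expectations
    .with_run_duration(Duration::from_secs(120)) // 4. set the run window
    .build();
// 5. hand `plan` to the runner of your choice (local, compose, or k8s).
```
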
17  book/src/best-practices.md  Normal file
@@ -0,0 +1,17 @@
# Best Practices

- **State your intent**: document the goal of each scenario (throughput, DA validation, resilience) so expectation choices are obvious.
- **Keep runs meaningful**: choose durations that allow multiple blocks and make timing-based assertions trustworthy.
- **Separate concerns**: start with deterministic workloads for functional checks; add chaos in dedicated resilience scenarios to avoid noisy failures.
- **Reuse patterns**: standardize on shared topology and workload presets so results are comparable across environments and teams.
- **Observe first, tune second**: rely on liveness and inclusion signals to interpret outcomes before tweaking rates or topology.
- **Environment fit**: pick runners that match the feedback loop you need—local for speed (including fast CI smoke tests), compose for reproducible stacks (recommended for CI), k8s for cluster-grade fidelity.
- **Minimal surprises**: seed only necessary wallets and keep configuration deltas explicit when moving between CI and developer machines.
58  book/src/chaos.md  Normal file
@@ -0,0 +1,58 @@
# Chaos Workloads

Chaos in the framework uses node control to introduce failures and validate recovery. The built-in restart workload lives in `testing_framework_workflows::workloads::chaos::RandomRestartWorkload`.

## How it works

- Requires `NodeControlCapability` (`enable_node_control()` in the scenario builder) and a runner that provides a `NodeControlHandle`.
- Randomly selects nodes (validators, executors) to restart based on your include/exclude flags.
- Respects min/max delay between restarts and a target cooldown to avoid flapping the same node too frequently.
- Runs alongside other workloads; expectations should account for the added disruption.
- Support varies by runner: node control is not provided by the local runner and is not yet implemented for the k8s runner. Use a runner that advertises `NodeControlHandle` support (e.g., compose) for chaos workloads.

## Usage

```rust
use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::workloads::chaos::RandomRestartWorkload;

let plan = ScenarioBuilder::topology()
    .network_star()
    .validators(2)
    .executors(1)
    .apply()
    .enable_node_control()
    .with_workload(
        RandomRestartWorkload::new(
            Duration::from_secs(45),  // min delay
            Duration::from_secs(75),  // max delay
            Duration::from_secs(120), // target cooldown
            true,                     // include validators
            true,                     // include executors
        )
    )
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(150))
    .build();
// deploy with a runner that supports node control and run the scenario
```

## Expectations to pair

- **Consensus liveness**: ensure blocks keep progressing despite restarts.
- **Height convergence**: optionally check all nodes converge after the chaos window.
- Any workload-specific inclusion checks if you’re also driving tx/DA traffic.

## Best practices

- Keep delays/cooldowns realistic; avoid back-to-back restarts that would never happen in production.
- Limit chaos scope: toggle validators vs executors based on what you want to test.
- Combine with observability: monitor metrics/logs to explain failures.
116
book/src/custom-workload-example.md
Normal file
116
book/src/custom-workload-example.md
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
# Example: New Workload & Expectation (Rust)
|
||||||
|
|
||||||
|
A minimal, end-to-end illustration of adding a custom workload and matching
|
||||||
|
expectation. This shows the shape of the traits and where to plug into the
|
||||||
|
framework; expand the logic to fit your real test.
|
||||||
|
|
||||||
|
## Workload: simple reachability probe
|
||||||
|
|
||||||
|
Key ideas:
|
||||||
|
- **name**: identifies the workload in logs.
|
||||||
|
- **expectations**: workloads can bundle defaults so callers don’t forget checks.
|
||||||
|
- **init**: derive inputs from the generated topology (e.g., pick a target node).
|
||||||
|
- **start**: drive async activity using the shared `RunContext`.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use std::sync::Arc;
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use testing_framework_core::scenario::{
|
||||||
|
DynError, Expectation, RunContext, RunMetrics, Workload,
|
||||||
|
};
|
||||||
|
use testing_framework_core::topology::GeneratedTopology;
|
||||||
|
|
||||||
|
pub struct ReachabilityWorkload {
|
||||||
|
target_idx: usize,
|
||||||
|
bundled: Vec<Box<dyn Expectation>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReachabilityWorkload {
|
||||||
|
pub fn new(target_idx: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
target_idx,
|
||||||
|
bundled: vec![Box::new(ReachabilityExpectation::new(target_idx))],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Workload for ReachabilityWorkload {
|
||||||
|
fn name(&self) -> &'static str {
|
||||||
|
"reachability_workload"
|
||||||
|
}
|
||||||
|
|
||||||
|
fn expectations(&self) -> Vec<Box<dyn Expectation>> {
|
||||||
|
self.bundled.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init(
|
||||||
|
&mut self,
|
||||||
|
topology: &GeneratedTopology,
|
||||||
|
_metrics: &RunMetrics,
|
||||||
|
) -> Result<(), DynError> {
|
||||||
|
if topology.validators().get(self.target_idx).is_none() {
|
||||||
|
return Err("no validator at requested index".into());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
|
||||||
|
let client = ctx
|
||||||
|
.clients()
|
||||||
|
.validators()
|
||||||
|
.get(self.target_idx)
|
||||||
|
.ok_or("missing target client")?;
|
||||||
|
|
||||||
|
// Pseudo-action: issue a lightweight RPC to prove reachability.
|
||||||
|
client.health_check().await.map_err(|e| e.into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Expectation: confirm the target stayed reachable
|
||||||
|
|
||||||
|
Key ideas:
|
||||||
|
- **start_capture**: snapshot baseline if needed (not used here).
|
||||||
|
- **evaluate**: assert the condition after workloads finish.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use testing_framework_core::scenario::{DynError, Expectation, RunContext};
|
||||||
|
|
||||||
|
pub struct ReachabilityExpectation {
|
||||||
|
target_idx: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReachabilityExpectation {
|
||||||
|
pub fn new(target_idx: usize) -> Self {
|
||||||
|
Self { target_idx }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Expectation for ReachabilityExpectation {
|
||||||
|
fn name(&self) -> &str {
|
||||||
|
"target_reachable"
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
|
||||||
|
let client = ctx
|
||||||
|
.clients()
|
||||||
|
.validators()
|
||||||
|
.get(self.target_idx)
|
||||||
|
.ok_or("missing target client")?;
|
||||||
|
|
||||||
|
client.health_check().await.map_err(|e| {
|
||||||
|
format!("target became unreachable during run: {e}").into()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## How to wire it
|
||||||
|
- Build your scenario as usual and call `.with_workload(ReachabilityWorkload::new(0))`; see the sketch below.
|
||||||
|
- The bundled expectation is attached automatically; you can add more with
|
||||||
|
`.with_expectation(...)` if needed.
|
||||||
|
- Keep the logic minimal and fast for smoke tests; grow it into richer probes
|
||||||
|
for deeper scenarios.
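
A minimal sketch of that wiring, assuming `ReachabilityWorkload` from above is in scope and that `.with_workload(...)` is chainable on the builder as described (adjust to the exact `ScenarioBuilder` signatures):

```rust
use std::time::Duration;

use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;

async fn reachability_smoke() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut plan = ScenarioBuilder::topology()
        .network_star()
        .validators(3)
        .executors(0)
        .apply()
        // Attach the custom workload; its bundled expectation comes along automatically.
        .with_workload(ReachabilityWorkload::new(0))
        .with_run_duration(Duration::from_secs(30))
        .build();

    let deployer = LocalDeployer::default();
    let runner = deployer.deploy(&plan).await?;
    let _handle = runner.run(&mut plan).await?;

    Ok(())
}
```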
|
||||||
7
book/src/design-rationale.md
Normal file
7
book/src/design-rationale.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# Design Rationale
|
||||||
|
|
||||||
|
- **Modular crates** keep configuration, orchestration, workloads, and runners decoupled so each can evolve without breaking the others.
|
||||||
|
- **Pluggable runners** let the same scenario run on a laptop, a Docker host, or a Kubernetes cluster, making validation portable across environments.
|
||||||
|
- **Separated workloads and expectations** clarify intent: what traffic to generate versus how to judge success. This simplifies review and reuse.
|
||||||
|
- **Declarative topology** makes cluster shape explicit and repeatable, reducing surprise when moving between CI and developer machines.
|
||||||
|
- **Maintainability through predictability**: a clear flow from plan to deployment to verification lowers the cost of extending the framework and interpreting failures.
|
||||||
133
book/src/dsl-cheat-sheet.md
Normal file
133
book/src/dsl-cheat-sheet.md
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
# Builder API Quick Reference
|
||||||
|
|
||||||
|
Quick reference for the scenario builder DSL. All methods are chainable.
|
||||||
|
|
||||||
|
## Imports
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
use testing_framework_runner_k8s::K8sDeployer;
|
||||||
|
use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt};
|
||||||
|
use std::time::Duration;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Topology
|
||||||
|
|
||||||
|
```rust
|
||||||
|
ScenarioBuilder::topology()
|
||||||
|
.network_star() // Star topology (all connect to seed node)
|
||||||
|
.validators(3) // Number of validator nodes
|
||||||
|
.executors(2) // Number of executor nodes
|
||||||
|
.apply() // Finish topology configuration
|
||||||
|
```
|
||||||
|
|
||||||
|
## Wallets
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.wallets(50) // Seed 50 funded wallet accounts
|
||||||
|
```
|
||||||
|
|
||||||
|
## Transaction Workload
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.users(20) // Use 20 of the seeded wallets
|
||||||
|
.apply() // Finish transaction workload config
|
||||||
|
```
|
||||||
|
|
||||||
|
## DA Workload
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.da()
|
||||||
|
.channel_rate(1) // 1 channel operation per block
|
||||||
|
.blob_rate(2) // 2 blob dispersals per block
|
||||||
|
.apply() // Finish DA workload config
|
||||||
|
```
|
||||||
|
|
||||||
|
## Chaos Workload (Requires `enable_node_control()`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.enable_node_control() // Enable node control capability
|
||||||
|
.chaos()
|
||||||
|
.restart() // Random restart chaos
|
||||||
|
.min_delay(Duration::from_secs(30)) // Min time between restarts
|
||||||
|
.max_delay(Duration::from_secs(60)) // Max time between restarts
|
||||||
|
.target_cooldown(Duration::from_secs(45)) // Cooldown after restart
|
||||||
|
.apply() // Finish chaos workload config
|
||||||
|
```
|
||||||
|
|
||||||
|
## Expectations
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.expect_consensus_liveness() // Assert blocks are produced continuously
|
||||||
|
```
|
||||||
|
|
||||||
|
## Run Duration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
|
||||||
|
```
|
||||||
|
|
||||||
|
## Build
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.build() // Construct the final Scenario
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployers
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Local processes
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
|
||||||
|
// Docker Compose
|
||||||
|
let deployer = ComposeDeployer::default();
|
||||||
|
|
||||||
|
// Kubernetes
|
||||||
|
let deployer = K8sDeployer::default();
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Complete Example
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn run_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.wallets(50)
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.users(20)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1) // 1 channel operation per block
|
||||||
|
.blob_rate(2) // 2 blob dispersals per block
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(90))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
178
book/src/examples-advanced.md
Normal file
178
book/src/examples-advanced.md
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
# Advanced Examples
|
||||||
|
|
||||||
|
Realistic advanced scenarios demonstrating framework capabilities for production testing.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
| Example | Topology | Workloads | Deployer | Key Feature |
|
||||||
|
|---------|----------|-----------|----------|-------------|
|
||||||
|
| Load Progression | 3 validators + 2 executors | Increasing tx rate | Compose | Dynamic load testing |
|
||||||
|
| Sustained Load | 4 validators + 2 executors | High tx + DA rate | Compose | Stress testing |
|
||||||
|
| Aggressive Chaos | 4 validators + 2 executors | Frequent restarts + traffic | Compose | Resilience validation |
|
||||||
|
|
||||||
|
## Load Progression Test
|
||||||
|
|
||||||
|
Test consensus under progressively increasing transaction load:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn load_progression_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
for rate in [5, 10, 20, 30] {
|
||||||
|
println!("Testing with rate: {}", rate);
|
||||||
|
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.wallets(50)
|
||||||
|
.transactions()
|
||||||
|
.rate(rate)
|
||||||
|
.users(20)
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(60))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use:** Finding the maximum sustainable transaction rate for a given topology.
|
||||||
|
|
||||||
|
## Sustained Load Test
|
||||||
|
|
||||||
|
Run high transaction and DA load for extended duration:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn sustained_load_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(4)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.wallets(100)
|
||||||
|
.transactions()
|
||||||
|
.rate(15)
|
||||||
|
.users(50)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(2)
|
||||||
|
.blob_rate(3)
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(300))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use:** Validating stability under continuous high load over extended periods.
|
||||||
|
|
||||||
|
## Aggressive Chaos Test
|
||||||
|
|
||||||
|
Frequent node restarts with active traffic:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn aggressive_chaos_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(4)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.enable_node_control()
|
||||||
|
.wallets(50)
|
||||||
|
.transactions()
|
||||||
|
.rate(10)
|
||||||
|
.users(20)
|
||||||
|
.apply()
|
||||||
|
.chaos()
|
||||||
|
.restart()
|
||||||
|
.min_delay(Duration::from_secs(10))
|
||||||
|
.max_delay(Duration::from_secs(20))
|
||||||
|
.target_cooldown(Duration::from_secs(15))
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(180))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use:** Validating recovery and liveness under aggressive failure conditions.
|
||||||
|
|
||||||
|
**Note:** Requires `ComposeDeployer` for node control support.
|
||||||
|
|
||||||
|
## Extension Ideas
|
||||||
|
|
||||||
|
These scenarios require custom implementations but demonstrate framework extensibility:
|
||||||
|
|
||||||
|
### Network Partition Recovery
|
||||||
|
|
||||||
|
**Concept:** Test consensus recovery after network partitions.
|
||||||
|
|
||||||
|
**Requirements:**
|
||||||
|
- Needs `block_peer()` / `unblock_peer()` methods in `NodeControlHandle`
|
||||||
|
- Partition subsets of validators, wait, then restore connectivity
|
||||||
|
- Verify chain convergence after partition heals
|
||||||
|
|
||||||
|
**Why useful:** Tests the most realistic failure mode in distributed systems.
|
||||||
|
|
||||||
|
**Current blocker:** Node control doesn't yet support network-level actions (only process restarts).
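
If `block_peer()` / `unblock_peer()` were added to `NodeControlHandle`, a partition workload might look like the sketch below. This is purely illustrative: the two methods, their peer-index argument, and the timing are hypothetical and not part of the current API.

```rust
use std::time::Duration;

use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, RunContext, Workload};

struct PartitionWorkload {
    isolated: Vec<usize>, // validator indices to cut off (hypothetical addressing)
    heal_after: Duration, // how long the partition lasts
}

#[async_trait]
impl Workload for PartitionWorkload {
    fn name(&self) -> &'static str {
        "partition_workload"
    }

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        let Some(control) = ctx.node_control() else {
            return Err("partition workload requires node control".into());
        };

        // Hypothetical API: isolate the chosen validators from their peers.
        for &idx in &self.isolated {
            control.block_peer(idx).await?;
        }

        tokio::time::sleep(self.heal_after).await;

        // Hypothetical API: restore connectivity; expectations then check convergence.
        for &idx in &self.isolated {
            control.unblock_peer(idx).await?;
        }

        Ok(())
    }
}
```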
|
||||||
|
|
||||||
|
### Block Timing Consistency
|
||||||
|
|
||||||
|
**Concept:** Verify block production intervals stay within expected bounds.
|
||||||
|
|
||||||
|
**Implementation approach:**
|
||||||
|
- Custom expectation that consumes `BlockFeed`
|
||||||
|
- Collect block timestamps during run
|
||||||
|
- Assert intervals are within `(slot_duration / active_slot_coeff) ± tolerance`
|
||||||
|
|
||||||
|
**Why useful:** Validates consensus timing under various loads.
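
A rough shape for that expectation, assuming the block feed records per-block timestamps during the run; the `block_feed()`/`recorded_timestamps()` accessors are hypothetical and should be adapted to the actual `BlockFeed` API:

```rust
use std::time::Duration;

use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, Expectation, RunContext};

struct BlockTimingExpectation {
    expected_interval: Duration, // roughly slot_duration / active_slot_coeff
    tolerance: Duration,
}

#[async_trait]
impl Expectation for BlockTimingExpectation {
    fn name(&self) -> &str {
        "block_timing_consistency"
    }

    async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
        // Hypothetical accessor: timestamps of blocks observed during the run.
        let timestamps = ctx.block_feed().recorded_timestamps();
        if timestamps.len() < 2 {
            return Err("not enough blocks observed to judge timing".into());
        }

        // Compare the observed mean interval against the expected cadence.
        let span = timestamps[timestamps.len() - 1].duration_since(timestamps[0])?;
        let mean = span / (timestamps.len() as u32 - 1);
        let min = self.expected_interval.saturating_sub(self.tolerance);
        let max = self.expected_interval + self.tolerance;
        if mean < min || mean > max {
            return Err(format!("mean block interval {mean:?} outside [{min:?}, {max:?}]").into());
        }

        Ok(())
    }
}
```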
|
||||||
|
|
||||||
|
### Invalid Transaction Fuzzing
|
||||||
|
|
||||||
|
**Concept:** Submit malformed transactions and verify they're rejected properly.
|
||||||
|
|
||||||
|
**Implementation approach:**
|
||||||
|
- Custom workload that generates invalid transactions (bad signatures, insufficient funds, malformed structure)
|
||||||
|
- Expectation verifies mempool rejects them and they never appear in blocks
|
||||||
|
- Test mempool resilience and filtering
|
||||||
|
|
||||||
|
**Why useful:** Ensures mempool doesn't crash or include invalid transactions under fuzzing.
|
||||||
|
|
||||||
|
### Wallet Balance Verification
|
||||||
|
|
||||||
|
**Concept:** Track wallet balances and verify state consistency.
|
||||||
|
|
||||||
|
**Description:** After transaction workload completes, query all wallet balances via node API and verify total supply is conserved. Requires tracking initial state, submitted transactions, and final balances. Validates that the ledger maintains correctness under load (no funds lost or created). This is a **state assertion** expectation that checks correctness, not just liveness.
|
||||||
163
book/src/examples.md
Normal file
163
book/src/examples.md
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
# Examples
|
||||||
|
|
||||||
|
Concrete scenario shapes that illustrate how to combine topologies, workloads,
|
||||||
|
and expectations.
|
||||||
|
|
||||||
|
**Runnable examples:** The repo includes complete binaries in `examples/src/bin/`:
|
||||||
|
- `local_runner.rs` — Local processes
|
||||||
|
- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built)
|
||||||
|
- `k8s_runner.rs` — Kubernetes (requires cluster access and image loaded)
|
||||||
|
|
||||||
|
Run with: `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`
|
||||||
|
|
||||||
|
**All runners require `POL_PROOF_DEV_MODE=true`** to avoid expensive proof generation.
|
||||||
|
|
||||||
|
**Code patterns** below show how to build scenarios. Wrap these in `#[tokio::test]` functions for integration tests, or `#[tokio::main]` for binaries.
|
||||||
|
|
||||||
|
## Simple consensus liveness
|
||||||
|
|
||||||
|
Minimal test that validates basic block production:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn simple_consensus() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(0)
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(30))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use**: smoke tests for consensus on minimal hardware.
|
||||||
|
|
||||||
|
## Transaction workload
|
||||||
|
|
||||||
|
Test consensus under transaction load:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn transaction_workload() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(2)
|
||||||
|
.executors(0)
|
||||||
|
.apply()
|
||||||
|
.wallets(20)
|
||||||
|
.transactions()
|
||||||
|
.rate(5)
|
||||||
|
.users(10)
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(60))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use**: validate transaction submission and inclusion.
|
||||||
|
|
||||||
|
## DA + transaction workload
|
||||||
|
|
||||||
|
Combined test stressing both transaction and DA layers:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn da_and_transactions() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.wallets(30)
|
||||||
|
.transactions()
|
||||||
|
.rate(5)
|
||||||
|
.users(15)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1)
|
||||||
|
.blob_rate(2)
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(90))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use**: end-to-end coverage of transaction and DA layers.
|
||||||
|
|
||||||
|
## Chaos resilience
|
||||||
|
|
||||||
|
Test system resilience under node restarts:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
async fn chaos_resilience() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(4)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.enable_node_control()
|
||||||
|
.wallets(20)
|
||||||
|
.transactions()
|
||||||
|
.rate(3)
|
||||||
|
.users(10)
|
||||||
|
.apply()
|
||||||
|
.chaos()
|
||||||
|
.restart()
|
||||||
|
.min_delay(Duration::from_secs(20))
|
||||||
|
.max_delay(Duration::from_secs(40))
|
||||||
|
.target_cooldown(Duration::from_secs(30))
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(120))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**When to use**: resilience validation and operational readiness drills.
|
||||||
|
|
||||||
|
**Note**: Chaos tests require `ComposeDeployer` or another runner with node control support.
|
||||||
31
book/src/extending.md
Normal file
31
book/src/extending.md
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
# Extending the Framework
|
||||||
|
|
||||||
|
## Adding a workload
|
||||||
|
1) Implement `testing_framework_core::scenario::Workload`:
|
||||||
|
- Provide a name and any bundled expectations.
|
||||||
|
- In `init`, derive inputs from `GeneratedTopology` and `RunMetrics`; fail
|
||||||
|
fast if prerequisites are missing (e.g., wallet data, node addresses).
|
||||||
|
- In `start`, drive async traffic using the `RunContext` clients.
|
||||||
|
2) Expose the workload from a module under `testing-framework/workflows` and
|
||||||
|
consider adding a DSL helper for ergonomic wiring.
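
A minimal skeleton following these steps; `MyWorkload` and its single field are placeholders, and the signatures mirror the ones used in the custom workload example:

```rust
use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, RunContext, RunMetrics, Workload};
use testing_framework_core::topology::GeneratedTopology;

pub struct MyWorkload {
    target_idx: usize,
}

#[async_trait]
impl Workload for MyWorkload {
    fn name(&self) -> &'static str {
        "my_workload"
    }

    fn init(&mut self, topology: &GeneratedTopology, _metrics: &RunMetrics) -> Result<(), DynError> {
        // Fail fast if the topology cannot support this workload.
        if topology.validators().get(self.target_idx).is_none() {
            return Err("no validator at requested index".into());
        }
        Ok(())
    }

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        // Drive traffic through the shared clients; replace the stub with real logic.
        let _client = ctx
            .clients()
            .validators()
            .get(self.target_idx)
            .ok_or("missing target client")?;
        Ok(())
    }
}
```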
|
||||||
|
|
||||||
|
## Adding an expectation
|
||||||
|
1) Implement `testing_framework_core::scenario::Expectation`:
|
||||||
|
- Use `start_capture` to snapshot baseline metrics.
|
||||||
|
- Use `evaluate` to assert outcomes after workloads finish; return all errors
|
||||||
|
so the runner can aggregate them.
|
||||||
|
2) Export it from `testing-framework/workflows` if it is reusable.
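
A matching skeleton (only `evaluate` is shown; implement `start_capture` as well if your check needs a baseline, following the trait definition in `core` for its exact signature):

```rust
use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, Expectation, RunContext};

pub struct MyExpectation;

#[async_trait]
impl Expectation for MyExpectation {
    fn name(&self) -> &str {
        "my_expectation"
    }

    async fn evaluate(&mut self, _ctx: &RunContext) -> Result<(), DynError> {
        // Replace with real checks: query node clients or metrics via the context
        // and return Err(..) with a descriptive message on failure.
        Ok(())
    }
}
```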
|
||||||
|
|
||||||
|
## Adding a runner
|
||||||
|
1) Implement `testing_framework_core::scenario::Deployer` for your backend.
|
||||||
|
- Produce a `RunContext` with `NodeClients`, metrics endpoints, and optional
|
||||||
|
`NodeControlHandle`.
|
||||||
|
- Guard cleanup with `CleanupGuard` to reclaim resources even on failures.
|
||||||
|
2) Mirror the readiness and block-feed probes used by the existing runners so
|
||||||
|
workloads can rely on consistent signals.
|
||||||
|
|
||||||
|
## Adding topology helpers
|
||||||
|
- Extend `testing_framework_core::topology::TopologyBuilder` with new layouts or
|
||||||
|
configuration presets (e.g., specialized DA parameters). Keep defaults safe:
|
||||||
|
ensure at least one participant and clamp dispersal factors as the current
|
||||||
|
helpers do.
|
||||||
33
book/src/faq.md
Normal file
33
book/src/faq.md
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
# FAQ
|
||||||
|
|
||||||
|
**Why block-oriented timing?**
|
||||||
|
Slots advance at a fixed rate (NTP-synchronized, 2s by default), so reasoning
|
||||||
|
about blocks and consensus intervals keeps assertions aligned with protocol
|
||||||
|
behavior rather than arbitrary wall-clock durations.
|
||||||
|
|
||||||
|
**Can I reuse the same scenario across runners?**
|
||||||
|
Yes. The plan stays the same; swap runners (local, compose, k8s) to target
|
||||||
|
different environments.
|
||||||
|
|
||||||
|
**When should I enable chaos workloads?**
|
||||||
|
Only when testing resilience or operational recovery; keep functional smoke
|
||||||
|
tests deterministic.
|
||||||
|
|
||||||
|
**How long should runs be?**
|
||||||
|
The framework enforces a minimum of **2× slot duration** (4 seconds with default 2s slots), but practical recommendations:
|
||||||
|
|
||||||
|
- **Smoke tests**: 30s minimum (~14 blocks with default 2s slots, 0.9 coefficient)
|
||||||
|
- **Transaction workloads**: 60s+ (~27 blocks) to observe inclusion patterns
|
||||||
|
- **DA workloads**: 90s+ (~40 blocks) to account for dispersal and sampling
|
||||||
|
- **Chaos tests**: 120s+ (~54 blocks) to allow recovery after restarts
|
||||||
|
|
||||||
|
Very short runs (< 30s) risk false confidence—one or two lucky blocks don't prove liveness.
|
||||||
|
|
||||||
|
**Do I always need seeded wallets?**
|
||||||
|
Only for transaction scenarios. Data-availability or pure chaos scenarios may
|
||||||
|
not require them, but liveness checks still need validators producing blocks.
|
||||||
|
|
||||||
|
**What if expectations fail but workloads “look fine”?**
|
||||||
|
Trust expectations first—they capture the intended success criteria. Use the
|
||||||
|
observability signals and runner logs to pinpoint why the system missed the
|
||||||
|
target.
|
||||||
52
book/src/glossary.md
Normal file
52
book/src/glossary.md
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
# Glossary
|
||||||
|
|
||||||
|
- **Validator**: node role responsible for participating in consensus and block
|
||||||
|
production.
|
||||||
|
- **Executor**: a validator node with the DA dispersal service enabled. Executors
|
||||||
|
can submit transactions and disperse blob data to the DA network, in addition
|
||||||
|
to performing all validator functions.
|
||||||
|
- **DA (Data Availability)**: subsystem ensuring blobs or channel data are
|
||||||
|
published and retrievable for validation.
|
||||||
|
- **Deployer**: component that provisions infrastructure (spawns processes,
|
||||||
|
creates containers, or launches pods), waits for readiness, and returns a
|
||||||
|
Runner. Examples: LocalDeployer, ComposeDeployer, K8sDeployer.
|
||||||
|
- **Runner**: component returned by deployers that orchestrates scenario
|
||||||
|
execution—starts workloads, observes signals, evaluates expectations, and
|
||||||
|
triggers cleanup.
|
||||||
|
- **Workload**: traffic or behavior generator that exercises the system during a
|
||||||
|
scenario run.
|
||||||
|
- **Expectation**: post-run assertion that judges whether the system met the
|
||||||
|
intended success criteria.
|
||||||
|
- **Topology**: declarative description of the cluster shape, roles, and
|
||||||
|
high-level parameters for a scenario.
|
||||||
|
- **Scenario**: immutable plan combining topology, workloads, expectations, and
|
||||||
|
run duration.
|
||||||
|
- **Blockfeed**: stream of block observations used for liveness or inclusion
|
||||||
|
signals during a run.
|
||||||
|
- **Control capability**: the ability for a runner to start, stop, or restart
|
||||||
|
nodes, used by chaos workloads.
|
||||||
|
- **Slot duration**: time interval between consensus rounds in Cryptarchia. Blocks
|
||||||
|
are produced at multiples of the slot duration based on lottery outcomes.
|
||||||
|
- **Block cadence**: observed rate of block production in a live network, measured
|
||||||
|
in blocks per second or seconds per block.
|
||||||
|
- **Cooldown**: waiting period after a chaos action (e.g., node restart) before
|
||||||
|
triggering the next action, allowing the system to stabilize.
|
||||||
|
- **Run window**: total duration a scenario executes, specified via
|
||||||
|
`with_run_duration()`. Framework auto-extends to at least 2× slot duration.
|
||||||
|
- **Readiness probe**: health check performed by runners to ensure nodes are
|
||||||
|
reachable and responsive before starting workloads. Prevents false negatives
|
||||||
|
from premature traffic.
|
||||||
|
- **Liveness**: property that the system continues making progress (producing
|
||||||
|
blocks) under specified conditions. Contrasts with safety/correctness which
|
||||||
|
verifies that state transitions are accurate.
|
||||||
|
- **State assertion**: expectation that verifies specific values in the system
|
||||||
|
state (e.g., wallet balances, UTXO sets) rather than just progress signals.
|
||||||
|
Also called "correctness expectations."
|
||||||
|
- **Mantle transaction**: transaction type in Nomos that can contain UTXO transfers
|
||||||
|
(LedgerTx) and operations (Op), including channel data (ChannelBlob).
|
||||||
|
- **Channel**: logical grouping for DA blobs; each blob belongs to a channel and
|
||||||
|
references a parent blob in the same channel, creating a chain of related data.
|
||||||
|
- **POL_PROOF_DEV_MODE**: environment variable that disables expensive Groth16 zero-knowledge
|
||||||
|
proof generation for leader election. **Required for all runners** (local, compose, k8s)
|
||||||
|
for practical testing—without it, proof generation causes timeouts. Should never be
|
||||||
|
used in production environments.
|
||||||
123
book/src/internal-crate-reference.md
Normal file
123
book/src/internal-crate-reference.md
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
# Internal Crate Reference
|
||||||
|
|
||||||
|
High-level roles of the crates that make up the framework:
|
||||||
|
|
||||||
|
- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, data availability, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution.
|
||||||
|
|
||||||
|
- **Core scenario orchestration** (`testing-framework/core/`): Houses the topology and scenario model, runtime coordination, node clients, and readiness/health probes. Defines `Deployer` and `Runner` traits, `ScenarioBuilder`, and `RunContext`.
|
||||||
|
|
||||||
|
- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, DA, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`).
|
||||||
|
|
||||||
|
- **Runners** (`testing-framework/runners/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`).
|
||||||
|
|
||||||
|
- **Runner Examples** (`examples/runner-examples`): Runnable binaries demonstrating framework usage and serving as living documentation. These are the **primary entry point** for running scenarios (`local_runner.rs`, `compose_runner.rs`, `k8s_runner.rs`).
|
||||||
|
|
||||||
|
## Where to Add New Capabilities
|
||||||
|
|
||||||
|
| What You're Adding | Where It Goes | Examples |
|
||||||
|
|-------------------|---------------|----------|
|
||||||
|
| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels, DA params |
|
||||||
|
| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts, node roles |
|
||||||
|
| **Scenario capability** | `testing-framework/core/src/scenario/` | New capabilities, context methods |
|
||||||
|
| **Workload** | `testing-framework/workflows/src/workloads/` | New traffic generators |
|
||||||
|
| **Expectation** | `testing-framework/workflows/src/expectations/` | New success criteria |
|
||||||
|
| **Builder API** | `testing-framework/workflows/src/builder/` | DSL extensions, fluent methods |
|
||||||
|
| **Deployer** | `testing-framework/runners/` | New deployment backends |
|
||||||
|
| **Example scenario** | `examples/src/bin/` | Demonstration binaries |
|
||||||
|
|
||||||
|
## Extension Workflow
|
||||||
|
|
||||||
|
### Adding a New Workload
|
||||||
|
|
||||||
|
1. **Define the workload** in `testing-framework/workflows/src/workloads/your_workload.rs`:
|
||||||
|
```rust
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use testing_framework_core::scenario::{Workload, RunContext, DynError};
|
||||||
|
|
||||||
|
pub struct YourWorkload {
|
||||||
|
// config fields
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Workload for YourWorkload {
|
||||||
|
fn name(&self) -> &'static str { "your_workload" }
|
||||||
|
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
|
||||||
|
// implementation
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Add builder extension** in `testing-framework/workflows/src/builder/mod.rs`:
|
||||||
|
```rust
|
||||||
|
pub trait ScenarioBuilderExt {
|
||||||
|
fn your_workload(self) -> YourWorkloadBuilder;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Use in examples** in `examples/src/bin/your_scenario.rs`:
|
||||||
|
```rust
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(0)
|
||||||
|
.apply()
|
||||||
|
.your_workload() // Your new DSL method
|
||||||
|
.apply()
|
||||||
|
.build();
|
||||||
|
```
|
||||||
|
|
||||||
|
### Adding a New Expectation
|
||||||
|
|
||||||
|
1. **Define the expectation** in `testing-framework/workflows/src/expectations/your_expectation.rs`:
|
||||||
|
```rust
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use testing_framework_core::scenario::{Expectation, RunContext, DynError};
|
||||||
|
|
||||||
|
pub struct YourExpectation {
|
||||||
|
// config fields
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Expectation for YourExpectation {
|
||||||
|
fn name(&self) -> &str { "your_expectation" }
|
||||||
|
async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
|
||||||
|
// implementation
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Add builder extension** in `testing-framework/workflows/src/builder/mod.rs`:
|
||||||
|
```rust
|
||||||
|
pub trait ScenarioBuilderExt {
|
||||||
|
fn expect_your_condition(self) -> Self;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Adding a New Deployer
|
||||||
|
|
||||||
|
1. **Implement `Deployer` trait** in `testing-framework/runners/your_runner/src/deployer.rs`:
|
||||||
|
```rust
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use testing_framework_core::scenario::{Deployer, Runner, Scenario};
|
||||||
|
|
||||||
|
pub struct YourDeployer;
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Deployer for YourDeployer {
|
||||||
|
type Error = YourError;
|
||||||
|
|
||||||
|
async fn deploy(&self, scenario: &Scenario) -> Result<Runner, Self::Error> {
|
||||||
|
// Provision infrastructure
|
||||||
|
// Wait for readiness
|
||||||
|
// Return Runner
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Provide cleanup** and handle node control if supported.
|
||||||
|
|
||||||
|
3. **Add example** in `examples/src/bin/your_runner.rs`.
|
||||||
|
|
||||||
|
For detailed examples, see [Extending the Framework](extending.md) and [Custom Workload Example](custom-workload-example.md).
|
||||||
15
book/src/introduction.md
Normal file
15
book/src/introduction.md
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Introduction
|
||||||
|
|
||||||
|
The Nomos Testing Framework is a purpose-built toolkit for exercising Nomos in
|
||||||
|
realistic, multi-node environments. It solves the gap between small, isolated
|
||||||
|
tests and full-system validation by letting teams describe a cluster layout,
|
||||||
|
drive meaningful traffic, and assert the outcomes in one coherent plan.
|
||||||
|
|
||||||
|
It is for protocol engineers, infrastructure operators, and QA teams who need
|
||||||
|
repeatable confidence that validators, executors, and data-availability
|
||||||
|
components work together under network and timing constraints.
|
||||||
|
|
||||||
|
Multi-node integration testing is required because many Nomos behaviors—block
|
||||||
|
progress, data availability, liveness under churn—only emerge when several
|
||||||
|
roles interact over real networking and time. This framework makes those checks
|
||||||
|
declarative, observable, and portable across environments.
|
||||||
76
book/src/node-control.md
Normal file
76
book/src/node-control.md
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
# Node Control & RunContext
|
||||||
|
|
||||||
|
The deployer supplies a `RunContext` that workloads and expectations share. It
|
||||||
|
provides:
|
||||||
|
|
||||||
|
- Topology descriptors (`GeneratedTopology`)
|
||||||
|
- Client handles (`NodeClients` / `ClusterClient`) for HTTP/RPC calls
|
||||||
|
- Metrics (`RunMetrics`, `Metrics`) and block feed
|
||||||
|
- Optional `NodeControlHandle` for managing nodes
|
||||||
|
|
||||||
|
## Current Chaos Capabilities and Limitations
|
||||||
|
|
||||||
|
The framework currently supports **process-level chaos** (node restarts) for
|
||||||
|
resilience testing:
|
||||||
|
|
||||||
|
**Supported:**
|
||||||
|
- Restart validators (`restart_validator`)
|
||||||
|
- Restart executors (`restart_executor`)
|
||||||
|
- Random restart workload via `.chaos().restart()`
|
||||||
|
|
||||||
|
**Not Yet Supported:**
|
||||||
|
- Network partitions (blocking peers, packet loss)
|
||||||
|
- Resource constraints (CPU throttling, memory limits)
|
||||||
|
- Byzantine behavior injection (invalid blocks, bad signatures)
|
||||||
|
- Selective peer blocking/unblocking
|
||||||
|
|
||||||
|
For network partition testing, see [Extension Ideas](examples-advanced.md#extension-ideas)
|
||||||
|
which describes the proposed `block_peer`/`unblock_peer` API (not yet implemented).
|
||||||
|
|
||||||
|
## Accessing node control in workloads/expectations
|
||||||
|
|
||||||
|
Check for control support and use it conditionally:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use async_trait::async_trait;
use testing_framework_core::scenario::{RunContext, Workload};
|
||||||
|
|
||||||
|
struct RestartWorkload;
|
||||||
|
|
||||||
|
#[async_trait]
impl Workload for RestartWorkload {
|
||||||
|
fn name(&self) -> &'static str { "restart_workload" }
|
||||||
|
|
||||||
|
async fn start(&self, ctx: &RunContext) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
if let Some(control) = ctx.node_control() {
|
||||||
|
// Restart the first validator (index 0) if supported.
|
||||||
|
control.restart_validator(0).await?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
When chaos workloads need control, require `enable_node_control()` in the
|
||||||
|
scenario builder and deploy with a runner that supports it.
|
||||||
|
|
||||||
|
## Current API surface
|
||||||
|
|
||||||
|
The `NodeControlHandle` trait currently provides:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub trait NodeControlHandle: Send + Sync {
|
||||||
|
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
|
||||||
|
async fn restart_executor(&self, index: usize) -> Result<(), DynError>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Future extensions may include peer blocking/unblocking or other control
|
||||||
|
operations. For now, focus on restart-based chaos patterns as shown in the
|
||||||
|
chaos workload examples.
|
||||||
|
|
||||||
|
## Considerations
|
||||||
|
|
||||||
|
- Always guard control usage: not all runners expose `NodeControlHandle`.
|
||||||
|
- Treat control as best-effort: failures should surface as test failures, but
|
||||||
|
workloads should degrade gracefully when control is absent.
|
||||||
|
- Combine control actions with expectations (e.g., restart then assert height
|
||||||
|
convergence) to keep scenarios meaningful.
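
A sketch of that pattern: restart one validator, give it a moment to come back, then probe it again. `health_check()` is the same pseudo-call used in the custom workload example, and the fixed delay is an arbitrary placeholder:

```rust
use std::time::Duration;

use async_trait::async_trait;
use testing_framework_core::scenario::{DynError, RunContext, Workload};

struct RestartAndRecheck {
    target_idx: usize,
}

#[async_trait]
impl Workload for RestartAndRecheck {
    fn name(&self) -> &'static str {
        "restart_and_recheck"
    }

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        // Degrade gracefully when the runner does not expose node control.
        let Some(control) = ctx.node_control() else {
            return Ok(());
        };

        control.restart_validator(self.target_idx).await?;
        tokio::time::sleep(Duration::from_secs(10)).await;

        // Confirm the restarted node answers again before the run ends.
        let client = ctx
            .clients()
            .validators()
            .get(self.target_idx)
            .ok_or("missing target client")?;
        client.health_check().await.map_err(|e| e.into())
    }
}
```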
|
||||||
412
book/src/operations.md
Normal file
412
book/src/operations.md
Normal file
@ -0,0 +1,412 @@
|
|||||||
|
# Operations
|
||||||
|
|
||||||
|
Operational readiness focuses on prerequisites, environment fit, and clear
|
||||||
|
signals:
|
||||||
|
|
||||||
|
- **Prerequisites**: keep a sibling `nomos-node` checkout available; ensure the
|
||||||
|
chosen runner’s platform needs are met (local binaries for host runs, Docker
|
||||||
|
for compose, cluster access for k8s).
|
||||||
|
- **Artifacts**: DA scenarios require KZG parameters (circuit assets) located at
|
||||||
|
`testing-framework/assets/stack/kzgrs_test_params`. Fetch them via
|
||||||
|
`scripts/setup-nomos-circuits.sh` or override the path with `NOMOS_KZGRS_PARAMS_PATH`.
|
||||||
|
- **Environment flags**: `POL_PROOF_DEV_MODE=true` is **required for all runners**
|
||||||
|
(local, compose, k8s) unless you want expensive Groth16 proof generation that
|
||||||
|
will cause tests to timeout. Configure logging via `NOMOS_LOG_DIR`, `NOMOS_LOG_LEVEL`,
|
||||||
|
and `NOMOS_LOG_FILTER` (see [Logging and Observability](#logging-and-observability)
|
||||||
|
for details). Note that nodes ignore `RUST_LOG` and only respond to `NOMOS_*` variables.
|
||||||
|
- **Readiness checks**: verify runners report node readiness before starting
|
||||||
|
workloads; this avoids false negatives from starting too early.
|
||||||
|
- **Failure triage**: map failures to missing prerequisites (wallet seeding,
|
||||||
|
node control availability), runner platform issues, or unmet expectations.
|
||||||
|
Start with liveness signals, then dive into workload-specific assertions.
|
||||||
|
|
||||||
|
Treat operational hygiene—assets present, prerequisites satisfied, observability
|
||||||
|
reachable—as the first step to reliable scenario outcomes.
|
||||||
|
|
||||||
|
## CI Usage
|
||||||
|
|
||||||
|
Both **LocalDeployer** and **ComposeDeployer** work in CI environments:
|
||||||
|
|
||||||
|
**LocalDeployer in CI:**
|
||||||
|
- Faster (no Docker overhead)
|
||||||
|
- Good for quick smoke tests
|
||||||
|
- **Trade-off:** Less isolation (processes share host)
|
||||||
|
|
||||||
|
**ComposeDeployer in CI (recommended):**
|
||||||
|
- Better isolation (containerized)
|
||||||
|
- Reproducible environment
|
||||||
|
- Includes Prometheus/observability
|
||||||
|
- **Trade-off:** Slower startup (Docker image build)
|
||||||
|
- **Trade-off:** Requires Docker daemon
|
||||||
|
|
||||||
|
See `.github/workflows/compose-mixed.yml` for a complete CI example using ComposeDeployer.
|
||||||
|
|
||||||
|
## Running Examples
|
||||||
|
|
||||||
|
### Local Runner
|
||||||
|
|
||||||
|
```bash
|
||||||
|
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
**Optional environment variables:**
|
||||||
|
- `LOCAL_DEMO_VALIDATORS=3` — Number of validators (default: 1)
|
||||||
|
- `LOCAL_DEMO_EXECUTORS=2` — Number of executors (default: 1)
|
||||||
|
- `LOCAL_DEMO_RUN_SECS=120` — Run duration in seconds (default: 60)
|
||||||
|
- `NOMOS_TESTS_TRACING=true` — Enable persistent file logging (required with `NOMOS_LOG_DIR`)
|
||||||
|
- `NOMOS_LOG_DIR=/tmp/logs` — Directory for per-node log files (only with `NOMOS_TESTS_TRACING=true`)
|
||||||
|
- `NOMOS_LOG_LEVEL=debug` — Set log level (default: info)
|
||||||
|
- `NOMOS_LOG_FILTER=consensus=trace,da=debug` — Fine-grained per-module log filtering
|
||||||
|
|
||||||
|
**Note:** The default `local_runner` example includes DA workload, so circuit assets in `testing-framework/assets/stack/kzgrs_test_params/` are required (fetch via `scripts/setup-nomos-circuits.sh`).
|
||||||
|
|
||||||
|
### Compose Runner
|
||||||
|
|
||||||
|
**Prerequisites:**
|
||||||
|
1. **Docker daemon running**
|
||||||
|
2. **Circuit assets** in `testing-framework/assets/stack/kzgrs_test_params` (fetched via `scripts/setup-nomos-circuits.sh`)
|
||||||
|
3. **Test image built** (see below)
|
||||||
|
|
||||||
|
**Build the test image:**
|
||||||
|
```bash
|
||||||
|
# Fetch circuit assets first
|
||||||
|
chmod +x scripts/setup-nomos-circuits.sh
|
||||||
|
scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
|
||||||
|
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
|
||||||
|
|
||||||
|
# Build image (embeds assets)
|
||||||
|
chmod +x testing-framework/assets/stack/scripts/build_test_image.sh
|
||||||
|
testing-framework/assets/stack/scripts/build_test_image.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run the example:**
|
||||||
|
```bash
|
||||||
|
NOMOS_TESTNET_IMAGE=nomos-testnet:local \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin compose_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
**Required environment variables:**
|
||||||
|
- `NOMOS_TESTNET_IMAGE=nomos-testnet:local` — Image tag (must match built image)
|
||||||
|
- `POL_PROOF_DEV_MODE=true` — **Critical:** Without this, proof generation is CPU-intensive and tests will timeout
|
||||||
|
|
||||||
|
**Optional environment variables:**
|
||||||
|
- `COMPOSE_NODE_PAIRS=1x1` — Topology: "validators×executors" (default varies by example)
|
||||||
|
- `TEST_FRAMEWORK_PROMETHEUS_PORT=9091` — Override Prometheus port (default: 9090)
|
||||||
|
- `COMPOSE_RUNNER_HOST=127.0.0.1` — Host address for port mappings (default: 127.0.0.1)
|
||||||
|
- `COMPOSE_RUNNER_PRESERVE=1` — Keep containers running after test (for debugging)
|
||||||
|
- `NOMOS_LOG_DIR=/tmp/compose-logs` — Write logs to files inside containers (requires copy-out or volume mount)
|
||||||
|
- `NOMOS_LOG_LEVEL=debug` — Set log level
|
||||||
|
|
||||||
|
**Compose-specific features:**
|
||||||
|
- **Node control support**: Only runner that supports chaos testing (`.enable_node_control()` + `.chaos()` workloads)
|
||||||
|
- **Prometheus observability**: Metrics at `http://localhost:9090`
|
||||||
|
|
||||||
|
**Important:** Chaos workloads (random restarts) **only work with ComposeDeployer**. LocalDeployer and K8sDeployer do not support node control.
|
||||||
|
|
||||||
|
### K8s Runner
|
||||||
|
|
||||||
|
**Prerequisites:**
|
||||||
|
1. **Kubernetes cluster** with `kubectl` configured and working
|
||||||
|
2. **Circuit assets** in `testing-framework/assets/stack/kzgrs_test_params`
|
||||||
|
3. **Test image built** (same as Compose: `testing-framework/assets/stack/scripts/build_test_image.sh`)
|
||||||
|
4. **Image available in cluster** (loaded via `kind`, `minikube`, or pushed to registry)
|
||||||
|
5. **POL_PROOF_DEV_MODE=true** environment variable set
|
||||||
|
|
||||||
|
**Load image into cluster:**
|
||||||
|
```bash
|
||||||
|
# For kind clusters
|
||||||
|
export NOMOS_TESTNET_IMAGE=nomos-testnet:local
|
||||||
|
kind load docker-image nomos-testnet:local
|
||||||
|
|
||||||
|
# For minikube
|
||||||
|
minikube image load nomos-testnet:local
|
||||||
|
|
||||||
|
# For remote clusters (push to registry)
|
||||||
|
docker tag nomos-testnet:local your-registry/nomos-testnet:local
|
||||||
|
docker push your-registry/nomos-testnet:local
|
||||||
|
export NOMOS_TESTNET_IMAGE=your-registry/nomos-testnet:local
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run the example:**
|
||||||
|
```bash
|
||||||
|
export NOMOS_TESTNET_IMAGE=nomos-testnet:local
|
||||||
|
export POL_PROOF_DEV_MODE=true
|
||||||
|
cargo run -p runner-examples --bin k8s_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:**
|
||||||
|
- K8s runner mounts `testing-framework/assets/stack/kzgrs_test_params` as a hostPath volume. Ensure this directory exists and contains circuit assets on the node where pods will be scheduled.
|
||||||
|
- **No node control support yet**: Chaos workloads (`.enable_node_control()`) will fail. Use ComposeDeployer for chaos testing.
|
||||||
|
|
||||||
|
## Circuit Assets (KZG Parameters)
|
||||||
|
|
||||||
|
DA workloads require KZG cryptographic parameters for polynomial commitment schemes.
|
||||||
|
|
||||||
|
### Asset Location
|
||||||
|
|
||||||
|
**Default path:** `testing-framework/assets/stack/kzgrs_test_params`
|
||||||
|
|
||||||
|
**Override:** Set `NOMOS_KZGRS_PARAMS_PATH` to use a custom location:
|
||||||
|
```bash
|
||||||
|
NOMOS_KZGRS_PARAMS_PATH=/path/to/custom/params cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
### Getting Circuit Assets
|
||||||
|
|
||||||
|
**Option 1: Use helper script** (recommended):
|
||||||
|
```bash
|
||||||
|
# From the repository root
|
||||||
|
chmod +x scripts/setup-nomos-circuits.sh
|
||||||
|
scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
|
||||||
|
|
||||||
|
# Copy to default location
|
||||||
|
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option 2: Build locally** (advanced):
|
||||||
|
```bash
|
||||||
|
# Requires Go, Rust, and circuit build tools
|
||||||
|
make kzgrs_test_params
|
||||||
|
```
|
||||||
|
|
||||||
|
### CI Workflow
|
||||||
|
|
||||||
|
The CI automatically fetches and places assets:
|
||||||
|
```yaml
|
||||||
|
- name: Install circuits for host build
|
||||||
|
run: |
|
||||||
|
scripts/setup-nomos-circuits.sh v0.3.1 "$TMPDIR/nomos-circuits"
|
||||||
|
cp -a "$TMPDIR/nomos-circuits"/. testing-framework/assets/stack/kzgrs_test_params/
|
||||||
|
```
|
||||||
|
|
||||||
|
### When Are Assets Needed?
|
||||||
|
|
||||||
|
| Runner | When Required |
|
||||||
|
|--------|---------------|
|
||||||
|
| **Local** | Always (for DA workloads) |
|
||||||
|
| **Compose** | During image build (baked into `NOMOS_TESTNET_IMAGE`) |
|
||||||
|
| **K8s** | During image build + deployed to cluster via hostPath volume |
|
||||||
|
|
||||||
|
**Error without assets:**
|
||||||
|
```
|
||||||
|
Error: missing KZG parameters at testing-framework/assets/stack/kzgrs_test_params
|
||||||
|
```
|
||||||
|
|
||||||
|
## Logging and Observability
|
||||||
|
|
||||||
|
### Node Logging vs Framework Logging
|
||||||
|
|
||||||
|
**Critical distinction:** Node logs and framework logs use different configuration mechanisms.
|
||||||
|
|
||||||
|
| Component | Controlled By | Purpose |
|
||||||
|
|-----------|--------------|---------|
|
||||||
|
| **Framework binaries** (`cargo run -p runner-examples --bin local_runner`) | `RUST_LOG` | Runner orchestration, deployment logs |
|
||||||
|
| **Node processes** (validators, executors spawned by runner) | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER`, `NOMOS_LOG_DIR` | Consensus, DA, mempool, network logs |
|
||||||
|
|
||||||
|
**Common mistake:** Setting `RUST_LOG=debug` only increases verbosity of the runner binary itself. Node logs remain at their default level unless you also set `NOMOS_LOG_LEVEL=debug`.
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
# This only makes the RUNNER verbose, not the nodes:
|
||||||
|
RUST_LOG=debug cargo run -p runner-examples --bin local_runner
|
||||||
|
|
||||||
|
# This makes the NODES verbose:
|
||||||
|
NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
|
||||||
|
|
||||||
|
# Both verbose (typically not needed):
|
||||||
|
RUST_LOG=debug NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
### Logging Environment Variables
|
||||||
|
|
||||||
|
| Variable | Default | Effect |
|
||||||
|
|----------|---------|--------|
|
||||||
|
| `NOMOS_LOG_DIR` | None (console only) | Directory for per-node log files. If unset, logs go to stdout/stderr. |
|
||||||
|
| `NOMOS_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
|
||||||
|
| `NOMOS_LOG_FILTER` | None | Fine-grained target filtering (e.g., `consensus=trace,da=debug`) |
|
||||||
|
| `NOMOS_TESTS_TRACING` | `false` | Enable tracing subscriber for local runner file logging |
|
||||||
|
| `NOMOS_OTLP_ENDPOINT` | None | OTLP trace endpoint (optional, disables OTLP noise if unset) |
|
||||||
|
| `NOMOS_OTLP_METRICS_ENDPOINT` | None | OTLP metrics endpoint (optional) |
|
||||||
|
|
||||||
|
**Example:** Full debug logging to files:
|
||||||
|
```bash
|
||||||
|
NOMOS_TESTS_TRACING=true \
|
||||||
|
NOMOS_LOG_DIR=/tmp/test-logs \
|
||||||
|
NOMOS_LOG_LEVEL=debug \
|
||||||
|
NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug" \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
### Per-Node Log Files
|
||||||
|
|
||||||
|
When `NOMOS_LOG_DIR` is set, each node writes logs to separate files:
|
||||||
|
|
||||||
|
**File naming pattern:**
|
||||||
|
- **Validators**: Prefix `nomos-node-0`, `nomos-node-1`, etc. (may include timestamp suffix)
|
||||||
|
- **Executors**: Prefix `nomos-executor-0`, `nomos-executor-1`, etc. (may include timestamp suffix)
|
||||||
|
|
||||||
|
**Local runner caveat:** By default, the local runner writes logs to temporary directories in the working directory. These are automatically cleaned up after tests complete. To preserve logs, you MUST set both `NOMOS_TESTS_TRACING=true` AND `NOMOS_LOG_DIR=/path/to/logs`.
|
||||||
|
|
||||||
|
### Filter Target Names
|
||||||
|
|
||||||
|
Common target prefixes for `NOMOS_LOG_FILTER`:
|
||||||
|
|
||||||
|
| Target Prefix | Subsystem |
|
||||||
|
|---------------|-----------|
|
||||||
|
| `nomos_consensus` | Consensus (Cryptarchia) |
|
||||||
|
| `nomos_da_sampling` | DA sampling service |
|
||||||
|
| `nomos_da_dispersal` | DA dispersal service |
|
||||||
|
| `nomos_da_verifier` | DA verification |
|
||||||
|
| `nomos_mempool` | Transaction mempool |
|
||||||
|
| `nomos_blend` | Mix network/privacy layer |
|
||||||
|
| `chain_network` | P2P networking |
|
||||||
|
| `chain_leader` | Leader election |
|
||||||
|
|
||||||
|
**Example filter:**
|
||||||
|
```bash
|
||||||
|
NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug,chain_network=info"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Accessing Logs Per Runner
|
||||||
|
|
||||||
|
#### Local Runner
|
||||||
|
|
||||||
|
**Default (temporary directories, auto-cleanup):**
|
||||||
|
```bash
|
||||||
|
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
|
||||||
|
# Logs written to temporary directories in working directory
|
||||||
|
# Automatically cleaned up after test completes
|
||||||
|
```
|
||||||
|
|
||||||
|
**Persistent file output:**
|
||||||
|
```bash
|
||||||
|
NOMOS_TESTS_TRACING=true \
|
||||||
|
NOMOS_LOG_DIR=/tmp/local-logs \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
|
||||||
|
# After test completes:
|
||||||
|
ls /tmp/local-logs/
|
||||||
|
# Files with prefix: nomos-node-0*, nomos-node-1*, nomos-executor-0*
|
||||||
|
# May include timestamps in filename
|
||||||
|
```
|
||||||
|
|
||||||
|
**Both flags required:** You MUST set both `NOMOS_TESTS_TRACING=true` (enables tracing file sink) AND `NOMOS_LOG_DIR` (specifies directory) to get persistent logs.
|
||||||
|
|
||||||
|
#### Compose Runner
|
||||||
|
|
||||||
|
**Via Docker logs (default, recommended):**
|
||||||
|
```bash
|
||||||
|
# List containers (note the UUID prefix in names)
|
||||||
|
docker ps --filter "name=nomos-compose-"
|
||||||
|
|
||||||
|
# Stream logs from specific container
|
||||||
|
docker logs -f <container-id-or-name>
|
||||||
|
|
||||||
|
# Or use name pattern matching:
|
||||||
|
docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Via file collection (advanced):**
|
||||||
|
|
||||||
|
Setting `NOMOS_LOG_DIR` writes files **inside the container**. To access them, you must either:
|
||||||
|
|
||||||
|
1. **Copy files out after the run:**
|
||||||
|
```bash
|
||||||
|
NOMOS_LOG_DIR=/logs \
|
||||||
|
NOMOS_TESTNET_IMAGE=nomos-testnet:local \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin compose_runner
|
||||||
|
|
||||||
|
# After test, copy files from containers:
|
||||||
|
docker ps --filter "name=nomos-compose-"
|
||||||
|
docker cp <container-id>:/logs/nomos-node-0* /tmp/
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Mount a host volume** (requires modifying compose template):
|
||||||
|
```yaml
|
||||||
|
volumes:
|
||||||
|
- /tmp/host-logs:/logs # Add to docker-compose.yml.tera
|
||||||
|
```
|
||||||
|
|
||||||
|
**Recommendation:** Use `docker logs` by default. File collection inside containers is complex and rarely needed.
|
||||||
|
|
||||||
|
**Keep containers for debugging:**
|
||||||
|
```bash
|
||||||
|
COMPOSE_RUNNER_PRESERVE=1 \
|
||||||
|
NOMOS_TESTNET_IMAGE=nomos-testnet:local \
|
||||||
|
cargo run -p runner-examples --bin compose_runner
|
||||||
|
# Containers remain running after test—inspect with docker logs or docker exec
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Container names follow pattern `nomos-compose-{uuid}-validator-{index}-1` where `{uuid}` changes per run.
|
||||||
|
|
||||||
|
#### K8s Runner
|
||||||
|
|
||||||
|
**Via kubectl logs (use label selectors):**
|
||||||
|
```bash
|
||||||
|
# List pods
|
||||||
|
kubectl get pods
|
||||||
|
|
||||||
|
# Stream logs using label selectors (recommended)
|
||||||
|
kubectl logs -l app=nomos-validator -f
|
||||||
|
kubectl logs -l app=nomos-executor -f
|
||||||
|
|
||||||
|
# Stream logs from specific pod
|
||||||
|
kubectl logs -f nomos-validator-0
|
||||||
|
|
||||||
|
# Previous logs from crashed pods
|
||||||
|
kubectl logs --previous -l app=nomos-validator
|
||||||
|
```
|
||||||
|
|
||||||
|
**Download logs for offline analysis:**
|
||||||
|
```bash
|
||||||
|
# Using label selectors
|
||||||
|
kubectl logs -l app=nomos-validator --tail=1000 > all-validators.log
|
||||||
|
kubectl logs -l app=nomos-executor --tail=1000 > all-executors.log
|
||||||
|
|
||||||
|
# Specific pods
|
||||||
|
kubectl logs nomos-validator-0 > validator-0.log
|
||||||
|
kubectl logs nomos-executor-1 > executor-1.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Specify namespace (if not using default):**
|
||||||
|
```bash
|
||||||
|
kubectl logs -n my-namespace -l app=nomos-validator -f
|
||||||
|
```
|
||||||
|
|
||||||
|
### OTLP and Telemetry
|
||||||
|
|
||||||
|
**OTLP exporters are optional.** If you see errors about unreachable OTLP endpoints, it's safe to ignore them unless you're actively collecting traces/metrics.
|
||||||
|
|
||||||
|
**To enable OTLP:**
|
||||||
|
```bash
|
||||||
|
NOMOS_OTLP_ENDPOINT=http://localhost:4317 \
|
||||||
|
NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318 \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
**To silence OTLP errors:** Simply leave these variables unset (the default).
|
||||||
|
|
||||||
|
### Observability: Prometheus and Node APIs
|
||||||
|
|
||||||
|
Runners expose metrics and node HTTP endpoints for expectation code and debugging:
|
||||||
|
|
||||||
|
**Prometheus (Compose only):**
|
||||||
|
- Default: `http://localhost:9090`
|
||||||
|
- Override: `TEST_FRAMEWORK_PROMETHEUS_PORT=9091`
|
||||||
|
- Access from expectations: `ctx.telemetry().prometheus_endpoint()`
|
||||||
|
|
||||||
|
**Node APIs:**
|
||||||
|
- Access from expectations: `ctx.node_clients().validators().get(0)`
|
||||||
|
- Endpoints: consensus info, network info, DA membership, etc.
|
||||||
|
- See `testing-framework/core/src/nodes/api_client.rs` for available methods
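
For a concrete feel, here is a rough sketch of expectation-style code that polls a validator's API. The `node_clients()`, `validator_clients()`, and `consensus_info()` calls mirror the example binaries; the function shape and the `RunContext` type are illustrative assumptions, not the framework's exact expectation trait.

```rust
// Illustrative sketch only: verify that block height advanced past a baseline
// by querying the first validator's consensus info.
async fn assert_height_progressed(
    ctx: &RunContext, // hypothetical: the context handed to expectation code
    baseline_height: u64,
) -> Result<(), Box<dyn std::error::Error>> {
    let clients = ctx.node_clients().validator_clients().to_vec();
    let info = clients
        .first()
        .ok_or("no validator clients available")?
        .consensus_info()
        .await?;
    if info.height <= baseline_height {
        return Err(format!(
            "height did not progress: baseline {baseline_height}, current {}",
            info.height
        )
        .into());
    }
    Ok(())
}
```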
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
Expose[Runner exposes endpoints/ports] --> Collect[Runtime collects block/health signals]
|
||||||
|
Collect --> Consume[Expectations consume signals<br/>decide pass/fail]
|
||||||
|
Consume --> Inspect[Operators inspect logs/metrics<br/>when failures arise]
|
||||||
|
```
|
||||||
4
book/src/part-i.md
Normal file
4
book/src/part-i.md
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Part I — Foundations
|
||||||
|
|
||||||
|
Conceptual chapters that establish the mental model for the framework and how
|
||||||
|
it approaches multi-node testing.
|
||||||
4
book/src/part-ii.md
Normal file
4
book/src/part-ii.md
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Part II — User Guide
|
||||||
|
|
||||||
|
Practical guidance for shaping scenarios, combining workloads and expectations,
|
||||||
|
and running them across different environments.
|
||||||
4
book/src/part-iii.md
Normal file
4
book/src/part-iii.md
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Part III — Developer Reference
|
||||||
|
|
||||||
|
Deep dives for contributors who extend the framework, evolve its abstractions,
|
||||||
|
or maintain the crate set.
|
||||||
4
book/src/part-iv.md
Normal file
4
book/src/part-iv.md
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Part IV — Appendix
|
||||||
|
|
||||||
|
Quick-reference material and supporting guidance to keep scenarios discoverable,
|
||||||
|
debuggable, and consistent.
|
||||||
16
book/src/project-context-primer.md
Normal file
16
book/src/project-context-primer.md
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# Project Context Primer
|
||||||
|
|
||||||
|
This book focuses on the Nomos Testing Framework. It assumes familiarity with
|
||||||
|
the Nomos architecture, but for completeness, here is a short primer.
|
||||||
|
|
||||||
|
- **Nomos** is a modular blockchain protocol composed of validators, executors,
|
||||||
|
and a data-availability (DA) subsystem.
|
||||||
|
- **Validators** participate in consensus and produce blocks.
|
||||||
|
- **Executors** are validators with the DA dispersal service enabled. They perform
|
||||||
|
all validator functions plus submit blob data to the DA network.
|
||||||
|
- **Data Availability (DA)** ensures that blob data submitted via channel operations
|
||||||
|
in transactions is published and retrievable by the network.
|
||||||
|
|
||||||
|
These roles interact tightly, which is why meaningful testing must be performed
|
||||||
|
in multi-node environments that include real networking, timing, and DA
|
||||||
|
interaction.
|
||||||
187
book/src/quickstart.md
Normal file
187
book/src/quickstart.md
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
# Quickstart
|
||||||
|
|
||||||
|
Get a working example running quickly.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Rust toolchain (nightly)
|
||||||
|
- Sibling `nomos-node` checkout built and available
|
||||||
|
- This repository cloned
|
||||||
|
- Unix-like system (tested on Linux and macOS)
|
||||||
|
|
||||||
|
## Your First Test
|
||||||
|
|
||||||
|
The framework ships with runnable example binaries in `examples/src/bin/`. Let's start with the local runner:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# From the nomos-testing directory
|
||||||
|
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
This runs a complete scenario with **defaults**: 1 validator + 1 executor, mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration.
|
||||||
|
|
||||||
|
**Core API Pattern** (simplified example):
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use testing_framework_workflows::ScenarioBuilderExt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
// Define the scenario (1 validator + 1 executor, tx + DA workload)
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(1)
|
||||||
|
.executors(1)
|
||||||
|
.apply()
|
||||||
|
.wallets(64)
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.users(8)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1) // 1 channel operation per block
|
||||||
|
.blob_rate(1) // 1 blob dispersal per block
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.with_run_duration(Duration::from_secs(60))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
// Deploy and run
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** The examples are binaries with `#[tokio::main]`, not test functions. If you want to write integration tests, wrap this pattern in `#[tokio::test]` functions in your own test suite.
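
A minimal sketch of that wrapping, assuming the same imports as the snippet above (the test name is arbitrary, and `POL_PROOF_DEV_MODE=true` still needs to be set when running it):

```rust
// Sketch: the Quickstart plan wrapped in a #[tokio::test] so it runs under
// `cargo test`. Counts match the defaults above; DA traffic is omitted for brevity.
use std::time::Duration;

use testing_framework_core::scenario::{Deployer as _, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt as _;

#[tokio::test]
async fn consensus_liveness_smoke() -> Result<(), Box<dyn std::error::Error>> {
    let mut plan = ScenarioBuilder::topology()
        .network_star()
        .validators(1)
        .executors(1)
        .apply()
        .wallets(64)
        .transactions()
        .rate(5)
        .users(8)
        .apply()
        .expect_consensus_liveness()
        .with_run_duration(Duration::from_secs(60))
        .build();

    let runner = LocalDeployer::default().deploy(&plan).await?;
    let _handle = runner.run(&mut plan).await?;
    Ok(())
}
```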
|
||||||
|
|
||||||
|
**Important:** `POL_PROOF_DEV_MODE=true` disables expensive Groth16 zero-knowledge proof generation for leader election. Without it, proof generation is CPU-intensive and tests will time out. **This is required for all runners** (local, compose, k8s) for practical testing. Never use it in production.
|
||||||
|
|
||||||
|
**What you should see:**
|
||||||
|
- Nodes spawn as local processes
|
||||||
|
- Consensus starts producing blocks
|
||||||
|
- Scenario runs for the configured duration
|
||||||
|
- Node logs written to temporary directories (auto-cleaned up after the test)
|
||||||
|
- To persist logs: set `NOMOS_TESTS_TRACING=true` and `NOMOS_LOG_DIR=/path/to/logs` (file names use a prefix like `nomos-node-0` and may include timestamps)
|
||||||
|
|
||||||
|
## What Just Happened?
|
||||||
|
|
||||||
|
Let's unpack the code:
|
||||||
|
|
||||||
|
### 1. Topology Configuration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
ScenarioBuilder::topology()
|
||||||
|
.network_star() // Star topology: all nodes connect to seed
|
||||||
|
.validators(1) // 1 validator node
|
||||||
|
.executors(1) // 1 executor node (validator + DA dispersal)
|
||||||
|
.apply()
|
||||||
|
```
|
||||||
|
|
||||||
|
This defines **what** your test network looks like.
|
||||||
|
|
||||||
|
### 2. Wallet Seeding
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.wallets(64) // Seed 64 funded wallet accounts
|
||||||
|
```
|
||||||
|
|
||||||
|
Provides funded accounts for transaction submission.
|
||||||
|
|
||||||
|
### 3. Workloads
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.users(8) // Use 8 of the 64 wallets
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1) // 1 channel operation per block
|
||||||
|
.blob_rate(1) // 1 blob dispersal per block
|
||||||
|
.apply()
|
||||||
|
```
|
||||||
|
|
||||||
|
Generates both transaction and DA traffic to stress both subsystems.
|
||||||
|
|
||||||
|
### 4. Expectation
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
```
|
||||||
|
|
||||||
|
This says **what success means**: blocks must be produced continuously.
|
||||||
|
|
||||||
|
### 5. Run Duration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
.with_run_duration(Duration::from_secs(60))
|
||||||
|
```
|
||||||
|
|
||||||
|
Run for 60 seconds (~27 blocks with default 2s slots, 0.9 coefficient). The framework enforces a minimum run duration of at least 2× the consensus slot duration.
|
||||||
|
|
||||||
|
### 6. Deploy and Execute
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let deployer = LocalDeployer::default(); // Use local process deployer
|
||||||
|
let runner = deployer.deploy(&plan).await?; // Provision infrastructure
|
||||||
|
let _handle = runner.run(&mut plan).await?; // Execute workloads & expectations
|
||||||
|
```
|
||||||
|
|
||||||
|
**Deployer** provisions the infrastructure. **Runner** orchestrates execution.
|
||||||
|
|
||||||
|
## Adjust the Topology
|
||||||
|
|
||||||
|
The binary accepts environment variables to adjust defaults:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Scale up to 3 validators + 2 executors, run for 2 minutes
|
||||||
|
LOCAL_DEMO_VALIDATORS=3 \
|
||||||
|
LOCAL_DEMO_EXECUTORS=2 \
|
||||||
|
LOCAL_DEMO_RUN_SECS=120 \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
## Try Docker Compose
|
||||||
|
|
||||||
|
Use the same API with a different deployer for a reproducible containerized environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the test image first (includes circuit assets)
|
||||||
|
chmod +x scripts/setup-nomos-circuits.sh
|
||||||
|
scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits
|
||||||
|
cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/
|
||||||
|
|
||||||
|
chmod +x testing-framework/assets/stack/scripts/build_test_image.sh
|
||||||
|
testing-framework/assets/stack/scripts/build_test_image.sh
|
||||||
|
|
||||||
|
# Run with Compose
|
||||||
|
NOMOS_TESTNET_IMAGE=nomos-testnet:local \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin compose_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
**Benefit:** Reproducible containerized environment with Prometheus at `http://localhost:9090`.
|
||||||
|
|
||||||
|
**In code:** Just swap the deployer:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use testing_framework_runner_compose::ComposeDeployer;
|
||||||
|
|
||||||
|
// ... same scenario definition ...
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::default(); // Use Docker Compose
|
||||||
|
let runner = deployer.deploy(&plan).await?;
|
||||||
|
let _handle = runner.run(&mut plan).await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
Now that you have a working test:
|
||||||
|
|
||||||
|
- **Understand the philosophy**: [Testing Philosophy](testing-philosophy.md)
|
||||||
|
- **Learn the architecture**: [Architecture Overview](architecture-overview.md)
|
||||||
|
- **See more examples**: [Examples](examples.md)
|
||||||
|
- **API reference**: [Builder API Quick Reference](dsl-cheat-sheet.md)
|
||||||
|
- **Debug failures**: [Troubleshooting](troubleshooting.md)
|
||||||
|
|
||||||
44
book/src/runners.md
Normal file
44
book/src/runners.md
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
# Runners
|
||||||
|
|
||||||
|
Runners turn a scenario plan into a live environment while keeping the plan
|
||||||
|
unchanged. Choose based on feedback speed, reproducibility, and fidelity. For
|
||||||
|
environment and operational considerations, see [Operations](operations.md).
|
||||||
|
|
||||||
|
**Important:** All runners require `POL_PROOF_DEV_MODE=true` to avoid expensive Groth16 proof generation that causes timeouts.
|
||||||
|
|
||||||
|
## Local runner
|
||||||
|
- Launches node processes directly on the host.
|
||||||
|
- Fastest feedback loop and minimal orchestration overhead.
|
||||||
|
- Best for development-time iteration and debugging.
|
||||||
|
- **Can run in CI** for fast smoke tests.
|
||||||
|
- **Node control:** Not supported (chaos workloads not available)
|
||||||
|
|
||||||
|
## Docker Compose runner
|
||||||
|
- Starts nodes in containers to provide a reproducible multi-node stack on a
|
||||||
|
single machine.
|
||||||
|
- Discovers service ports and wires observability for convenient inspection.
|
||||||
|
- Good balance between fidelity and ease of setup.
|
||||||
|
- **Recommended for CI pipelines** (isolated environment, reproducible).
|
||||||
|
- **Node control:** Supported (can restart nodes for chaos testing)
|
||||||
|
|
||||||
|
## Kubernetes runner
|
||||||
|
- Deploys nodes onto a cluster for higher-fidelity, longer-running scenarios.
|
||||||
|
- Suits CI with cluster access or shared test environments where cluster behavior
|
||||||
|
and scheduling matter.
|
||||||
|
- **Node control:** Not supported yet (chaos workloads not available)
|
||||||
|
|
||||||
|
### Common expectations
|
||||||
|
- All runners require at least one validator and, for transaction scenarios,
|
||||||
|
access to seeded wallets.
|
||||||
|
- Readiness probes gate workload start so traffic begins only after nodes are
|
||||||
|
reachable.
|
||||||
|
- Environment flags can relax timeouts or increase tracing when diagnostics are
|
||||||
|
needed.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
Plan[Scenario Plan] --> RunSel{"Runner<br/>local / compose / k8s"}
|
||||||
|
RunSel --> Provision[Provision & readiness]
|
||||||
|
Provision --> Runtime[Runtime + observability]
|
||||||
|
Runtime --> Exec[Workloads & Expectations execute]
|
||||||
|
```
|
||||||
18
book/src/running-scenarios.md
Normal file
18
book/src/running-scenarios.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# Running Scenarios
|
||||||
|
|
||||||
|
Running a scenario follows the same conceptual flow regardless of environment:
|
||||||
|
|
||||||
|
1. Select or author a scenario plan that pairs a topology with workloads,
|
||||||
|
expectations, and a suitable run window.
|
||||||
|
2. Choose a deployer aligned with your environment (local, compose, or k8s) and
|
||||||
|
ensure its prerequisites are available.
|
||||||
|
3. Deploy the plan through the deployer, which provisions infrastructure and
|
||||||
|
returns a runner.
|
||||||
|
4. The runner orchestrates workload execution for the planned duration; keep
|
||||||
|
observability signals visible so you can correlate outcomes.
|
||||||
|
5. The runner evaluates expectations and captures results as the primary
|
||||||
|
pass/fail signal.
|
||||||
|
|
||||||
|
Use the same plan across different deployers to compare behavior between local
|
||||||
|
development and CI or cluster settings. For environment prerequisites and
|
||||||
|
flags, see [Operations](operations.md).
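
To make that last point concrete, here is a sketch that rebuilds the same plan and hands it to different deployers. The builder and deployer calls mirror the Quickstart; the wrapper function and the specific counts are illustrative.

```rust
// Illustrative: identical plan definition, deployed locally and then via
// Docker Compose. Only the deployer changes between iterations.
use std::time::Duration;

use testing_framework_core::scenario::{Deployer as _, ScenarioBuilder};
use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt as _;

async fn compare_environments() -> Result<(), Box<dyn std::error::Error>> {
    for use_compose in [false, true] {
        // Rebuild the plan each time so both runs start from the same definition.
        let mut plan = ScenarioBuilder::topology()
            .network_star()
            .validators(2)
            .executors(1)
            .apply()
            .wallets(64)
            .transactions()
            .rate(5)
            .users(8)
            .apply()
            .expect_consensus_liveness()
            .with_run_duration(Duration::from_secs(60))
            .build();

        if use_compose {
            let runner = ComposeDeployer::default().deploy(&plan).await?;
            let _handle = runner.run(&mut plan).await?;
        } else {
            let runner = LocalDeployer::default().deploy(&plan).await?;
            let _handle = runner.run(&mut plan).await?;
        }
    }
    Ok(())
}
```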
|
||||||
17
book/src/scenario-builder-ext-patterns.md
Normal file
17
book/src/scenario-builder-ext-patterns.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Core Content: ScenarioBuilderExt Patterns
|
||||||
|
|
||||||
|
Patterns that keep scenarios readable and reusable:
|
||||||
|
|
||||||
|
- **Topology-first**: start by shaping the cluster (counts, layout) so later
|
||||||
|
steps inherit a clear foundation.
|
||||||
|
- **Bundle defaults**: use the DSL helpers to attach common expectations (like
|
||||||
|
liveness) whenever you add a matching workload, reducing forgotten checks.
|
||||||
|
- **Intentional rates**: express traffic in per-block terms to align with
|
||||||
|
protocol timing rather than wall-clock assumptions.
|
||||||
|
- **Opt-in chaos**: enable restart patterns only in scenarios meant to probe
|
||||||
|
resilience; keep functional smoke tests deterministic.
|
||||||
|
- **Wallet clarity**: seed only the number of actors you need; it keeps
|
||||||
|
transaction scenarios deterministic and interpretable.
|
||||||
|
|
||||||
|
These patterns make scenario definitions self-explanatory while staying aligned
|
||||||
|
with the framework’s block-oriented timing model.
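
As a fragment (assuming the same imports as the Quickstart), the patterns above map onto a single builder chain like this; counts and rates are illustrative:

```rust
// Each comment ties a builder call back to one of the patterns above.
let mut plan = ScenarioBuilder::topology()
    .network_star()              // topology-first: shape the cluster up front
    .validators(2)
    .executors(1)
    .apply()
    .wallets(16)                 // wallet clarity: seed only what the workload needs
    .transactions()
    .rate(5)                     // intentional rates: per block, not per second
    .users(8)
    .apply()
    .expect_consensus_liveness() // align expectations with the workload
    .with_run_duration(Duration::from_secs(60))
    .build();
// Opt-in chaos: keep restart patterns in a separate, dedicated scenario.
```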
|
||||||
18
book/src/scenario-lifecycle.md
Normal file
18
book/src/scenario-lifecycle.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# Scenario Lifecycle
|
||||||
|
|
||||||
|
1. **Build the plan**: Declare a topology, attach workloads and expectations, and set the run window. The plan is the single source of truth for what will happen.
|
||||||
|
2. **Deploy**: Hand the plan to a deployer. It provisions the environment on the chosen backend, waits for nodes to signal readiness, and returns a runner.
|
||||||
|
3. **Drive workloads**: The runner starts traffic and behaviors (transactions, data-availability activity, restarts) for the planned duration.
|
||||||
|
4. **Observe blocks and signals**: Track block progression and other high-level metrics during or after the run window to ground assertions in protocol time.
|
||||||
|
5. **Evaluate expectations**: Once activity stops (and optional cooldown completes), the runner checks liveness and workload-specific outcomes to decide pass or fail.
|
||||||
|
6. **Cleanup**: Tear down resources so successive runs start fresh and do not inherit leaked state.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
P[Plan<br/>topology + workloads + expectations] --> D[Deploy<br/>deployer provisions]
|
||||||
|
D --> R[Runner<br/>orchestrates execution]
|
||||||
|
R --> W[Drive Workloads]
|
||||||
|
W --> O[Observe<br/>blocks/metrics]
|
||||||
|
O --> E[Evaluate Expectations]
|
||||||
|
E --> C[Cleanup]
|
||||||
|
```
|
||||||
23
book/src/scenario-model.md
Normal file
23
book/src/scenario-model.md
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# Scenario Model (Developer Level)
|
||||||
|
|
||||||
|
The scenario model defines clear, composable responsibilities:
|
||||||
|
|
||||||
|
- **Topology**: a declarative description of the cluster—how many nodes, their
|
||||||
|
roles, and the broad network and data-availability characteristics. It
|
||||||
|
represents the intended shape of the system under test.
|
||||||
|
- **Scenario**: a plan combining topology, workloads, expectations, and a run
|
||||||
|
window. Building a scenario validates prerequisites (like seeded wallets) and
|
||||||
|
ensures the run lasts long enough to observe meaningful block progression.
|
||||||
|
- **Workloads**: asynchronous tasks that generate traffic or conditions. They
|
||||||
|
use shared context to interact with the deployed cluster and may bundle
|
||||||
|
default expectations.
|
||||||
|
- **Expectations**: post-run assertions. They can capture baselines before
|
||||||
|
workloads start and evaluate success once activity stops.
|
||||||
|
- **Runtime**: coordinates workloads and expectations for the configured
|
||||||
|
duration, enforces cooldowns when control actions occur, and ensures cleanup
|
||||||
|
so runs do not leak resources.
|
||||||
|
|
||||||
|
Developers extending the model should keep these boundaries strict: topology
|
||||||
|
describes, scenarios assemble, deployers provision, runners orchestrate,
|
||||||
|
workloads drive, and expectations judge outcomes. For guidance on adding new
|
||||||
|
capabilities, see [Extending the Framework](extending.md).
|
||||||
155
book/src/testing-philosophy.md
Normal file
155
book/src/testing-philosophy.md
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
# Testing Philosophy
|
||||||
|
|
||||||
|
This framework embodies specific principles that shape how you author and run
|
||||||
|
scenarios. Understanding these principles helps you write effective tests and
|
||||||
|
interpret results correctly.
|
||||||
|
|
||||||
|
## Declarative over Imperative
|
||||||
|
|
||||||
|
Describe **what** you want to test, not **how** to orchestrate it:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Good: declarative
|
||||||
|
ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(2)
|
||||||
|
.executors(1)
|
||||||
|
.apply()
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
|
||||||
|
// Bad: imperative (framework doesn't work this way)
|
||||||
|
// spawn_validator(); spawn_executor();
|
||||||
|
// loop { submit_tx(); check_block(); }
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why it matters:** The framework handles deployment, readiness, and cleanup.
|
||||||
|
You focus on test intent, not infrastructure orchestration.
|
||||||
|
|
||||||
|
## Protocol Time, Not Wall Time
|
||||||
|
|
||||||
|
Reason in **blocks** and **consensus intervals**, not wall-clock seconds.
|
||||||
|
|
||||||
|
**Consensus defaults:**
|
||||||
|
- Slot duration: 2 seconds (NTP-synchronized, configurable via `CONSENSUS_SLOT_TIME`)
|
||||||
|
- Active slot coefficient: 0.9 (90% block probability per slot)
|
||||||
|
- Expected rate: ~27 blocks per minute
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Good: protocol-oriented thinking
|
||||||
|
let plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(2)
|
||||||
|
.executors(1)
|
||||||
|
.apply()
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.apply()
|
||||||
|
.with_run_duration(Duration::from_secs(60)) // Let framework calculate expected blocks
|
||||||
|
.expect_consensus_liveness() // "Did we produce the expected blocks?"
|
||||||
|
.build();
|
||||||
|
|
||||||
|
// Bad: wall-clock assumptions
|
||||||
|
// "I expect exactly 30 blocks in 60 seconds"
|
||||||
|
// This breaks on slow CI where slot timing might drift
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why it matters:** Slot timing is fixed (2s by default, NTP-synchronized), so the
|
||||||
|
expected number of blocks is predictable: ~27 blocks in 60s with the default
|
||||||
|
0.9 active slot coefficient. The framework calculates expected blocks from slot
|
||||||
|
duration and run window, making assertions protocol-based rather than tied to
|
||||||
|
specific wall-clock expectations. Assert on "blocks produced relative to slots"
|
||||||
|
not "blocks produced in exact wall-clock seconds".
|
||||||
|
|
||||||
|
## Determinism First, Chaos When Needed
|
||||||
|
|
||||||
|
**Default scenarios are repeatable:**
|
||||||
|
- Fixed topology
|
||||||
|
- Predictable traffic rates
|
||||||
|
- Deterministic checks
|
||||||
|
|
||||||
|
**Chaos is opt-in:**
|
||||||
|
```rust
|
||||||
|
// Separate: functional test (deterministic)
|
||||||
|
let plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(2)
|
||||||
|
.executors(1)
|
||||||
|
.apply()
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
|
||||||
|
// Separate: chaos test (introduces randomness)
|
||||||
|
let chaos_plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(3)
|
||||||
|
.executors(2)
|
||||||
|
.apply()
|
||||||
|
.enable_node_control()
|
||||||
|
.chaos()
|
||||||
|
.restart()
|
||||||
|
.apply()
|
||||||
|
.transactions()
|
||||||
|
.rate(5) // 5 transactions per block
|
||||||
|
.apply()
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why it matters:** Mixing determinism with chaos creates noisy, hard-to-debug
|
||||||
|
failures. Keeping them in separate scenarios makes failures actionable.
|
||||||
|
|
||||||
|
## Observable Health Signals
|
||||||
|
|
||||||
|
Prefer **user-facing signals** over internal state:
|
||||||
|
|
||||||
|
**Good checks:**
|
||||||
|
- Blocks progressing at expected rate (liveness)
|
||||||
|
- Transactions included within N blocks (inclusion)
|
||||||
|
- DA blobs retrievable (availability)
|
||||||
|
|
||||||
|
**Avoid internal checks:**
|
||||||
|
- Memory pool size
|
||||||
|
- Internal service state
|
||||||
|
- Cache hit rates
|
||||||
|
|
||||||
|
**Why it matters:** User-facing signals reflect actual system health.
|
||||||
|
Internal state can be "healthy" while the system is broken from a user
|
||||||
|
perspective.
|
||||||
|
|
||||||
|
## Minimum Run Windows
|
||||||
|
|
||||||
|
Always run long enough for **meaningful block production**:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Bad: too short
|
||||||
|
.with_run_duration(Duration::from_secs(5)) // ~2 blocks (with default 2s slots, 0.9 coeff)
|
||||||
|
|
||||||
|
// Good: enough blocks for assertions
|
||||||
|
.with_run_duration(Duration::from_secs(60)) // ~27 blocks (with default 2s slots, 0.9 coeff)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Block counts assume default consensus parameters:
|
||||||
|
- Slot duration: 2 seconds (configurable via `CONSENSUS_SLOT_TIME`)
|
||||||
|
- Active slot coefficient: 0.9 (90% block probability per slot)
|
||||||
|
- Formula: `blocks ≈ (duration / slot_duration) × active_slot_coeff`
|
||||||
|
|
||||||
|
If upstream changes these parameters, adjust your duration expectations accordingly.
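
As a quick sanity check of the formula, a small sketch in plain Rust; the inputs are the defaults quoted above, not values read from the framework:

```rust
// Back-of-envelope estimate of expected blocks for a run window.
fn expected_blocks(duration_secs: f64, slot_secs: f64, active_slot_coeff: f64) -> f64 {
    (duration_secs / slot_secs) * active_slot_coeff
}

fn main() {
    // 60 s window, 2 s slots, 0.9 coefficient => roughly 27 blocks.
    println!("{:.0}", expected_blocks(60.0, 2.0, 0.9));
}
```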
|
||||||
|
|
||||||
|
The framework enforces minimum durations (at least 2× slot duration), but be explicit. Very short runs risk false confidence—one lucky block doesn't prove liveness.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
These principles keep scenarios:
|
||||||
|
- **Portable** across environments (protocol time, declarative)
|
||||||
|
- **Debuggable** (determinism, separation of concerns)
|
||||||
|
- **Meaningful** (observable signals, sufficient duration)
|
||||||
|
|
||||||
|
When authoring scenarios, ask: "Does this test the protocol behavior or
|
||||||
|
my local environment quirks?"
|
||||||
33
book/src/topology-chaos.md
Normal file
33
book/src/topology-chaos.md
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
# Topology & Chaos Patterns
|
||||||
|
|
||||||
|
This page focuses on cluster manipulation: node control, chaos patterns, and
|
||||||
|
what the tooling supports today.
|
||||||
|
|
||||||
|
## Node control availability
|
||||||
|
- **Supported**: restart/peer control via `NodeControlHandle` (compose runner).
|
||||||
|
- **Not supported**: local runner does not expose node control; k8s runner does
|
||||||
|
not support it yet.
|
||||||
|
|
||||||
|
## Chaos patterns to consider
|
||||||
|
- **Restarts**: random restarts with minimum delay/cooldown to test recovery.
|
||||||
|
- **Partitions**: block/unblock peers to simulate partial isolation, then assert
|
||||||
|
height convergence after healing.
|
||||||
|
- **Validator churn**: stop one validator and start another (new key) mid-run to
|
||||||
|
test membership changes; expect convergence.
|
||||||
|
- **Load SLOs**: push tx/DA rates and assert inclusion/availability budgets
|
||||||
|
instead of only liveness.
|
||||||
|
- **API probes**: poll HTTP/RPC endpoints during chaos to ensure external
|
||||||
|
contracts stay healthy (shape + latency).
|
||||||
|
|
||||||
|
## Expectations to pair
|
||||||
|
- **Liveness/height convergence** after chaos windows.
|
||||||
|
- **SLO checks**: inclusion latency, DA responsiveness, API latency/shape.
|
||||||
|
- **Recovery checks**: ensure nodes that were isolated or restarted catch up to
|
||||||
|
cluster height within a timeout.
|
||||||
|
|
||||||
|
## Guidance
|
||||||
|
- Keep chaos realistic: avoid flapping or patterns you wouldn't operate in prod.
|
||||||
|
- Scope chaos: choose validators vs executors intentionally; don't restart all
|
||||||
|
nodes at once unless you're testing full outages.
|
||||||
|
- Combine chaos with observability: capture block feed/metrics and API health so
|
||||||
|
failures are diagnosable.
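
To make the restart pattern above concrete, here is a sketch using the chaos timing knobs from the compose example binary (`min_delay`, `max_delay`, `target_cooldown`); the imports mirror that binary, and the specific values are illustrative, not recommendations.

```rust
// Sketch of an opt-in restart chaos scenario (compose runner only).
use std::time::Duration;

use runner_examples::{ChaosBuilderExt as _, ScenarioBuilderExt as _};
use testing_framework_core::scenario::ScenarioBuilder;

let mut plan = ScenarioBuilder::topology()
    .network_star()
    .validators(3)
    .executors(2)
    .apply()
    .enable_node_control()                      // required before .chaos()
    .chaos()
    .restart()
    .min_delay(Duration::from_secs(120))        // lower bound on restart delay
    .max_delay(Duration::from_secs(180))        // upper bound on restart delay
    .target_cooldown(Duration::from_secs(240))  // let a restarted node recover
    .apply()
    .wallets(64)
    .transactions()
    .rate(5)
    .users(8)
    .apply()
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(300))
    .build();
```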
|
||||||
249
book/src/troubleshooting.md
Normal file
249
book/src/troubleshooting.md
Normal file
@ -0,0 +1,249 @@
|
|||||||
|
# Troubleshooting Scenarios
|
||||||
|
|
||||||
|
**Prerequisites for All Runners:**
|
||||||
|
- **`POL_PROOF_DEV_MODE=true`** MUST be set for all runners (local, compose, k8s) to avoid expensive Groth16 proof generation that causes timeouts
|
||||||
|
- **KZG circuit assets** must be present at `testing-framework/assets/stack/kzgrs_test_params/` for DA workloads (fetch via `scripts/setup-nomos-circuits.sh`)
|
||||||
|
|
||||||
|
## Quick Symptom Guide
|
||||||
|
|
||||||
|
Common symptoms and likely causes:
|
||||||
|
|
||||||
|
- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing KZG circuit assets for DA workloads, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets, extend duration, check node logs for startup errors.
|
||||||
|
- **Transactions not included**: unfunded or misconfigured wallets (check `.wallets(N)` vs `.users(M)`) or a transaction rate that exceeds what blocks can absorb—reduce the rate, increase the wallet count, and verify wallet setup in logs.
|
||||||
|
- **Chaos stalls the run**: chaos (node control) only works with ComposeDeployer; LocalDeployer and K8sDeployer don't support it (won't "stall", just can't execute chaos workloads). With compose, aggressive restart cadence can prevent consensus recovery—widen restart intervals.
|
||||||
|
- **Observability gaps**: metrics or logs unreachable because ports clash or services are not exposed—adjust observability ports and confirm runner wiring.
|
||||||
|
- **Flaky behavior across runs**: mixing chaos with functional smoke tests or inconsistent topology between environments—separate deterministic and chaos scenarios and standardize topology presets.
|
||||||
|
|
||||||
|
## Where to Find Logs
|
||||||
|
|
||||||
|
### Log Location Quick Reference
|
||||||
|
|
||||||
|
| Runner | Default Output | With `NOMOS_LOG_DIR` + Flags | Access Command |
|
||||||
|
|--------|---------------|------------------------------|----------------|
|
||||||
|
| **Local** | Temporary directories (cleaned up) | Per-node files with prefix `nomos-node-{index}` (requires `NOMOS_TESTS_TRACING=true`) | `cat $NOMOS_LOG_DIR/nomos-node-0*` |
|
||||||
|
| **Compose** | Docker container stdout/stderr | Per-node files inside containers (if path is mounted) | `docker ps` then `docker logs <container-id>` |
|
||||||
|
| **K8s** | Pod stdout/stderr | Per-node files inside pods (if path is mounted) | `kubectl logs -l app=nomos-validator` |
|
||||||
|
|
||||||
|
**Important Notes:**
|
||||||
|
- **Local runner**: Logs go to system temporary directories (NOT in working directory) by default and are automatically cleaned up after tests. To persist logs, you MUST set both `NOMOS_TESTS_TRACING=true` AND `NOMOS_LOG_DIR=/path/to/logs`.
|
||||||
|
- **Compose/K8s**: Per-node log files only exist inside containers/pods if `NOMOS_LOG_DIR` is set AND the path is writable inside the container/pod. By default, rely on `docker logs` or `kubectl logs`.
|
||||||
|
- **File naming**: Log files use prefix `nomos-node-{index}*` or `nomos-executor-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix).
|
||||||
|
- **Container names**: Compose containers include a project UUID, e.g., `nomos-compose-<uuid>-validator-0-1`, where `<uuid>` is randomly generated per run.
|
||||||
|
|
||||||
|
### Accessing Node Logs by Runner
|
||||||
|
|
||||||
|
#### Local Runner
|
||||||
|
|
||||||
|
**Console output (default):**
|
||||||
|
```bash
|
||||||
|
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner 2>&1 | tee test.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Persistent file output:**
|
||||||
|
```bash
|
||||||
|
NOMOS_TESTS_TRACING=true \
|
||||||
|
NOMOS_LOG_DIR=/tmp/debug-logs \
|
||||||
|
NOMOS_LOG_LEVEL=debug \
|
||||||
|
POL_PROOF_DEV_MODE=true \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
|
||||||
|
# Inspect logs (note: filenames include timestamps):
|
||||||
|
ls /tmp/debug-logs/
|
||||||
|
# Example: nomos-node-0.2024-12-01T10-30-45.log
|
||||||
|
tail -f /tmp/debug-logs/nomos-node-0* # Use wildcard to match timestamp
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Compose Runner
|
||||||
|
|
||||||
|
**Stream live logs:**
|
||||||
|
```bash
|
||||||
|
# List running containers (note the UUID prefix in names)
|
||||||
|
docker ps --filter "name=nomos-compose-"
|
||||||
|
|
||||||
|
# Find your container ID or name from the list, then:
|
||||||
|
docker logs -f <container-id>
|
||||||
|
|
||||||
|
# Or filter by name pattern:
|
||||||
|
docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
|
||||||
|
|
||||||
|
# Show last 100 lines
|
||||||
|
docker logs --tail 100 <container-id>
|
||||||
|
```
|
||||||
|
|
||||||
|
**Keep containers for post-mortem debugging:**
|
||||||
|
```bash
|
||||||
|
COMPOSE_RUNNER_PRESERVE=1 \
|
||||||
|
NOMOS_TESTNET_IMAGE=nomos-testnet:local \
|
||||||
|
cargo run -p runner-examples --bin compose_runner
|
||||||
|
|
||||||
|
# After test failure, containers remain running:
|
||||||
|
docker ps --filter "name=nomos-compose-"
|
||||||
|
docker exec -it <container-id> /bin/sh
|
||||||
|
docker logs <container-id> > debug.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1` or `nomos-compose-{uuid}-executor-{index}-1`, where `{uuid}` is randomly generated per run.
|
||||||
|
|
||||||
|
#### K8s Runner
|
||||||
|
|
||||||
|
**Important:** Always verify your namespace and use label selectors instead of assuming pod names.
|
||||||
|
|
||||||
|
**Stream pod logs (use label selectors):**
|
||||||
|
```bash
|
||||||
|
# Check your namespace first
|
||||||
|
kubectl config view --minify | grep namespace
|
||||||
|
|
||||||
|
# All validator pods (add -n <namespace> if not using default)
|
||||||
|
kubectl logs -l app=nomos-validator -f
|
||||||
|
|
||||||
|
# All executor pods
|
||||||
|
kubectl logs -l app=nomos-executor -f
|
||||||
|
|
||||||
|
# Specific pod by name (find exact name first)
|
||||||
|
kubectl get pods -l app=nomos-validator # Find the exact pod name
|
||||||
|
kubectl logs -f <actual-pod-name> # Then use it
|
||||||
|
|
||||||
|
# With explicit namespace
|
||||||
|
kubectl logs -n my-namespace -l app=nomos-validator -f
|
||||||
|
```
|
||||||
|
|
||||||
|
**Download logs from crashed pods:**
|
||||||
|
```bash
|
||||||
|
# Previous logs from crashed pod
|
||||||
|
kubectl get pods -l app=nomos-validator # Find crashed pod name first
|
||||||
|
kubectl logs --previous <actual-pod-name> > crashed-validator.log
|
||||||
|
|
||||||
|
# Or use label selector for all crashed validators
|
||||||
|
for pod in $(kubectl get pods -l app=nomos-validator -o name); do
|
||||||
|
kubectl logs --previous $pod > $(basename $pod)-previous.log 2>&1
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
**Access logs from all pods:**
|
||||||
|
```bash
|
||||||
|
# All pods in current namespace
|
||||||
|
for pod in $(kubectl get pods -o name); do
|
||||||
|
echo "=== $pod ==="
|
||||||
|
kubectl logs $pod
|
||||||
|
done > all-logs.txt
|
||||||
|
|
||||||
|
# Or use label selectors (recommended)
|
||||||
|
kubectl logs -l app=nomos-validator --tail=500 > validators.log
|
||||||
|
kubectl logs -l app=nomos-executor --tail=500 > executors.log
|
||||||
|
|
||||||
|
# With explicit namespace
|
||||||
|
kubectl logs -n my-namespace -l app=nomos-validator --tail=500 > validators.log
|
||||||
|
```
|
||||||
|
|
||||||
|
## Debugging Workflow
|
||||||
|
|
||||||
|
When a test fails, follow this sequence:
|
||||||
|
|
||||||
|
### 1. Check Framework Output
|
||||||
|
Start with the test harness output—did expectations fail? Was there a deployment error?
|
||||||
|
|
||||||
|
**Look for:**
|
||||||
|
- Expectation failure messages
|
||||||
|
- Timeout errors
|
||||||
|
- Deployment/readiness failures
|
||||||
|
|
||||||
|
### 2. Verify Node Readiness
|
||||||
|
Ensure all nodes started successfully and became ready before workloads began.
|
||||||
|
|
||||||
|
**Commands:**
|
||||||
|
```bash
|
||||||
|
# Local: check process list
|
||||||
|
ps aux | grep nomos
|
||||||
|
|
||||||
|
# Compose: check container status (note UUID in names)
|
||||||
|
docker ps -a --filter "name=nomos-compose-"
|
||||||
|
|
||||||
|
# K8s: check pod status (use label selectors, add -n <namespace> if needed)
|
||||||
|
kubectl get pods -l app=nomos-validator
|
||||||
|
kubectl get pods -l app=nomos-executor
|
||||||
|
kubectl describe pod <actual-pod-name> # Get name from above first
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Inspect Node Logs
|
||||||
|
Focus on the first node that exhibited problems or the node with the highest index (often the last to start).
|
||||||
|
|
||||||
|
**Common error patterns:**
|
||||||
|
- "Failed to bind address" → port conflict
|
||||||
|
- "Connection refused" → peer not ready or network issue
|
||||||
|
- "Proof verification failed" or "Proof generation timeout" → missing `POL_PROOF_DEV_MODE=true` (REQUIRED for all runners)
|
||||||
|
- "Failed to load KZG parameters" or "Circuit file not found" → missing KZG circuit assets at `testing-framework/assets/stack/kzgrs_test_params/`
|
||||||
|
- "Insufficient funds" → wallet seeding issue (increase `.wallets(N)` or reduce `.users(M)`)
|
||||||
|
|
||||||
|
### 4. Check Log Levels
|
||||||
|
If logs are too sparse, increase verbosity:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
NOMOS_LOG_LEVEL=debug \
|
||||||
|
NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug" \
|
||||||
|
cargo run -p runner-examples --bin local_runner
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Verify Observability Endpoints
|
||||||
|
If expectations report observability issues:
|
||||||
|
|
||||||
|
**Prometheus (Compose):**
|
||||||
|
```bash
|
||||||
|
curl http://localhost:9090/-/healthy
|
||||||
|
```
|
||||||
|
|
||||||
|
**Node HTTP APIs:**
|
||||||
|
```bash
|
||||||
|
curl http://localhost:18080/consensus/info # Adjust port per node
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6. Compare with Known-Good Scenario
|
||||||
|
Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it passes, the issue is in your workload or topology configuration.
|
||||||
|
|
||||||
|
## Common Error Messages
|
||||||
|
|
||||||
|
### "Consensus liveness expectation failed"
|
||||||
|
- **Cause**: Not enough blocks produced during run window, missing `POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing KZG assets for DA workloads
|
||||||
|
- **Fix**:
|
||||||
|
1. Verify `POL_PROOF_DEV_MODE=true` is set (REQUIRED for all runners)
|
||||||
|
2. Verify KZG assets exist at `testing-framework/assets/stack/kzgrs_test_params/` (for DA workloads)
|
||||||
|
3. Extend `with_run_duration()` to allow more blocks
|
||||||
|
4. Check node logs for proof generation or DA errors
|
||||||
|
5. Reduce transaction/DA rate if nodes are overwhelmed
|
||||||
|
|
||||||
|
### "Wallet seeding failed"
|
||||||
|
- **Cause**: Topology doesn't have enough funded wallets for the workload
|
||||||
|
- **Fix**: Increase `.wallets(N)` count or reduce `.users(M)` in transaction workload (ensure N ≥ M)
|
||||||
|
|
||||||
|
### "Node control not available"
|
||||||
|
- **Cause**: Runner doesn't support node control (only ComposeDeployer does), or `enable_node_control()` wasn't called
|
||||||
|
- **Fix**:
|
||||||
|
1. Use ComposeDeployer for chaos tests (LocalDeployer and K8sDeployer don't support node control)
|
||||||
|
2. Ensure `.enable_node_control()` is called in scenario before `.chaos()`
|
||||||
|
|
||||||
|
### "Readiness timeout"
|
||||||
|
- **Cause**: Nodes didn't become responsive within expected time (often due to missing prerequisites)
|
||||||
|
- **Fix**:
|
||||||
|
1. **Verify `POL_PROOF_DEV_MODE=true` is set** (REQUIRED for all runners—without it, proof generation is too slow)
|
||||||
|
2. Check node logs for startup errors (port conflicts, missing assets)
|
||||||
|
3. Verify network connectivity between nodes
|
||||||
|
4. For DA workloads, ensure KZG circuit assets are present
|
||||||
|
|
||||||
|
### "Port already in use"
|
||||||
|
- **Cause**: Previous test didn't clean up, or another process holds the port
|
||||||
|
- **Fix**: Kill orphaned processes (`pkill nomos-node`), clean up leftover containers (`docker compose down`), or restart Docker
|
||||||
|
|
||||||
|
### "Image not found: nomos-testnet:local"
|
||||||
|
- **Cause**: Docker image not built for Compose/K8s runners, or KZG assets not baked into image
|
||||||
|
- **Fix**:
|
||||||
|
1. Fetch KZG assets: `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits`
|
||||||
|
2. Copy to assets: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/`
|
||||||
|
3. Build image: `testing-framework/assets/stack/scripts/build_test_image.sh`
|
||||||
|
|
||||||
|
### "Failed to load KZG parameters" or "Circuit file not found"
|
||||||
|
- **Cause**: DA workload requires KZG circuit assets that aren't present
|
||||||
|
- **Fix**:
|
||||||
|
1. Fetch assets: `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits`
|
||||||
|
2. Copy to expected path: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/`
|
||||||
|
3. For Compose/K8s: rebuild image with assets baked in
|
||||||
|
|
||||||
|
For detailed logging configuration and observability setup, see [Operations](operations.md).
|
||||||
7
book/src/usage-patterns.md
Normal file
7
book/src/usage-patterns.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# Usage Patterns
|
||||||
|
|
||||||
|
- **Shape a topology, pick a runner**: choose local for quick iteration, compose for reproducible multi-node stacks with observability, or k8s for cluster-grade validation.
|
||||||
|
- **Compose workloads deliberately**: pair transactions and data-availability traffic for end-to-end coverage; add chaos only when assessing recovery and resilience.
|
||||||
|
- **Align expectations with goals**: use liveness-style checks to confirm the system keeps up with planned activity, and add workload-specific assertions for inclusion or availability.
|
||||||
|
- **Reuse plans across environments**: keep the scenario constant while swapping runners to compare behavior between developer machines and CI clusters.
|
||||||
|
- **Iterate with clear signals**: treat expectation outcomes as the primary pass/fail indicator, and adjust topology or workloads based on what those signals reveal.
|
||||||
6
book/src/what-you-will-learn.md
Normal file
6
book/src/what-you-will-learn.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# What You Will Learn
|
||||||
|
|
||||||
|
This book gives you a clear mental model for Nomos multi-node testing, shows how
|
||||||
|
to author scenarios that pair realistic workloads with explicit expectations,
|
||||||
|
and guides you to run them across local, containerized, and cluster environments
|
||||||
|
without changing the plan.
|
||||||
30
book/src/workloads.md
Normal file
30
book/src/workloads.md
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
# Core Content: Workloads & Expectations
|
||||||
|
|
||||||
|
Workloads describe the activity a scenario generates; expectations describe the
|
||||||
|
signals that must hold when that activity completes. Both are pluggable so
|
||||||
|
scenarios stay readable and purpose-driven.
|
||||||
|
|
||||||
|
## Workloads
|
||||||
|
- **Transaction workload**: submits user-level transactions at a configurable
|
||||||
|
rate and can limit how many distinct actors participate.
|
||||||
|
- **Data-availability workload**: drives blob and channel activity to exercise
|
||||||
|
data-availability paths.
|
||||||
|
- **Chaos workload**: triggers controlled node restarts to test resilience and
|
||||||
|
recovery behaviors (requires a runner that can control nodes).
|
||||||
|
|
||||||
|
## Expectations
|
||||||
|
- **Consensus liveness**: verifies the system continues to produce blocks in
|
||||||
|
line with the planned workload and timing window.
|
||||||
|
- **Workload-specific checks**: each workload can attach its own success
|
||||||
|
criteria (e.g., inclusion of submitted activity) so scenarios remain concise.
|
||||||
|
|
||||||
|
Together, workloads and expectations let you express both the pressure applied
|
||||||
|
to the system and the definition of “healthy” for that run.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
I[Inputs<br/>topology + wallets + rates] --> Init[Workload init]
|
||||||
|
Init --> Drive[Drive traffic]
|
||||||
|
Drive --> Collect[Collect signals]
|
||||||
|
Collect --> Eval[Expectations evaluate]
|
||||||
|
```
|
||||||
20
book/src/workspace-layout.md
Normal file
20
book/src/workspace-layout.md
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
# Workspace Layout
|
||||||
|
|
||||||
|
The workspace focuses on multi-node integration testing and sits alongside a
|
||||||
|
`nomos-node` checkout. Its crates separate concerns to keep scenarios
|
||||||
|
repeatable and portable:
|
||||||
|
|
||||||
|
- **Configs**: prepares high-level node, network, tracing, and wallet settings
|
||||||
|
used across test environments.
|
||||||
|
- **Core scenario orchestration**: the engine that holds topology descriptions,
|
||||||
|
scenario plans, runtimes, workloads, and expectations.
|
||||||
|
- **Workflows**: ready-made workloads (transactions, data-availability, chaos)
|
||||||
|
and reusable expectations assembled into a user-facing DSL.
|
||||||
|
- **Runners**: deployment backends for local processes, Docker Compose, and
|
||||||
|
Kubernetes, all consuming the same scenario plan.
|
||||||
|
- **Runner Examples** (`examples/runner-examples`): runnable binaries
|
||||||
|
(`local_runner.rs`, `compose_runner.rs`, `k8s_runner.rs`) that demonstrate
|
||||||
|
complete scenario execution with each deployer.
|
||||||
|
|
||||||
|
This split keeps configuration, orchestration, reusable traffic patterns, and
|
||||||
|
deployment adapters loosely coupled while sharing one mental model for tests.
|
||||||
45
book/theme/highlight-github.css
Normal file
45
book/theme/highlight-github.css
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
/* GitHub-like light highlighting */
|
||||||
|
.hljs {
|
||||||
|
display: block;
|
||||||
|
overflow-x: auto;
|
||||||
|
padding: 0.5em;
|
||||||
|
background: #f6f8fa;
|
||||||
|
color: #24292e;
|
||||||
|
}
|
||||||
|
.hljs-comment,
|
||||||
|
.hljs-quote {
|
||||||
|
color: #6a737d;
|
||||||
|
font-style: italic;
|
||||||
|
}
|
||||||
|
.hljs-keyword,
|
||||||
|
.hljs-selector-tag,
|
||||||
|
.hljs-type {
|
||||||
|
color: #d73a49;
|
||||||
|
}
|
||||||
|
.hljs-string,
|
||||||
|
.hljs-title,
|
||||||
|
.hljs-name,
|
||||||
|
.hljs-attr,
|
||||||
|
.hljs-symbol,
|
||||||
|
.hljs-bullet {
|
||||||
|
color: #005cc5;
|
||||||
|
}
|
||||||
|
.hljs-number,
|
||||||
|
.hljs-literal {
|
||||||
|
color: #005cc5;
|
||||||
|
}
|
||||||
|
.hljs-section,
|
||||||
|
.hljs-selector-id,
|
||||||
|
.hljs-selector-class {
|
||||||
|
color: #22863a;
|
||||||
|
}
|
||||||
|
.hljs-built_in,
|
||||||
|
.hljs-type {
|
||||||
|
color: #6f42c1;
|
||||||
|
}
|
||||||
|
.hljs-emphasis {
|
||||||
|
font-style: italic;
|
||||||
|
}
|
||||||
|
.hljs-strong {
|
||||||
|
font-weight: bold;
|
||||||
|
}
|
||||||
17
book/theme/highlight-init.js
Normal file
17
book/theme/highlight-init.js
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
(function () {
|
||||||
|
const highlight = (attempt = 0) => {
|
||||||
|
if (window.hljs) {
|
||||||
|
window.hljs.highlightAll();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (attempt < 10) {
|
||||||
|
setTimeout(() => highlight(attempt + 1), 100);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (document.readyState === "loading") {
|
||||||
|
document.addEventListener("DOMContentLoaded", () => highlight());
|
||||||
|
} else {
|
||||||
|
highlight();
|
||||||
|
}
|
||||||
|
})();
|
||||||
43
book/theme/mermaid-init.js
Normal file
43
book/theme/mermaid-init.js
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
// Lightweight client-side Mermaid rendering for mdBook.
|
||||||
|
(function () {
|
||||||
|
const CDN = "https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.js";
|
||||||
|
|
||||||
|
function loadMermaid(cb) {
|
||||||
|
if (window.mermaid) {
|
||||||
|
cb();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const script = document.createElement("script");
|
||||||
|
script.src = CDN;
|
||||||
|
script.onload = cb;
|
||||||
|
script.onerror = () => console.warn("Failed to load mermaid from CDN:", CDN);
|
||||||
|
document.head.appendChild(script);
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderMermaidBlocks() {
|
||||||
|
const codeBlocks = Array.from(
|
||||||
|
document.querySelectorAll("pre code.language-mermaid")
|
||||||
|
);
|
||||||
|
if (codeBlocks.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
codeBlocks.forEach((codeBlock, idx) => {
|
||||||
|
const pre = codeBlock.parentElement;
|
||||||
|
const container = document.createElement("div");
|
||||||
|
container.className = "mermaid";
|
||||||
|
container.textContent = codeBlock.textContent;
|
||||||
|
container.id = `mermaid-diagram-${idx}`;
|
||||||
|
pre.replaceWith(container);
|
||||||
|
});
|
||||||
|
|
||||||
|
if (window.mermaid) {
|
||||||
|
window.mermaid.initialize({ startOnLoad: false });
|
||||||
|
window.mermaid.run();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
document.addEventListener("DOMContentLoaded", () => {
|
||||||
|
loadMermaid(renderMermaidBlocks);
|
||||||
|
});
|
||||||
|
})();
|
||||||
57
book/theme/mermaid-overlay.js
Normal file
57
book/theme/mermaid-overlay.js
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
(function () {
|
||||||
|
const openOverlay = (svg) => {
|
||||||
|
const overlay = document.createElement("div");
|
||||||
|
overlay.className = "mermaid-overlay";
|
||||||
|
|
||||||
|
const content = document.createElement("div");
|
||||||
|
content.className = "mermaid-overlay__content";
|
||||||
|
|
||||||
|
const clone = svg.cloneNode(true);
|
||||||
|
clone.removeAttribute("width");
|
||||||
|
clone.removeAttribute("height");
|
||||||
|
clone.style.width = "95vw";
|
||||||
|
clone.style.maxWidth = "1400px";
|
||||||
|
clone.style.height = "auto";
|
||||||
|
clone.style.display = "block";
|
||||||
|
clone.style.margin = "0 auto";
|
||||||
|
|
||||||
|
content.appendChild(clone);
|
||||||
|
overlay.appendChild(content);
|
||||||
|
document.body.appendChild(overlay);
|
||||||
|
|
||||||
|
const close = () => overlay.remove();
|
||||||
|
overlay.addEventListener("click", close);
|
||||||
|
document.addEventListener(
|
||||||
|
"keydown",
|
||||||
|
(e) => {
|
||||||
|
if (e.key === "Escape") {
|
||||||
|
close();
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ once: true }
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const bind = () => {
|
||||||
|
document.querySelectorAll(".mermaid svg").forEach((svg) => {
|
||||||
|
if (svg.dataset.overlayBound === "true") {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
svg.style.cursor = "zoom-in";
|
||||||
|
svg.addEventListener("click", () => openOverlay(svg));
|
||||||
|
svg.dataset.overlayBound = "true";
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
const init = () => {
|
||||||
|
bind();
|
||||||
|
// Mermaid renders asynchronously; bind again after a short delay.
|
||||||
|
setTimeout(bind, 500);
|
||||||
|
};
|
||||||
|
|
||||||
|
if (document.readyState === "loading") {
|
||||||
|
document.addEventListener("DOMContentLoaded", init);
|
||||||
|
} else {
|
||||||
|
init();
|
||||||
|
}
|
||||||
|
})();
|
||||||
38
book/theme/mermaid.css
Normal file
38
book/theme/mermaid.css
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
.mermaid {
|
||||||
|
max-width: 100%;
|
||||||
|
overflow-x: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
.mermaid svg {
|
||||||
|
width: 100% !important;
|
||||||
|
height: auto !important;
|
||||||
|
}
|
||||||
|
|
||||||
|
.mermaid-overlay {
|
||||||
|
position: fixed;
|
||||||
|
inset: 0;
|
||||||
|
background: rgba(0, 0, 0, 0.75);
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
z-index: 9999;
|
||||||
|
cursor: zoom-out;
|
||||||
|
}
|
||||||
|
|
||||||
|
.mermaid-overlay__content {
|
||||||
|
background: #fff;
|
||||||
|
padding: 16px;
|
||||||
|
max-width: 95vw;
|
||||||
|
max-height: 95vh;
|
||||||
|
overflow: auto;
|
||||||
|
border-radius: 8px;
|
||||||
|
box-shadow: 0 4px 16px rgba(0, 0, 0, 0.25);
|
||||||
|
}
|
||||||
|
|
||||||
|
.mermaid-overlay__content svg {
|
||||||
|
width: 95vw !important;
|
||||||
|
max-width: 1400px;
|
||||||
|
height: auto !important;
|
||||||
|
display: block;
|
||||||
|
margin: 0 auto;
|
||||||
|
}
|
||||||
23
examples/Cargo.toml
Normal file
23
examples/Cargo.toml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
[package]
|
||||||
|
categories.workspace = true
|
||||||
|
description.workspace = true
|
||||||
|
edition.workspace = true
|
||||||
|
keywords.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
name = "runner-examples"
|
||||||
|
readme.workspace = true
|
||||||
|
repository.workspace = true
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
testing-framework-core = { workspace = true }
|
||||||
|
testing-framework-runner-compose = { workspace = true }
|
||||||
|
testing-framework-runner-k8s = { workspace = true }
|
||||||
|
testing-framework-runner-local = { workspace = true }
|
||||||
|
testing-framework-workflows = { workspace = true }
|
||||||
|
tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
98
examples/src/bin/compose_runner.rs
Normal file
98
examples/src/bin/compose_runner.rs
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use runner_examples::{ChaosBuilderExt as _, ScenarioBuilderExt as _};
|
||||||
|
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_compose::{ComposeDeployer, ComposeRunnerError};
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
const DEFAULT_VALIDATORS: usize = 1;
|
||||||
|
const DEFAULT_EXECUTORS: usize = 1;
|
||||||
|
const DEFAULT_RUN_SECS: u64 = 60;
|
||||||
|
const MIXED_TXS_PER_BLOCK: u64 = 5;
|
||||||
|
const TOTAL_WALLETS: usize = 64;
|
||||||
|
const TRANSACTION_WALLETS: usize = 8;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
tracing_subscriber::fmt::init();
|
||||||
|
|
||||||
|
let validators = read_env("COMPOSE_DEMO_VALIDATORS", DEFAULT_VALIDATORS);
|
||||||
|
let executors = read_env("COMPOSE_DEMO_EXECUTORS", DEFAULT_EXECUTORS);
|
||||||
|
let run_secs = read_env("COMPOSE_DEMO_RUN_SECS", DEFAULT_RUN_SECS);
|
||||||
|
info!(
|
||||||
|
validators,
|
||||||
|
executors, run_secs, "starting compose runner demo"
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Err(err) = run_compose_case(validators, executors, Duration::from_secs(run_secs)).await {
|
||||||
|
warn!("compose runner demo failed: {err}");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
async fn run_compose_case(
|
||||||
|
validators: usize,
|
||||||
|
executors: usize,
|
||||||
|
run_duration: Duration,
|
||||||
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
info!(
|
||||||
|
validators,
|
||||||
|
executors,
|
||||||
|
duration_secs = run_duration.as_secs(),
|
||||||
|
"building scenario plan"
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(validators)
|
||||||
|
.executors(executors)
|
||||||
|
.apply()
|
||||||
|
.enable_node_control()
|
||||||
|
.chaos()
|
||||||
|
.restart()
|
||||||
|
// Keep chaos restarts outside the test run window to avoid crash loops on restart.
|
||||||
|
.min_delay(Duration::from_secs(120))
|
||||||
|
.max_delay(Duration::from_secs(180))
|
||||||
|
.target_cooldown(Duration::from_secs(240))
|
||||||
|
.apply()
|
||||||
|
.wallets(TOTAL_WALLETS)
|
||||||
|
.transactions()
|
||||||
|
.rate(MIXED_TXS_PER_BLOCK)
|
||||||
|
.users(TRANSACTION_WALLETS)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1)
|
||||||
|
.blob_rate(1)
|
||||||
|
.apply()
|
||||||
|
.with_run_duration(run_duration)
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = ComposeDeployer::new();
|
||||||
|
info!("deploying compose stack");
|
||||||
|
let runner: Runner = match deployer.deploy(&plan).await {
|
||||||
|
Ok(runner) => runner,
|
||||||
|
Err(ComposeRunnerError::DockerUnavailable) => {
|
||||||
|
warn!("Docker is unavailable; cannot run compose demo");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Err(err) => return Err(err.into()),
|
||||||
|
};
|
||||||
|
if !runner.context().telemetry().is_configured() {
|
||||||
|
warn!("compose runner should expose prometheus metrics");
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("running scenario");
|
||||||
|
runner.run(&mut plan).await.map(|_| ()).map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_env<T>(key: &str, default: T) -> T
|
||||||
|
where
|
||||||
|
T: std::str::FromStr + Copy,
|
||||||
|
{
|
||||||
|
std::env::var(key)
|
||||||
|
.ok()
|
||||||
|
.and_then(|raw| raw.parse::<T>().ok())
|
||||||
|
.unwrap_or(default)
|
||||||
|
}
|
||||||
109
examples/src/bin/k8s_runner.rs
Normal file
109
examples/src/bin/k8s_runner.rs
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use runner_examples::ScenarioBuilderExt as _;
|
||||||
|
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_k8s::{K8sDeployer, K8sRunnerError};
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
const DEFAULT_RUN_SECS: u64 = 60;
|
||||||
|
const DEFAULT_VALIDATORS: usize = 1;
|
||||||
|
const DEFAULT_EXECUTORS: usize = 1;
|
||||||
|
const MIXED_TXS_PER_BLOCK: u64 = 5;
|
||||||
|
const TOTAL_WALLETS: usize = 64;
|
||||||
|
const TRANSACTION_WALLETS: usize = 8;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
tracing_subscriber::fmt::init();
|
||||||
|
|
||||||
|
let validators = read_env("K8S_DEMO_VALIDATORS", DEFAULT_VALIDATORS);
|
||||||
|
let executors = read_env("K8S_DEMO_EXECUTORS", DEFAULT_EXECUTORS);
|
||||||
|
let run_secs = read_env("K8S_DEMO_RUN_SECS", DEFAULT_RUN_SECS);
|
||||||
|
info!(validators, executors, run_secs, "starting k8s runner demo");
|
||||||
|
|
||||||
|
if let Err(err) = run_k8s_case(validators, executors, Duration::from_secs(run_secs)).await {
|
||||||
|
warn!("k8s runner demo failed: {err}");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
async fn run_k8s_case(
|
||||||
|
validators: usize,
|
||||||
|
executors: usize,
|
||||||
|
run_duration: Duration,
|
||||||
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
info!(
|
||||||
|
validators,
|
||||||
|
executors,
|
||||||
|
duration_secs = run_duration.as_secs(),
|
||||||
|
"building scenario plan"
|
||||||
|
);
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(validators)
|
||||||
|
.executors(executors)
|
||||||
|
.apply()
|
||||||
|
.wallets(TOTAL_WALLETS)
|
||||||
|
.transactions()
|
||||||
|
.rate(MIXED_TXS_PER_BLOCK)
|
||||||
|
.users(TRANSACTION_WALLETS)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1)
|
||||||
|
.blob_rate(1)
|
||||||
|
.apply()
|
||||||
|
.with_run_duration(run_duration)
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = K8sDeployer::new();
|
||||||
|
info!("deploying k8s stack");
|
||||||
|
let runner: Runner = match deployer.deploy(&plan).await {
|
||||||
|
Ok(runner) => runner,
|
||||||
|
Err(K8sRunnerError::ClientInit { source }) => {
|
||||||
|
warn!("Kubernetes cluster unavailable ({source}); skipping");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Err(err) => return Err(err.into()),
|
||||||
|
};
|
||||||
|
|
||||||
|
if !runner.context().telemetry().is_configured() {
|
||||||
|
warn!("k8s runner should expose prometheus metrics");
|
||||||
|
}
|
||||||
|
|
||||||
|
let validator_clients = runner.context().node_clients().validator_clients().to_vec();
|
||||||
|
|
||||||
|
info!("running scenario");
|
||||||
|
let _handle = runner
|
||||||
|
.run(&mut plan)
|
||||||
|
.await
|
||||||
|
.map(|_| ())
|
||||||
|
.map_err(|err| format!("k8s scenario failed: {err}"))?;
|
||||||
|
|
||||||
|
for (idx, client) in validator_clients.iter().enumerate() {
|
||||||
|
let info = client
|
||||||
|
.consensus_info()
|
||||||
|
.await
|
||||||
|
.map_err(|err| format!("validator {idx} consensus_info failed: {err}"))?;
|
||||||
|
if info.height < 5 {
|
||||||
|
return Err(format!(
|
||||||
|
"validator {idx} height {} should reach at least 5 blocks",
|
||||||
|
info.height
|
||||||
|
)
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_env<T>(key: &str, default: T) -> T
|
||||||
|
where
|
||||||
|
T: std::str::FromStr + Copy,
|
||||||
|
{
|
||||||
|
std::env::var(key)
|
||||||
|
.ok()
|
||||||
|
.and_then(|raw| raw.parse::<T>().ok())
|
||||||
|
.unwrap_or(default)
|
||||||
|
}
|
||||||
examples/src/bin/local_runner.rs (Normal file, 84 lines)
@@ -0,0 +1,84 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use runner_examples::ScenarioBuilderExt as _;
|
||||||
|
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
|
||||||
|
use testing_framework_runner_local::LocalDeployer;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
const DEFAULT_VALIDATORS: usize = 1;
|
||||||
|
const DEFAULT_EXECUTORS: usize = 1;
|
||||||
|
const DEFAULT_RUN_SECS: u64 = 60;
|
||||||
|
const MIXED_TXS_PER_BLOCK: u64 = 5;
|
||||||
|
const TOTAL_WALLETS: usize = 64;
|
||||||
|
const TRANSACTION_WALLETS: usize = 8;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
tracing_subscriber::fmt::init();
|
||||||
|
|
||||||
|
if std::env::var("POL_PROOF_DEV_MODE").is_err() {
|
||||||
|
warn!("POL_PROOF_DEV_MODE=true is required for the local runner demo");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
let validators = read_env("LOCAL_DEMO_VALIDATORS", DEFAULT_VALIDATORS);
|
||||||
|
let executors = read_env("LOCAL_DEMO_EXECUTORS", DEFAULT_EXECUTORS);
|
||||||
|
let run_secs = read_env("LOCAL_DEMO_RUN_SECS", DEFAULT_RUN_SECS);
|
||||||
|
info!(
|
||||||
|
validators,
|
||||||
|
executors, run_secs, "starting local runner demo"
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Err(err) = run_local_case(validators, executors, Duration::from_secs(run_secs)).await {
|
||||||
|
warn!("local runner demo failed: {err}");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
async fn run_local_case(
|
||||||
|
validators: usize,
|
||||||
|
executors: usize,
|
||||||
|
run_duration: Duration,
|
||||||
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
info!(
|
||||||
|
validators,
|
||||||
|
executors,
|
||||||
|
duration_secs = run_duration.as_secs(),
|
||||||
|
"building scenario plan"
|
||||||
|
);
|
||||||
|
let mut plan = ScenarioBuilder::topology()
|
||||||
|
.network_star()
|
||||||
|
.validators(validators)
|
||||||
|
.executors(executors)
|
||||||
|
.apply()
|
||||||
|
.wallets(TOTAL_WALLETS)
|
||||||
|
.transactions()
|
||||||
|
.rate(MIXED_TXS_PER_BLOCK)
|
||||||
|
.users(TRANSACTION_WALLETS)
|
||||||
|
.apply()
|
||||||
|
.da()
|
||||||
|
.channel_rate(1)
|
||||||
|
.blob_rate(1)
|
||||||
|
.apply()
|
||||||
|
.with_run_duration(run_duration)
|
||||||
|
.expect_consensus_liveness()
|
||||||
|
.build();
|
||||||
|
|
||||||
|
let deployer = LocalDeployer::default();
|
||||||
|
info!("deploying local nodes");
|
||||||
|
let runner: Runner = deployer.deploy(&plan).await?;
|
||||||
|
info!("running scenario");
|
||||||
|
runner.run(&mut plan).await.map(|_| ())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_env<T>(key: &str, default: T) -> T
|
||||||
|
where
|
||||||
|
T: std::str::FromStr + Copy,
|
||||||
|
{
|
||||||
|
std::env::var(key)
|
||||||
|
.ok()
|
||||||
|
.and_then(|raw| raw.parse::<T>().ok())
|
||||||
|
.unwrap_or(default)
|
||||||
|
}
|
||||||
examples/src/lib.rs (Normal file, 11 lines)
@@ -0,0 +1,11 @@
use testing_framework_core::scenario::Metrics;
pub use testing_framework_workflows::{
    builder::{ChaosBuilderExt, ScenarioBuilderExt},
    expectations, util, workloads,
};

/// Metrics are currently disabled in this branch; return a stub handle.
#[must_use]
pub const fn configure_prometheus_metrics() -> Metrics {
    Metrics::empty()
}
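The crate surface is intentionally small: it re-exports the workflow builder extensions and returns a stub `Metrics` handle. Below is a minimal sketch of how a consumer could wire these re-exports together, mirroring the runner binaries earlier in this commit; the `LocalDeployer` and builder methods are taken from those examples, and the assumption that every builder stage other than topology is optional is this sketch's, not something the crate documents.

```rust
use std::time::Duration;

use runner_examples::{ScenarioBuilderExt as _, configure_prometheus_metrics};
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Stub for now; kept here to mark where metrics wiring would go once re-enabled.
    let _metrics = configure_prometheus_metrics();

    // Minimal star topology with one validator and one executor.
    let mut plan = ScenarioBuilder::topology()
        .network_star()
        .validators(1)
        .executors(1)
        .apply()
        .with_run_duration(Duration::from_secs(60))
        .expect_consensus_liveness()
        .build();

    let runner: Runner = LocalDeployer::default().deploy(&plan).await?;
    runner.run(&mut plan).await.map(|_| ())?;
    Ok(())
}
```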
examples/tests/local_runner_bin_smoke.rs (Normal file, 28 lines)
@@ -0,0 +1,28 @@
use std::process::Command;

// Manually run the local runner binary as a smoke test.
// This spins up real nodes and should be invoked explicitly:
// POL_PROOF_DEV_MODE=true cargo test -p runner-examples --test
// local_runner_bin_smoke -- --ignored --nocapture
#[test]
#[ignore = "runs local_runner binary (~2min) and requires local assets/binaries"]
fn local_runner_bin_smoke() {
    let status = Command::new("cargo")
        .args([
            "run",
            "-p",
            "runner-examples",
            "--bin",
            "local_runner",
            "--",
            "--nocapture",
        ])
        .env("POL_PROOF_DEV_MODE", "true")
        .env("LOCAL_DEMO_RUN_SECS", "120")
        .env("LOCAL_DEMO_VALIDATORS", "1")
        .env("LOCAL_DEMO_EXECUTORS", "1")
        .status()
        .expect("failed to spawn cargo run");

    assert!(status.success(), "local runner binary exited with {status}");
}
rust-toolchain.toml (Normal file, 12 lines)
@@ -0,0 +1,12 @@
[toolchain]
# Keep this version in sync in the following places as well:
# * Dockerfile
# * flake.nix
# * testing-framework/assets/stack/Dockerfile
# Also, update the nightly toolchain to the latest nightly of the new version in the following places:
# * .github/workflows/code-check.yml (fmt job)
# * .pre-commit-config.yml (fmt hook)
# Then, if any new allow-by-default rustc lint has been introduced or stabilized, add it to the respective entry in our `config.toml`.
channel = "nightly-2025-09-14"
# Clippy should be included in the default profile, but in some cases it is not installed, so we force it with an explicit declaration.
components = ["clippy", "rustfmt"]
rustfmt.toml (Normal file, 5 lines)
@@ -0,0 +1,5 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
reorder_imports = true
reorder_modules = true
wrap_comments = true
scripts/build-rapidsnark.sh (Executable file, 122 lines)
@@ -0,0 +1,122 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Rebuild the rapidsnark prover for the current architecture.
|
||||||
|
#
|
||||||
|
# Usage: ./scripts/build-rapidsnark.sh <circuits_dir>
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
if [ $# -lt 1 ]; then
|
||||||
|
echo "usage: $0 <circuits_dir>" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
TARGET_ARCH="$(uname -m)"
|
||||||
|
CIRCUITS_DIR="$1"
|
||||||
|
RAPIDSNARK_REPO="${RAPIDSNARK_REPO:-https://github.com/iden3/rapidsnark.git}"
|
||||||
|
RAPIDSNARK_REF="${RAPIDSNARK_REF:-main}"
|
||||||
|
FORCE_REBUILD="${RAPIDSNARK_FORCE_REBUILD:-0}"
|
||||||
|
BUILD_DIR=""
|
||||||
|
PACKAGE_DIR=""
|
||||||
|
CMAKE_TARGET_PLATFORM=""
|
||||||
|
USE_ASM="${RAPIDSNARK_USE_ASM:-ON}"
|
||||||
|
CMAKE_C_FLAGS="${RAPIDSNARK_C_FLAGS:-}"
|
||||||
|
CMAKE_CXX_FLAGS="${RAPIDSNARK_CXX_FLAGS:-}"
|
||||||
|
|
||||||
|
if [ ! -d "$CIRCUITS_DIR" ]; then
|
||||||
|
echo "circuits directory '$CIRCUITS_DIR' does not exist" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
system_gmp_package() {
|
||||||
|
local multiarch arch
|
||||||
|
arch="${1:-${TARGET_ARCH}}"
|
||||||
|
multiarch="$(gcc -print-multiarch 2>/dev/null || echo "${arch}-linux-gnu")"
|
||||||
|
local lib_path="/usr/lib/${multiarch}/libgmp.a"
|
||||||
|
if [ ! -f "$lib_path" ]; then
|
||||||
|
echo "system libgmp.a not found at $lib_path" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
mkdir -p "depends/gmp/package_${arch}/lib" "depends/gmp/package_${arch}/include"
|
||||||
|
cp "$lib_path" "depends/gmp/package_${arch}/lib/"
|
||||||
|
# Headers are small; copy the public ones the build expects.
|
||||||
|
cp /usr/include/gmp*.h "depends/gmp/package_${arch}/include/" || true
|
||||||
|
}
|
||||||
|
|
||||||
|
case "$TARGET_ARCH" in
|
||||||
|
arm64 | aarch64)
|
||||||
|
CMAKE_TARGET_PLATFORM="aarch64"
|
||||||
|
BUILD_DIR="build_prover_arm64"
|
||||||
|
PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_arm64}"
|
||||||
|
;;
|
||||||
|
x86_64)
|
||||||
|
if [ "$FORCE_REBUILD" != "1" ]; then
|
||||||
|
echo "rapidsnark rebuild skipped for architecture '$TARGET_ARCH' (set RAPIDSNARK_FORCE_REBUILD=1 to override)" >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if [ -z "$CMAKE_C_FLAGS" ]; then
|
||||||
|
# Keep CPU requirements minimal so binaries run under emulation (e.g. act on Apple hosts).
|
||||||
|
CMAKE_C_FLAGS="-march=x86-64 -mno-avx -mno-avx2 -mno-sse4.2"
|
||||||
|
fi
|
||||||
|
if [ -z "$CMAKE_CXX_FLAGS" ]; then
|
||||||
|
CMAKE_CXX_FLAGS="$CMAKE_C_FLAGS"
|
||||||
|
fi
|
||||||
|
# Assembly paths assume modern CPU features; disable by default for x86_64 unless overridden.
|
||||||
|
if [ "${RAPIDSNARK_USE_ASM:-}" = "" ]; then
|
||||||
|
USE_ASM="OFF"
|
||||||
|
fi
|
||||||
|
CMAKE_TARGET_PLATFORM="x86_64"
|
||||||
|
BUILD_DIR="build_prover_x86_64"
|
||||||
|
PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_x86_64}"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
if [ "$FORCE_REBUILD" != "1" ]; then
|
||||||
|
echo "rapidsnark rebuild skipped for unsupported architecture '$TARGET_ARCH'" >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
CMAKE_TARGET_PLATFORM="$TARGET_ARCH"
|
||||||
|
BUILD_DIR="build_prover_${TARGET_ARCH}"
|
||||||
|
PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_${TARGET_ARCH}}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
workdir="$(mktemp -d)"
|
||||||
|
trap 'rm -rf "$workdir"' EXIT
|
||||||
|
|
||||||
|
echo "Building rapidsnark ($RAPIDSNARK_REF) for $TARGET_ARCH..." >&2
|
||||||
|
git clone --depth 1 --branch "$RAPIDSNARK_REF" "$RAPIDSNARK_REPO" "$workdir/rapidsnark" >&2
|
||||||
|
cd "$workdir/rapidsnark"
|
||||||
|
git submodule update --init --recursive >&2
|
||||||
|
|
||||||
|
if [ "${RAPIDSNARK_BUILD_GMP:-1}" = "1" ]; then
|
||||||
|
if [ -z "${RAPIDSNARK_GMP_TARGET:-}" ]; then
|
||||||
|
if [ "$CMAKE_TARGET_PLATFORM" = "x86_64" ]; then
|
||||||
|
GMP_TARGET="host"
|
||||||
|
else
|
||||||
|
GMP_TARGET="$CMAKE_TARGET_PLATFORM"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
GMP_TARGET="$RAPIDSNARK_GMP_TARGET"
|
||||||
|
fi
|
||||||
|
./build_gmp.sh "$GMP_TARGET" >&2
|
||||||
|
else
|
||||||
|
echo "Using system libgmp to satisfy rapidsnark dependencies" >&2
|
||||||
|
system_gmp_package "$CMAKE_TARGET_PLATFORM"
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf "$BUILD_DIR"
|
||||||
|
mkdir "$BUILD_DIR"
|
||||||
|
cd "$BUILD_DIR"
|
||||||
|
cmake .. \
|
||||||
|
-DTARGET_PLATFORM="$CMAKE_TARGET_PLATFORM" \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DCMAKE_INSTALL_PREFIX="../${PACKAGE_DIR}" \
|
||||||
|
-DBUILD_SHARED_LIBS=OFF \
|
||||||
|
-DUSE_ASM="$USE_ASM" \
|
||||||
|
${CMAKE_C_FLAGS:+-DCMAKE_C_FLAGS="$CMAKE_C_FLAGS"} \
|
||||||
|
${CMAKE_CXX_FLAGS:+-DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS"} >&2
|
||||||
|
cmake --build . --target prover verifier -- -j"$(nproc)" >&2
|
||||||
|
|
||||||
|
install -m 0755 "src/prover" "$CIRCUITS_DIR/prover"
|
||||||
|
install -m 0755 "src/verifier" "$CIRCUITS_DIR/verifier"
|
||||||
|
echo "rapidsnark prover installed to $CIRCUITS_DIR/prover" >&2
|
||||||
scripts/setup-nomos-circuits.sh (Executable file, 226 lines)
@@ -0,0 +1,226 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Setup script for nomos-circuits
|
||||||
|
#
|
||||||
|
# Usage: ./setup-nomos-circuits.sh [VERSION] [INSTALL_DIR]
|
||||||
|
#
|
||||||
|
# Arguments:
|
||||||
|
# VERSION - Optional. Version to install (default: v0.3.1)
|
||||||
|
# INSTALL_DIR - Optional. Installation directory (default: $HOME/.nomos-circuits)
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# ./setup-nomos-circuits.sh # Install default version to default location
|
||||||
|
# ./setup-nomos-circuits.sh v0.2.0 # Install specific version to default location
|
||||||
|
# ./setup-nomos-circuits.sh v0.2.0 /opt/circuits # Install to custom location
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Default values
|
||||||
|
VERSION="${1:-v0.3.1}"
|
||||||
|
DEFAULT_INSTALL_DIR="$HOME/.nomos-circuits"
|
||||||
|
INSTALL_DIR="${2:-$DEFAULT_INSTALL_DIR}"
|
||||||
|
REPO="logos-co/nomos-circuits"
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
print_info() {
|
||||||
|
echo -e "${BLUE}ℹ${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}✓${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}⚠${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}✗${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Detect OS and architecture
|
||||||
|
detect_platform() {
|
||||||
|
local os=""
|
||||||
|
local arch=""
|
||||||
|
|
||||||
|
# Detect OS
|
||||||
|
case "$(uname -s)" in
|
||||||
|
Linux*) os="linux";;
|
||||||
|
Darwin*) os="macos";;
|
||||||
|
MINGW*|MSYS*|CYGWIN*) os="windows";;
|
||||||
|
*) print_error "Unsupported operating system: $(uname -s)"; exit 1;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Detect architecture
|
||||||
|
case "$(uname -m)" in
|
||||||
|
x86_64) arch="x86_64";;
|
||||||
|
aarch64) arch="aarch64";;
|
||||||
|
arm64) arch="aarch64";;
|
||||||
|
*) print_error "Unsupported architecture: $(uname -m)"; exit 1;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
echo "${os}-${arch}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if installation directory exists and get confirmation
|
||||||
|
check_existing_installation() {
|
||||||
|
if [ -d "$INSTALL_DIR" ]; then
|
||||||
|
print_warning "Installation directory already exists: $INSTALL_DIR"
|
||||||
|
|
||||||
|
# Check if it has a VERSION file
|
||||||
|
if [ -f "$INSTALL_DIR/VERSION" ]; then
|
||||||
|
local current_version=$(cat "$INSTALL_DIR/VERSION")
|
||||||
|
print_info "Currently installed version: $current_version"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# In non-interactive environments (CI), automatically overwrite
|
||||||
|
if [ ! -t 0 ]; then
|
||||||
|
print_info "Non-interactive environment detected, automatically overwriting..."
|
||||||
|
else
|
||||||
|
# Interactive environment - ask for confirmation
|
||||||
|
echo
|
||||||
|
read -p "Do you want to overwrite it? (y/N): " -n 1 -r
|
||||||
|
echo
|
||||||
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
print_info "Installation cancelled."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_info "Removing existing installation..."
|
||||||
|
rm -rf "$INSTALL_DIR"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Download and extract the release
|
||||||
|
download_release() {
|
||||||
|
local platform="$1"
|
||||||
|
local artifact="nomos-circuits-${VERSION}-${platform}.tar.gz"
|
||||||
|
local url="https://github.com/${REPO}/releases/download/${VERSION}/${artifact}"
|
||||||
|
local temp_dir=$(mktemp -d)
|
||||||
|
|
||||||
|
print_info "Downloading nomos-circuits ${VERSION} for ${platform}..."
|
||||||
|
print_info "URL: $url"
|
||||||
|
|
||||||
|
# Build curl command with optional authentication
|
||||||
|
local curl_cmd="curl -L"
|
||||||
|
if [ -n "$GITHUB_TOKEN" ]; then
|
||||||
|
curl_cmd="$curl_cmd --header 'authorization: Bearer ${GITHUB_TOKEN}'"
|
||||||
|
fi
|
||||||
|
curl_cmd="$curl_cmd -o ${temp_dir}/${artifact} $url"
|
||||||
|
|
||||||
|
if ! eval "$curl_cmd"; then
|
||||||
|
print_error "Failed to download release artifact"
|
||||||
|
print_error "Please check that version ${VERSION} exists for platform ${platform}"
|
||||||
|
print_error "Available releases: https://github.com/${REPO}/releases"
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Download complete"
|
||||||
|
|
||||||
|
print_info "Extracting to ${INSTALL_DIR}..."
|
||||||
|
mkdir -p "$INSTALL_DIR"
|
||||||
|
|
||||||
|
if ! tar -xzf "${temp_dir}/${artifact}" -C "$INSTALL_DIR" --strip-components=1; then
|
||||||
|
print_error "Failed to extract archive"
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
print_success "Extraction complete"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Handle macOS code signing/quarantine issues
|
||||||
|
handle_macos_quarantine() {
|
||||||
|
print_info "macOS detected: Removing quarantine attributes from executables..."
|
||||||
|
|
||||||
|
# Remove quarantine attribute from all executable files
|
||||||
|
if find "$INSTALL_DIR" -type f -perm +111 -exec xattr -d com.apple.quarantine {} \; 2>/dev/null; then
|
||||||
|
print_success "Quarantine attributes removed"
|
||||||
|
else
|
||||||
|
print_warning "Could not remove quarantine attributes (they may not exist)"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main installation process
|
||||||
|
main() {
|
||||||
|
print_info "Setting up nomos-circuits ${VERSION}"
|
||||||
|
print_info "Installation directory: $INSTALL_DIR"
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Detect platform (allow override via NOMOS_CIRCUITS_PLATFORM)
|
||||||
|
local platform_override="${NOMOS_CIRCUITS_PLATFORM:-}"
|
||||||
|
local platform
|
||||||
|
if [ -n "$platform_override" ]; then
|
||||||
|
platform="$platform_override"
|
||||||
|
print_info "Using overridden platform: $platform"
|
||||||
|
else
|
||||||
|
platform=$(detect_platform)
|
||||||
|
print_info "Detected platform: $platform"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check existing installation
|
||||||
|
check_existing_installation
|
||||||
|
|
||||||
|
# Download and extract (retry with x86_64 bundle on aarch64 if needed)
|
||||||
|
if ! download_release "$platform"; then
|
||||||
|
if [[ "$platform" == linux-aarch64 ]]; then
|
||||||
|
print_warning "Falling back to linux-x86_64 circuits bundle; will rebuild prover for aarch64."
|
||||||
|
rm -rf "$INSTALL_DIR"
|
||||||
|
if ! download_release "linux-x86_64"; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Handle macOS quarantine if needed
|
||||||
|
if [[ "$platform" == macos-* ]]; then
|
||||||
|
echo
|
||||||
|
handle_macos_quarantine
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${NOMOS_CIRCUITS_REBUILD_RAPIDSNARK:-0}" == "1" || "$platform" == *"aarch64" ]]; then
|
||||||
|
echo
|
||||||
|
print_info "Rebuilding rapidsnark prover for ${platform}..."
|
||||||
|
"${SCRIPT_DIR}/build-rapidsnark.sh" "$INSTALL_DIR"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo
|
||||||
|
print_success "Installation complete!"
|
||||||
|
echo
|
||||||
|
print_info "nomos-circuits ${VERSION} is now installed at: $INSTALL_DIR"
|
||||||
|
print_info "The following circuits are available:"
|
||||||
|
|
||||||
|
# Discover circuits by finding directories that contain a witness_generator
|
||||||
|
for dir in "$INSTALL_DIR"/*/; do
|
||||||
|
if [ -d "$dir" ]; then
|
||||||
|
local circuit_name
|
||||||
|
circuit_name=$(basename "$dir")
|
||||||
|
if [ -f "$dir/witness_generator" ]; then
|
||||||
|
echo " • $circuit_name"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Only show export instructions if not using the default location
|
||||||
|
if [ "$INSTALL_DIR" != "$DEFAULT_INSTALL_DIR" ]; then
|
||||||
|
echo
|
||||||
|
print_info "Since you're using a custom installation directory, set the environment variable:"
|
||||||
|
print_info " export NOMOS_CIRCUITS=$INSTALL_DIR"
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run main
|
||||||
|
main
|
||||||
testing-framework/assets/stack/Dockerfile (Normal file, 125 lines)
@@ -0,0 +1,125 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
# check=skip=SecretsUsedInArgOrEnv
|
||||||
|
# Ignore warnings about sensitive information as this is test data.
|
||||||
|
|
||||||
|
ARG VERSION=v0.3.1
|
||||||
|
ARG CIRCUITS_OVERRIDE
|
||||||
|
|
||||||
|
# ===========================
|
||||||
|
# BUILD IMAGE
|
||||||
|
# ===========================
|
||||||
|
|
||||||
|
FROM rust:1.91.0-slim-bookworm AS builder
|
||||||
|
|
||||||
|
ARG VERSION
|
||||||
|
ARG CIRCUITS_OVERRIDE
|
||||||
|
|
||||||
|
LABEL maintainer="augustinas@status.im" \
|
||||||
|
source="https://github.com/logos-co/nomos-node" \
|
||||||
|
description="Nomos testnet build image"
|
||||||
|
|
||||||
|
WORKDIR /workspace
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Reduce debug artifact size.
|
||||||
|
ENV CARGO_PROFILE_DEV_DEBUG=0
|
||||||
|
|
||||||
|
# Install dependencies needed for building RocksDB.
|
||||||
|
RUN apt-get update && apt-get install -yq \
|
||||||
|
git gcc g++ clang make cmake m4 xz-utils libgmp-dev libssl-dev pkg-config ca-certificates curl wget file
|
||||||
|
|
||||||
|
RUN mkdir -p /opt/circuits && \
|
||||||
|
select_circuits_source() { \
|
||||||
|
# Prefer an explicit override when it exists (file or directory). \
|
||||||
|
if [ -n "$CIRCUITS_OVERRIDE" ] && [ -e "/workspace/${CIRCUITS_OVERRIDE}" ]; then \
|
||||||
|
echo "/workspace/${CIRCUITS_OVERRIDE}"; \
|
||||||
|
return 0; \
|
||||||
|
fi; \
|
||||||
|
# Fall back to the workspace bundle shipped with the repo. \
|
||||||
|
if [ -e "/workspace/tests/kzgrs/kzgrs_test_params" ]; then \
|
||||||
|
echo "/workspace/tests/kzgrs/kzgrs_test_params"; \
|
||||||
|
return 0; \
|
||||||
|
fi; \
|
||||||
|
return 1; \
|
||||||
|
}; \
|
||||||
|
if CIRCUITS_PATH="$(select_circuits_source)"; then \
|
||||||
|
echo "Using prebuilt circuits bundle from ${CIRCUITS_PATH#/workspace/}"; \
|
||||||
|
if [ -d "$CIRCUITS_PATH" ]; then \
|
||||||
|
cp -R "${CIRCUITS_PATH}/." /opt/circuits; \
|
||||||
|
else \
|
||||||
|
cp "${CIRCUITS_PATH}" /opt/circuits/; \
|
||||||
|
fi; \
|
||||||
|
fi; \
|
||||||
|
TARGET_ARCH="$(uname -m)"; \
|
||||||
|
if [ -f "/opt/circuits/prover" ]; then \
|
||||||
|
PROVER_INFO="$(file -b /opt/circuits/prover || true)"; \
|
||||||
|
case "$TARGET_ARCH" in \
|
||||||
|
x86_64) EXPECT_ARCH="x86-64" ;; \
|
||||||
|
aarch64|arm64) EXPECT_ARCH="aarch64" ;; \
|
||||||
|
*) EXPECT_ARCH="$TARGET_ARCH" ;; \
|
||||||
|
esac; \
|
||||||
|
if [ -n "$PROVER_INFO" ] && ! echo "$PROVER_INFO" | grep -qi "$EXPECT_ARCH"; then \
|
||||||
|
echo "Circuits prover architecture ($PROVER_INFO) does not match target ${TARGET_ARCH}; rebuilding rapidsnark binaries"; \
|
||||||
|
chmod +x scripts/build-rapidsnark.sh && \
|
||||||
|
RAPIDSNARK_FORCE_REBUILD=1 \
|
||||||
|
scripts/build-rapidsnark.sh /opt/circuits; \
|
||||||
|
fi; \
|
||||||
|
fi; \
|
||||||
|
if [ ! -f "/opt/circuits/pol/verification_key.json" ]; then \
|
||||||
|
echo "Local circuits missing pol artifacts; downloading ${VERSION} bundle and rebuilding"; \
|
||||||
|
chmod +x scripts/setup-nomos-circuits.sh && \
|
||||||
|
NOMOS_CIRCUITS_REBUILD_RAPIDSNARK=1 \
|
||||||
|
RAPIDSNARK_BUILD_GMP=1 \
|
||||||
|
scripts/setup-nomos-circuits.sh "$VERSION" "/opt/circuits"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
ENV NOMOS_CIRCUITS=/opt/circuits
|
||||||
|
ENV CARGO_TARGET_DIR=/workspace/target
|
||||||
|
|
||||||
|
# Fetch the nomos-node sources pinned in Cargo.lock and build the runtime binaries.
|
||||||
|
RUN if [ ! -d /workspace/nomos-node ]; then \
|
||||||
|
git clone https://github.com/logos-co/nomos-node.git /workspace/nomos-node; \
|
||||||
|
fi && \
|
||||||
|
cd /workspace/nomos-node && \
|
||||||
|
git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd && \
|
||||||
|
git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd && \
|
||||||
|
git reset --hard && git clean -fdx && \
|
||||||
|
cargo build --locked --all-features --bins && \
|
||||||
|
rm -rf /workspace/nomos-node/target/debug/incremental
|
||||||
|
|
||||||
|
# Build cfgsync binaries from this workspace.
|
||||||
|
RUN cargo build --locked --all-features --manifest-path /workspace/testing-framework/tools/cfgsync/Cargo.toml --bins
|
||||||
|
|
||||||
|
# ===========================
|
||||||
|
# NODE IMAGE
|
||||||
|
# ===========================
|
||||||
|
|
||||||
|
FROM debian:bookworm-slim
|
||||||
|
|
||||||
|
ARG VERSION
|
||||||
|
|
||||||
|
LABEL maintainer="augustinas@status.im" \
|
||||||
|
source="https://github.com/logos-co/nomos-node" \
|
||||||
|
description="Nomos node image"
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -yq \
|
||||||
|
libstdc++6 \
|
||||||
|
libgmp10 \
|
||||||
|
libgomp1 \
|
||||||
|
libssl3 \
|
||||||
|
ca-certificates \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
COPY --from=builder /opt/circuits /opt/circuits
|
||||||
|
|
||||||
|
COPY --from=builder /workspace/target/debug/nomos-node /usr/bin/nomos-node
|
||||||
|
COPY --from=builder /workspace/target/debug/nomos-executor /usr/bin/nomos-executor
|
||||||
|
COPY --from=builder /workspace/target/debug/nomos-cli /usr/bin/nomos-cli
|
||||||
|
COPY --from=builder /workspace/target/debug/cfgsync-server /usr/bin/cfgsync-server
|
||||||
|
COPY --from=builder /workspace/target/debug/cfgsync-client /usr/bin/cfgsync-client
|
||||||
|
|
||||||
|
ENV NOMOS_CIRCUITS=/opt/circuits
|
||||||
|
|
||||||
|
EXPOSE 3000 8080 9000 60000
|
||||||
|
|
||||||
|
ENTRYPOINT ["/usr/bin/nomos-node"]
|
||||||
testing-framework/assets/stack/README.md (Normal file, 60 lines)
@@ -0,0 +1,60 @@
|
|||||||
|
# Docker Compose Testnet for Nomos
|
||||||
|
|
||||||
|
The Nomos Docker Compose Testnet contains four distinct service types:
|
||||||
|
|
||||||
|
- **Nomos Node Services**: Multiple dynamically spawned Nomos nodes that synchronize their configuration via the cfgsync utility.
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
After modifying the codebase or the Dockerfile, rebuild the Nomos images:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose build
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuring
|
||||||
|
|
||||||
|
The Docker testnet is configured through the `.env` file; an example configuration is provided in `.env.example`.
|
||||||
|
|
||||||
|
To adjust the number of Nomos nodes, modify the variable:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
DOCKER_COMPOSE_LIBP2P_REPLICAS=100
|
||||||
|
```
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
Initiate the testnet by executing the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
This command merges all output logs and displays them on stdout. For more focused output, it's recommended to first run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
Followed by:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose logs -f nomos-node
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using testnet
|
||||||
|
|
||||||
|
The bootstrap node is accessible from the host via ports `3000` and `18080`. To expose other Nomos nodes, update the `nomos-node` service in the `compose.yml` file with this configuration:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
nomos-node-0:
|
||||||
|
ports:
|
||||||
|
- "3001-3010:3000" # Use range depending on the number of nomos node replicas.
|
||||||
|
- "18081-18190:18080"
|
||||||
|
```
|
||||||
|
|
||||||
|
After running `docker compose up`, the randomly assigned ports can be viewed with the `ps` command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose ps
|
||||||
|
```
|
||||||
testing-framework/assets/stack/cfgsync.yaml (Normal file, 49 lines)
@@ -0,0 +1,49 @@
|
|||||||
|
port: 4400
|
||||||
|
n_hosts: 4
|
||||||
|
timeout: 10
|
||||||
|
|
||||||
|
# ConsensusConfig related parameters
|
||||||
|
security_param: 10
|
||||||
|
active_slot_coeff: 0.9
|
||||||
|
|
||||||
|
# DaConfig related parameters
|
||||||
|
subnetwork_size: 2
|
||||||
|
dispersal_factor: 2
|
||||||
|
num_samples: 1
|
||||||
|
num_subnets: 2
|
||||||
|
old_blobs_check_interval: "5.0"
|
||||||
|
blobs_validity_duration: "60.0"
|
||||||
|
global_params_path: "/kzgrs_test_params"
|
||||||
|
min_dispersal_peers: 1
|
||||||
|
min_replication_peers: 1
|
||||||
|
monitor_failure_time_window: "5.0"
|
||||||
|
balancer_interval: "5.0"
|
||||||
|
# Dispersal mempool publish strategy
|
||||||
|
mempool_publish_strategy: !SampleSubnetworks
|
||||||
|
sample_threshold: 2
|
||||||
|
timeout: "2.0"
|
||||||
|
cooldown: "0.0001"
|
||||||
|
|
||||||
|
replication_settings:
|
||||||
|
seen_message_cache_size: 204800
|
||||||
|
seen_message_ttl: "900.0"
|
||||||
|
retry_shares_limit: 5
|
||||||
|
retry_commitments_limit: 5
|
||||||
|
|
||||||
|
# Tracing
|
||||||
|
tracing_settings:
|
||||||
|
logger: !Loki
|
||||||
|
endpoint: http://loki:3100/
|
||||||
|
host_identifier: node
|
||||||
|
tracing: !Otlp
|
||||||
|
endpoint: http://tempo:4317/
|
||||||
|
sample_ratio: 0.5
|
||||||
|
service_name: node
|
||||||
|
filter: !EnvFilter
|
||||||
|
filters:
|
||||||
|
nomos: debug
|
||||||
|
metrics: !Otlp
|
||||||
|
endpoint: http://prometheus:9090/api/v1/otlp/v1/metrics
|
||||||
|
host_identifier: node
|
||||||
|
console: None
|
||||||
|
level: INFO
|
||||||
testing-framework/assets/stack/kzgrs_test_params (Normal file, BIN)
Binary file not shown.
@@ -0,0 +1,8 @@
apiVersion: 1
providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: 'file'
    options:
      path: '/var/lib/grafana/dashboards'
@@ -0,0 +1,237 @@
|
|||||||
|
{
|
||||||
|
"annotations": {
|
||||||
|
"list": [
|
||||||
|
{
|
||||||
|
"builtIn": 1,
|
||||||
|
"datasource": {
|
||||||
|
"type": "grafana",
|
||||||
|
"uid": "-- Grafana --"
|
||||||
|
},
|
||||||
|
"enable": true,
|
||||||
|
"hide": true,
|
||||||
|
"iconColor": "rgba(0, 211, 255, 1)",
|
||||||
|
"name": "Annotations & Alerts",
|
||||||
|
"type": "dashboard"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"editable": true,
|
||||||
|
"fiscalYearStartMonth": 0,
|
||||||
|
"graphTooltip": 0,
|
||||||
|
"id": 1,
|
||||||
|
"links": [],
|
||||||
|
"panels": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "PBFA97CFB590B2093"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"barWidthFactor": 0.6,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 0,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "red",
|
||||||
|
"value": 80
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 0,
|
||||||
|
"y": 0
|
||||||
|
},
|
||||||
|
"id": 2,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "11.4.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"disableTextWrap": false,
|
||||||
|
"editorMode": "builder",
|
||||||
|
"expr": "da_mempool_pending_items",
|
||||||
|
"fullMetaSearch": false,
|
||||||
|
"includeNullMetadata": true,
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A",
|
||||||
|
"useBackend": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Mempool: Pending DA blobs",
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "PBFA97CFB590B2093"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"barWidthFactor": 0.6,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 0,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"color": "red",
|
||||||
|
"value": 80
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 0,
|
||||||
|
"y": 8
|
||||||
|
},
|
||||||
|
"id": 1,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "11.4.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "PBFA97CFB590B2093"
|
||||||
|
},
|
||||||
|
"disableTextWrap": false,
|
||||||
|
"editorMode": "builder",
|
||||||
|
"expr": "consensus_processed_blocks",
|
||||||
|
"fullMetaSearch": false,
|
||||||
|
"includeNullMetadata": true,
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A",
|
||||||
|
"useBackend": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Consensus: Processed Blocks",
|
||||||
|
"type": "timeseries"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"preload": false,
|
||||||
|
"schemaVersion": 40,
|
||||||
|
"tags": [],
|
||||||
|
"templating": {
|
||||||
|
"list": []
|
||||||
|
},
|
||||||
|
"time": {
|
||||||
|
"from": "now-6h",
|
||||||
|
"to": "now"
|
||||||
|
},
|
||||||
|
"timepicker": {},
|
||||||
|
"timezone": "browser",
|
||||||
|
"title": "Testnet Metrics",
|
||||||
|
"uid": "ce6ebepwk737kf",
|
||||||
|
"version": 5,
|
||||||
|
"weekStart": ""
|
||||||
|
}
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
apiVersion: 1
|
||||||
|
|
||||||
|
datasources:
|
||||||
|
- name: Prometheus
|
||||||
|
type: prometheus
|
||||||
|
access: proxy
|
||||||
|
org_id: 1
|
||||||
|
url: http://prometheus:9090
|
||||||
|
is_default: true
|
||||||
|
version: 1
|
||||||
|
editable: true
|
||||||
|
|
||||||
|
- name: Tempo
|
||||||
|
type: tempo
|
||||||
|
access: proxy
|
||||||
|
org_id: 1
|
||||||
|
url: http://tempo:3200
|
||||||
|
is_default: false
|
||||||
|
version: 1
|
||||||
|
editable: true
|
||||||
|
uid: tempods
|
||||||
|
|
||||||
|
- name: Loki
|
||||||
|
type: loki
|
||||||
|
access: proxy
|
||||||
|
org_id: 1
|
||||||
|
url: http://loki:3100
|
||||||
|
is_default: false
|
||||||
|
version: 1
|
||||||
|
editable: true
|
||||||
|
jsonData:
|
||||||
|
derivedFields:
|
||||||
|
- name: trace_id
|
||||||
|
matcherRegex: "\"trace_id\":\"(\\w+)\""
|
||||||
|
url: "$${__value.raw}"
|
||||||
|
datasourceUid: tempods
|
||||||
|
|
||||||
@@ -0,0 +1,51 @@
|
|||||||
|
instance_name = nomos dashboard
|
||||||
|
|
||||||
|
;[dashboards.json]
|
||||||
|
;enabled = true
|
||||||
|
;path = /home/git/grafana/grafana-dashboards/dashboards
|
||||||
|
|
||||||
|
|
||||||
|
#################################### Auth ##########################
|
||||||
|
[auth]
|
||||||
|
disable_login_form = false
|
||||||
|
|
||||||
|
#################################### Anonymous Auth ##########################
|
||||||
|
[auth.anonymous]
|
||||||
|
# enable anonymous access
|
||||||
|
enabled = true
|
||||||
|
|
||||||
|
# specify organization name that should be used for unauthenticated users
|
||||||
|
;org_name = Public
|
||||||
|
|
||||||
|
# specify role for unauthenticated users
|
||||||
|
; org_role = Admin
|
||||||
|
org_role = Viewer
|
||||||
|
|
||||||
|
;[security]
|
||||||
|
;admin_user = ocr
|
||||||
|
;admin_password = ocr
|
||||||
|
|
||||||
|
;[users]
|
||||||
|
# disable user signup / registration
|
||||||
|
;allow_sign_up = false
|
||||||
|
|
||||||
|
# Set to true to automatically assign new users to the default organization (id 1)
|
||||||
|
;auto_assign_org = true
|
||||||
|
|
||||||
|
# Default role new users will be automatically assigned (if disabled above is set to true)
|
||||||
|
;auto_assign_org_role = Viewer
|
||||||
|
|
||||||
|
#################################### SMTP / Emailing ##########################
|
||||||
|
;[smtp]
|
||||||
|
;enabled = false
|
||||||
|
;host = localhost:25
|
||||||
|
;user =
|
||||||
|
;password =
|
||||||
|
;cert_file =
|
||||||
|
;key_file =
|
||||||
|
;skip_verify = false
|
||||||
|
;from_address = admin@grafana.localhost
|
||||||
|
|
||||||
|
;[emails]
|
||||||
|
;welcome_email_on_sign_up = false
|
||||||
|
|
||||||
@@ -0,0 +1 @@
GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel
testing-framework/assets/stack/monitoring/prometheus.yml (Normal file, 4 lines)
@@ -0,0 +1,4 @@
global:
  evaluation_interval: 15s
  external_labels:
    monitor: "Monitoring"
testing-framework/assets/stack/monitoring/tempo.yaml (Normal file, 53 lines)
@@ -0,0 +1,53 @@
|
|||||||
|
stream_over_http_enabled: true
|
||||||
|
server:
|
||||||
|
http_listen_port: 3200
|
||||||
|
log_level: info
|
||||||
|
|
||||||
|
query_frontend:
|
||||||
|
search:
|
||||||
|
duration_slo: 5s
|
||||||
|
throughput_bytes_slo: 1.073741824e+09
|
||||||
|
trace_by_id:
|
||||||
|
duration_slo: 5s
|
||||||
|
|
||||||
|
distributor:
|
||||||
|
receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
|
||||||
|
otlp:
|
||||||
|
protocols:
|
||||||
|
grpc:
|
||||||
|
endpoint: "0.0.0.0:4317"
|
||||||
|
|
||||||
|
ingester:
|
||||||
|
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
|
||||||
|
|
||||||
|
compactor:
|
||||||
|
compaction:
|
||||||
|
block_retention: 24h
|
||||||
|
|
||||||
|
metrics_generator:
|
||||||
|
registry:
|
||||||
|
external_labels:
|
||||||
|
source: tempo
|
||||||
|
cluster: docker-compose
|
||||||
|
storage:
|
||||||
|
path: /var/tempo/generator/wal
|
||||||
|
remote_write:
|
||||||
|
- url: http://prometheus:9090/api/v1/write
|
||||||
|
send_exemplars: true
|
||||||
|
traces_storage:
|
||||||
|
path: /var/tempo/generator/traces
|
||||||
|
|
||||||
|
storage:
|
||||||
|
trace:
|
||||||
|
backend: local # backend configuration to use
|
||||||
|
wal:
|
||||||
|
path: /var/tempo/wal # where to store the wal locally
|
||||||
|
local:
|
||||||
|
path: /var/tempo/blocks
|
||||||
|
|
||||||
|
overrides:
|
||||||
|
defaults:
|
||||||
|
metrics_generator:
|
||||||
|
processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
|
||||||
|
generate_native_histograms: both
|
||||||
|
|
||||||
testing-framework/assets/stack/scripts/build_test_image.sh (Executable file, 39 lines)
@@ -0,0 +1,39 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Builds the testnet image with circuits. Prefers a local circuits bundle
|
||||||
|
# (tests/kzgrs/kzgrs_test_params) or a custom override; otherwise downloads
|
||||||
|
# from logos-co/nomos-circuits.
|
||||||
|
|
||||||
|
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." && pwd)"
|
||||||
|
DOCKERFILE_PATH="${ROOT_DIR}/testing-framework/assets/stack/Dockerfile"
|
||||||
|
IMAGE_TAG="${IMAGE_TAG:-nomos-testnet:local}"
|
||||||
|
VERSION="${VERSION:-v0.3.1}"
|
||||||
|
CIRCUITS_OVERRIDE="${CIRCUITS_OVERRIDE:-testing-framework/assets/stack/kzgrs_test_params}"
|
||||||
|
|
||||||
|
echo "Workspace root: ${ROOT_DIR}"
|
||||||
|
echo "Image tag: ${IMAGE_TAG}"
|
||||||
|
echo "Circuits override: ${CIRCUITS_OVERRIDE:-<none>}"
|
||||||
|
echo "Circuits version (fallback download): ${VERSION}"
|
||||||
|
|
||||||
|
build_args=(
|
||||||
|
-f "${DOCKERFILE_PATH}"
|
||||||
|
-t "${IMAGE_TAG}"
|
||||||
|
"${ROOT_DIR}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Pass override/version args to the Docker build.
|
||||||
|
if [ -n "${CIRCUITS_OVERRIDE}" ]; then
|
||||||
|
build_args+=(--build-arg "CIRCUITS_OVERRIDE=${CIRCUITS_OVERRIDE}")
|
||||||
|
fi
|
||||||
|
build_args+=(--build-arg "VERSION=${VERSION}")
|
||||||
|
|
||||||
|
echo "Running: docker build ${build_args[*]}"
|
||||||
|
docker build "${build_args[@]}"
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
|
||||||
|
Build complete.
|
||||||
|
- Use this image in k8s/compose by exporting NOMOS_TESTNET_IMAGE=${IMAGE_TAG}
|
||||||
|
- Circuits source: ${CIRCUITS_OVERRIDE:-download ${VERSION}}
|
||||||
|
EOF
|
||||||
testing-framework/assets/stack/scripts/run_cfgsync.sh (Executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/sh

set -e

exec /usr/bin/cfgsync-server /etc/nomos/cfgsync.yaml
testing-framework/assets/stack/scripts/run_nomos_executor.sh (Executable file, 35 lines)
@@ -0,0 +1,35 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
export CFG_FILE_PATH="/config.yaml" \
|
||||||
|
CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:4400}" \
|
||||||
|
CFG_HOST_IP=$(hostname -i) \
|
||||||
|
CFG_HOST_KIND="${CFG_HOST_KIND:-executor}" \
|
||||||
|
CFG_HOST_IDENTIFIER="${CFG_HOST_IDENTIFIER:-executor-$(hostname -i)}" \
|
||||||
|
LOG_LEVEL="INFO" \
|
||||||
|
POL_PROOF_DEV_MODE="${POL_PROOF_DEV_MODE:-true}"
|
||||||
|
|
||||||
|
# Ensure recovery directory exists to avoid early crashes in services that
|
||||||
|
# persist state.
|
||||||
|
mkdir -p /recovery
|
||||||
|
|
||||||
|
# cfgsync-server can start a little after the executor container; retry until
|
||||||
|
# it is reachable instead of exiting immediately and crash-looping.
|
||||||
|
attempt=0
|
||||||
|
max_attempts=30
|
||||||
|
sleep_seconds=3
|
||||||
|
until /usr/bin/cfgsync-client; do
|
||||||
|
attempt=$((attempt + 1))
|
||||||
|
if [ "$attempt" -ge "$max_attempts" ]; then
|
||||||
|
echo "cfgsync-client failed after ${max_attempts} attempts, giving up"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "cfgsync not ready yet (attempt ${attempt}/${max_attempts}), retrying in ${sleep_seconds}s..."
|
||||||
|
sleep "$sleep_seconds"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Align bootstrap timing with validators to keep configs consistent.
|
||||||
|
sed -i "s/prolonged_bootstrap_period: .*/prolonged_bootstrap_period: '3.000000000'/" /config.yaml
|
||||||
|
|
||||||
|
exec /usr/bin/nomos-executor /config.yaml
|
||||||
testing-framework/assets/stack/scripts/run_nomos_node.sh (Executable file, 35 lines)
@@ -0,0 +1,35 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
export CFG_FILE_PATH="/config.yaml" \
|
||||||
|
CFG_SERVER_ADDR="${CFG_SERVER_ADDR:-http://cfgsync:4400}" \
|
||||||
|
CFG_HOST_IP=$(hostname -i) \
|
||||||
|
CFG_HOST_KIND="${CFG_HOST_KIND:-validator}" \
|
||||||
|
CFG_HOST_IDENTIFIER="${CFG_HOST_IDENTIFIER:-validator-$(hostname -i)}" \
|
||||||
|
LOG_LEVEL="INFO" \
|
||||||
|
POL_PROOF_DEV_MODE="${POL_PROOF_DEV_MODE:-true}"
|
||||||
|
|
||||||
|
# Ensure recovery directory exists to avoid early crashes in services that
|
||||||
|
# persist state.
|
||||||
|
mkdir -p /recovery
|
||||||
|
|
||||||
|
# cfgsync-server can start a little after the node container; retry until it is
|
||||||
|
# reachable instead of exiting immediately and crash-looping.
|
||||||
|
attempt=0
|
||||||
|
max_attempts=30
|
||||||
|
sleep_seconds=3
|
||||||
|
until /usr/bin/cfgsync-client; do
|
||||||
|
attempt=$((attempt + 1))
|
||||||
|
if [ "$attempt" -ge "$max_attempts" ]; then
|
||||||
|
echo "cfgsync-client failed after ${max_attempts} attempts, giving up"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "cfgsync not ready yet (attempt ${attempt}/${max_attempts}), retrying in ${sleep_seconds}s..."
|
||||||
|
sleep "$sleep_seconds"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Align bootstrap timing with executors to keep configs consistent.
|
||||||
|
sed -i "s/prolonged_bootstrap_period: .*/prolonged_bootstrap_period: '3.000000000'/" /config.yaml
|
||||||
|
|
||||||
|
exec /usr/bin/nomos-node /config.yaml
|
||||||
@@ -0,0 +1,76 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Setup script for nomos-circuits
|
||||||
|
#
|
||||||
|
# Usage: ./setup-nomos-circuits.sh [VERSION] [INSTALL_DIR]
|
||||||
|
# VERSION - Optional. Version to install (default: v0.3.1)
|
||||||
|
# INSTALL_DIR - Optional. Installation directory (default: $HOME/.nomos-circuits)
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# ./setup-nomos-circuits.sh # Install default version to default location
|
||||||
|
# ./setup-nomos-circuits.sh v0.2.0 # Install specific version to default location
|
||||||
|
# ./setup-nomos-circuits.sh v0.2.0 /opt/circuits # Install to custom location
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
VERSION="${1:-v0.3.1}"
|
||||||
|
DEFAULT_INSTALL_DIR="$HOME/.nomos-circuits"
|
||||||
|
INSTALL_DIR="${2:-$DEFAULT_INSTALL_DIR}"
|
||||||
|
REPO="logos-co/nomos-circuits"
|
||||||
|
|
||||||
|
detect_platform() {
|
||||||
|
local os=""
|
||||||
|
local arch=""
|
||||||
|
case "$(uname -s)" in
|
||||||
|
Linux*) os="linux" ;;
|
||||||
|
Darwin*) os="macos" ;;
|
||||||
|
MINGW*|MSYS*|CYGWIN*) os="windows" ;;
|
||||||
|
*) echo "Unsupported operating system: $(uname -s)" >&2; exit 1 ;;
|
||||||
|
esac
|
||||||
|
case "$(uname -m)" in
|
||||||
|
x86_64) arch="x86_64" ;;
|
||||||
|
aarch64|arm64) arch="aarch64" ;;
|
||||||
|
*) echo "Unsupported architecture: $(uname -m)" >&2; exit 1 ;;
|
||||||
|
esac
|
||||||
|
echo "${os}-${arch}"
|
||||||
|
}
|
||||||
|
|
||||||
|
download_release() {
|
||||||
|
local platform="$1"
|
||||||
|
local artifact="nomos-circuits-${VERSION}-${platform}.tar.gz"
|
||||||
|
local url="https://github.com/${REPO}/releases/download/${VERSION}/${artifact}"
|
||||||
|
local temp_dir
|
||||||
|
temp_dir=$(mktemp -d)
|
||||||
|
|
||||||
|
echo "Downloading nomos-circuits ${VERSION} for ${platform}..."
|
||||||
|
if [ -n "${GITHUB_TOKEN:-}" ]; then
|
||||||
|
auth_header="Authorization: Bearer ${GITHUB_TOKEN}"
|
||||||
|
else
|
||||||
|
auth_header=""
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! curl -L ${auth_header:+-H "$auth_header"} -o "${temp_dir}/${artifact}" "${url}"; then
|
||||||
|
echo "Failed to download release artifact from ${url}" >&2
|
||||||
|
rm -rf "${temp_dir}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Extracting to ${INSTALL_DIR}..."
|
||||||
|
rm -rf "${INSTALL_DIR}"
|
||||||
|
mkdir -p "${INSTALL_DIR}"
|
||||||
|
if ! tar -xzf "${temp_dir}/${artifact}" -C "${INSTALL_DIR}" --strip-components=1; then
|
||||||
|
echo "Failed to extract ${artifact}" >&2
|
||||||
|
rm -rf "${temp_dir}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
rm -rf "${temp_dir}"
|
||||||
|
}
|
||||||
|
|
||||||
|
platform=$(detect_platform)
|
||||||
|
echo "Setting up nomos-circuits ${VERSION} for ${platform}"
|
||||||
|
echo "Installing to ${INSTALL_DIR}"
|
||||||
|
|
||||||
|
download_release "${platform}"
|
||||||
|
|
||||||
|
echo "Installation complete. Circuits installed at: ${INSTALL_DIR}"
|
||||||
|
echo "If using a custom directory, set NOMOS_CIRCUITS=${INSTALL_DIR}"
|
||||||
testing-framework/configs/Cargo.toml (Normal file, 51 lines)
@@ -0,0 +1,51 @@
|
|||||||
|
[package]
|
||||||
|
categories.workspace = true
|
||||||
|
description.workspace = true
|
||||||
|
edition.workspace = true
|
||||||
|
keywords.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
name = "testing-framework-config"
|
||||||
|
readme.workspace = true
|
||||||
|
repository.workspace = true
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
blst = "0.3.11"
|
||||||
|
chain-leader = { workspace = true }
|
||||||
|
chain-network = { workspace = true }
|
||||||
|
chain-service = { workspace = true }
|
||||||
|
cryptarchia-engine = { workspace = true, features = ["serde"] }
|
||||||
|
cryptarchia-sync = { workspace = true }
|
||||||
|
ed25519-dalek = { version = "2.2.0", features = ["rand_core", "serde"] }
|
||||||
|
groth16 = { workspace = true }
|
||||||
|
hex = { version = "0.4.3", default-features = false }
|
||||||
|
key-management-system = { workspace = true }
|
||||||
|
nomos-api = { workspace = true }
|
||||||
|
nomos-blend-message = { workspace = true }
|
||||||
|
nomos-blend-service = { workspace = true, features = ["libp2p"] }
|
||||||
|
nomos-core = { workspace = true }
|
||||||
|
nomos-da-dispersal = { workspace = true }
|
||||||
|
nomos-da-network-core = { workspace = true }
|
||||||
|
nomos-da-network-service = { workspace = true }
|
||||||
|
nomos-da-sampling = { workspace = true }
|
||||||
|
nomos-da-verifier = { workspace = true }
|
||||||
|
nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] }
|
||||||
|
nomos-ledger = { workspace = true, features = ["serde"] }
|
||||||
|
nomos-libp2p = { workspace = true }
|
||||||
|
nomos-node = { workspace = true, default-features = false, features = ["testing"] }
|
||||||
|
nomos-sdp = { workspace = true }
|
||||||
|
nomos-time = { workspace = true }
|
||||||
|
nomos-tracing = { workspace = true }
|
||||||
|
nomos-tracing-service = { workspace = true }
|
||||||
|
nomos-utils = { workspace = true }
|
||||||
|
nomos-wallet = { workspace = true }
|
||||||
|
num-bigint = { version = "0.4", default-features = false }
|
||||||
|
rand = { workspace = true }
|
||||||
|
serde = { workspace = true, features = ["derive"] }
|
||||||
|
subnetworks-assignations = { workspace = true }
|
||||||
|
time = { version = "0.3", default-features = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
zksign = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
testing-framework/configs/src/common/kms.rs (Normal file, 14 lines)
@@ -0,0 +1,14 @@
use groth16::fr_to_bytes;
use key_management_system::{
    backend::preload::KeyId,
    keys::{Key, secured_key::SecuredKey as _},
};

#[must_use]
pub fn key_id_for_preload_backend(key: &Key) -> KeyId {
    let key_id_bytes = match key {
        Key::Ed25519(ed25519_secret_key) => ed25519_secret_key.as_public_key().to_bytes(),
        Key::Zk(zk_secret_key) => fr_to_bytes(zk_secret_key.as_public_key().as_fr()),
    };
    hex::encode(key_id_bytes)
}
testing-framework/configs/src/common/mod.rs (Normal file, 1 line)
@@ -0,0 +1 @@
pub mod kms;
testing-framework/configs/src/lib.rs (Normal file, 45 lines)
@@ -0,0 +1,45 @@
|
|||||||
|
use std::{env, net::Ipv4Addr, ops::Mul as _, sync::LazyLock, time::Duration};
|
||||||
|
|
||||||
|
use nomos_core::sdp::ProviderId;
|
||||||
|
use nomos_libp2p::{Multiaddr, PeerId, multiaddr};
|
||||||
|
|
||||||
|
pub mod common;
|
||||||
|
pub mod nodes;
|
||||||
|
pub mod topology;
|
||||||
|
|
||||||
|
static IS_SLOW_TEST_ENV: LazyLock<bool> =
|
||||||
|
LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true"));
|
||||||
|
|
||||||
|
pub static IS_DEBUG_TRACING: LazyLock<bool> = LazyLock::new(|| {
|
||||||
|
env::var("NOMOS_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true"))
|
||||||
|
});
|
||||||
|
|
||||||
|
/// In slow test environments, such as Codecov runs, use a 2x timeout.
|
||||||
|
#[must_use]
|
||||||
|
pub fn adjust_timeout(d: Duration) -> Duration {
|
||||||
|
if *IS_SLOW_TEST_ENV { d.mul(2) } else { d }
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn node_address_from_port(port: u16) -> Multiaddr {
|
||||||
|
multiaddr(Ipv4Addr::LOCALHOST, port)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn secret_key_to_peer_id(node_key: nomos_libp2p::ed25519::SecretKey) -> PeerId {
|
||||||
|
PeerId::from_public_key(
|
||||||
|
&nomos_libp2p::ed25519::Keypair::from(node_key)
|
||||||
|
.public()
|
||||||
|
.into(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn secret_key_to_provider_id(node_key: nomos_libp2p::ed25519::SecretKey) -> ProviderId {
|
||||||
|
ProviderId::try_from(
|
||||||
|
nomos_libp2p::ed25519::Keypair::from(node_key)
|
||||||
|
.public()
|
||||||
|
.to_bytes(),
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
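adjust_timeout only doubles the duration when SLOW_TEST_ENV is set to "true". A self-contained sketch mirroring that logic (not the crate function itself), using only std:

    use std::time::Duration;

    // Mirrors adjust_timeout: double the timeout only in slow test environments.
    fn adjusted(base: Duration, is_slow_env: bool) -> Duration {
        if is_slow_env { base * 2 } else { base }
    }

    fn main() {
        let is_slow_env = std::env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true");
        println!("effective timeout: {:?}", adjusted(Duration::from_secs(30), is_slow_env));
    }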
330
testing-framework/configs/src/nodes/executor.rs
Normal file
@ -0,0 +1,330 @@
|
|||||||
|
use std::{
|
||||||
|
collections::HashSet,
|
||||||
|
num::{NonZeroU64, NonZeroUsize},
|
||||||
|
path::PathBuf,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use chain_leader::LeaderSettings;
|
||||||
|
use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig};
|
||||||
|
use chain_service::{CryptarchiaSettings, StartingState};
|
||||||
|
use cryptarchia_engine::time::SlotConfig;
|
||||||
|
use key_management_system::keys::{Key, ZkKey};
|
||||||
|
use nomos_blend_service::{
|
||||||
|
core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings},
|
||||||
|
settings::TimingSettings,
|
||||||
|
};
|
||||||
|
use nomos_da_dispersal::{
|
||||||
|
DispersalServiceSettings,
|
||||||
|
backend::kzgrs::{DispersalKZGRSBackendSettings, EncoderSettings},
|
||||||
|
};
|
||||||
|
use nomos_da_network_core::protocols::sampling::SubnetsConfig;
|
||||||
|
use nomos_da_network_service::{
|
||||||
|
NetworkConfig as DaNetworkConfig,
|
||||||
|
api::http::ApiAdapterSettings,
|
||||||
|
backends::libp2p::{
|
||||||
|
common::DaNetworkBackendSettings, executor::DaNetworkExecutorBackendSettings,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use nomos_da_sampling::{
|
||||||
|
DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings,
|
||||||
|
verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings,
|
||||||
|
};
|
||||||
|
use nomos_da_verifier::{
|
||||||
|
DaVerifierServiceSettings,
|
||||||
|
backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig},
|
||||||
|
storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings,
|
||||||
|
};
|
||||||
|
use nomos_executor::config::Config as ExecutorConfig;
|
||||||
|
use nomos_node::{
|
||||||
|
RocksBackendSettings,
|
||||||
|
api::backend::AxumBackendSettings as NodeAxumBackendSettings,
|
||||||
|
config::{
|
||||||
|
blend::{
|
||||||
|
deployment::{self as blend_deployment},
|
||||||
|
serde as blend_serde,
|
||||||
|
},
|
||||||
|
deployment::{CustomDeployment, Settings as NodeDeploymentSettings},
|
||||||
|
mempool::MempoolConfig,
|
||||||
|
network::deployment::Settings as NetworkDeploymentSettings,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use nomos_sdp::SdpSettings;
|
||||||
|
use nomos_time::{
|
||||||
|
TimeServiceSettings,
|
||||||
|
backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings},
|
||||||
|
};
|
||||||
|
use nomos_utils::math::NonNegativeF64;
|
||||||
|
use nomos_wallet::WalletServiceSettings;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
adjust_timeout,
|
||||||
|
common::kms::key_id_for_preload_backend,
|
||||||
|
topology::configs::{
|
||||||
|
GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
#[expect(clippy::too_many_lines, reason = "TODO: Address this at some point.")]
|
||||||
|
pub fn create_executor_config(config: GeneralConfig) -> ExecutorConfig {
|
||||||
|
let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config);
|
||||||
|
ExecutorConfig {
|
||||||
|
network: config.network_config,
|
||||||
|
blend: blend_user_config,
|
||||||
|
deployment: deployment_settings,
|
||||||
|
cryptarchia: CryptarchiaSettings {
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
starting_state: StartingState::Genesis {
|
||||||
|
genesis_tx: config.consensus_config.genesis_tx,
|
||||||
|
},
|
||||||
|
// Disable on-disk recovery in compose tests to avoid serde errors on
|
||||||
|
// non-string keys and keep services alive.
|
||||||
|
recovery_file: PathBuf::new(),
|
||||||
|
bootstrap: chain_service::BootstrapConfig {
|
||||||
|
prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period,
|
||||||
|
force_bootstrap: false,
|
||||||
|
offline_grace_period: chain_service::OfflineGracePeriodConfig {
|
||||||
|
grace_period: Duration::from_secs(20 * 60),
|
||||||
|
state_recording_interval: Duration::from_secs(60),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
chain_network: ChainNetworkSettings {
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
network_adapter_settings:
|
||||||
|
chain_network::network::adapters::libp2p::LibP2pAdapterSettings {
|
||||||
|
topic: String::from(nomos_node::CONSENSUS_TOPIC),
|
||||||
|
},
|
||||||
|
bootstrap: chain_network::BootstrapConfig {
|
||||||
|
ibd: chain_network::IbdConfig {
|
||||||
|
peers: HashSet::new(),
|
||||||
|
delay_before_new_download: Duration::from_secs(10),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
sync: SyncConfig {
|
||||||
|
orphan: OrphanConfig {
|
||||||
|
max_orphan_cache_size: NonZeroUsize::new(5)
|
||||||
|
.expect("Max orphan cache size must be non-zero"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
cryptarchia_leader: LeaderSettings {
|
||||||
|
transaction_selector_settings: (),
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
leader_config: config.consensus_config.leader_config.clone(),
|
||||||
|
blend_broadcast_settings:
|
||||||
|
nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings {
|
||||||
|
topic: String::from(nomos_node::CONSENSUS_TOPIC),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
da_network: DaNetworkConfig {
|
||||||
|
backend: DaNetworkExecutorBackendSettings {
|
||||||
|
validator_settings: DaNetworkBackendSettings {
|
||||||
|
node_key: config.da_config.node_key,
|
||||||
|
listening_address: config.da_config.listening_address,
|
||||||
|
policy_settings: config.da_config.policy_settings,
|
||||||
|
monitor_settings: config.da_config.monitor_settings,
|
||||||
|
balancer_interval: config.da_config.balancer_interval,
|
||||||
|
redial_cooldown: config.da_config.redial_cooldown,
|
||||||
|
replication_settings: config.da_config.replication_settings,
|
||||||
|
subnets_settings: SubnetsConfig {
|
||||||
|
num_of_subnets: config.da_config.num_samples as usize,
|
||||||
|
shares_retry_limit: config.da_config.retry_shares_limit,
|
||||||
|
commitments_retry_limit: config.da_config.retry_commitments_limit,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
num_subnets: config.da_config.num_subnets,
|
||||||
|
},
|
||||||
|
membership: config.da_config.membership.clone(),
|
||||||
|
api_adapter_settings: ApiAdapterSettings {
|
||||||
|
api_port: config.api_config.address.port(),
|
||||||
|
is_secure: false,
|
||||||
|
},
|
||||||
|
subnet_refresh_interval: config.da_config.subnets_refresh_interval,
|
||||||
|
subnet_threshold: config.da_config.num_samples as usize,
|
||||||
|
min_session_members: config.da_config.num_samples as usize,
|
||||||
|
},
|
||||||
|
da_verifier: DaVerifierServiceSettings {
|
||||||
|
share_verifier_settings: KzgrsDaVerifierSettings {
|
||||||
|
global_params_path: config.da_config.global_params_path.clone(),
|
||||||
|
domain_size: config.da_config.num_subnets as usize,
|
||||||
|
},
|
||||||
|
tx_verifier_settings: (),
|
||||||
|
network_adapter_settings: (),
|
||||||
|
storage_adapter_settings: VerifierStorageAdapterSettings {
|
||||||
|
blob_storage_directory: "./".into(),
|
||||||
|
},
|
||||||
|
mempool_trigger_settings: MempoolPublishTriggerConfig {
|
||||||
|
publish_threshold: NonNegativeF64::try_from(0.8).unwrap(),
|
||||||
|
share_duration: Duration::from_secs(5),
|
||||||
|
prune_duration: Duration::from_secs(30),
|
||||||
|
prune_interval: Duration::from_secs(5),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
tracing: config.tracing_config.tracing_settings,
|
||||||
|
http: nomos_api::ApiServiceSettings {
|
||||||
|
backend_settings: NodeAxumBackendSettings {
|
||||||
|
address: config.api_config.address,
|
||||||
|
rate_limit_per_second: 10000,
|
||||||
|
rate_limit_burst: 10000,
|
||||||
|
max_concurrent_requests: 1000,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
da_sampling: DaSamplingServiceSettings {
|
||||||
|
sampling_settings: KzgrsSamplingBackendSettings {
|
||||||
|
num_samples: config.da_config.num_samples,
|
||||||
|
num_subnets: config.da_config.num_subnets,
|
||||||
|
old_blobs_check_interval: config.da_config.old_blobs_check_interval,
|
||||||
|
blobs_validity_duration: config.da_config.blobs_validity_duration,
|
||||||
|
},
|
||||||
|
share_verifier_settings: SamplingVerifierSettings {
|
||||||
|
global_params_path: config.da_config.global_params_path.clone(),
|
||||||
|
domain_size: config.da_config.num_subnets as usize,
|
||||||
|
},
|
||||||
|
commitments_wait_duration: Duration::from_secs(1),
|
||||||
|
sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)),
|
||||||
|
},
|
||||||
|
storage: RocksBackendSettings {
|
||||||
|
db_path: "./db".into(),
|
||||||
|
read_only: false,
|
||||||
|
column_family: Some("blocks".into()),
|
||||||
|
},
|
||||||
|
da_dispersal: DispersalServiceSettings {
|
||||||
|
backend: DispersalKZGRSBackendSettings {
|
||||||
|
encoder_settings: EncoderSettings {
|
||||||
|
num_columns: config.da_config.num_subnets as usize,
|
||||||
|
with_cache: false,
|
||||||
|
global_params_path: config.da_config.global_params_path,
|
||||||
|
},
|
||||||
|
dispersal_timeout: Duration::from_secs(20),
|
||||||
|
retry_cooldown: Duration::from_secs(3),
|
||||||
|
retry_limit: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
time: TimeServiceSettings {
|
||||||
|
backend_settings: NtpTimeBackendSettings {
|
||||||
|
ntp_server: config.time_config.ntp_server,
|
||||||
|
ntp_client_settings: NTPClientSettings {
|
||||||
|
timeout: config.time_config.timeout,
|
||||||
|
listening_interface: config.time_config.interface,
|
||||||
|
},
|
||||||
|
update_interval: config.time_config.update_interval,
|
||||||
|
slot_config: SlotConfig {
|
||||||
|
slot_duration: config.time_config.slot_duration,
|
||||||
|
chain_start_time: config.time_config.chain_start_time,
|
||||||
|
},
|
||||||
|
epoch_config: config.consensus_config.ledger_config.epoch_config,
|
||||||
|
base_period_length: config.consensus_config.ledger_config.base_period_length(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
mempool: MempoolConfig {
|
||||||
|
pool_recovery_path: "./recovery/mempool.json".into(),
|
||||||
|
},
|
||||||
|
sdp: SdpSettings { declaration: None },
|
||||||
|
wallet: WalletServiceSettings {
|
||||||
|
known_keys: {
|
||||||
|
let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]);
|
||||||
|
keys.extend(
|
||||||
|
config
|
||||||
|
.consensus_config
|
||||||
|
.wallet_accounts
|
||||||
|
.iter()
|
||||||
|
.map(WalletAccount::public_key),
|
||||||
|
);
|
||||||
|
keys
|
||||||
|
},
|
||||||
|
},
|
||||||
|
key_management: config.kms_config,
|
||||||
|
|
||||||
|
testing_http: nomos_api::ApiServiceSettings {
|
||||||
|
backend_settings: NodeAxumBackendSettings {
|
||||||
|
address: config.api_config.testing_http_address,
|
||||||
|
rate_limit_per_second: 10000,
|
||||||
|
rate_limit_burst: 10000,
|
||||||
|
max_concurrent_requests: 1000,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_blend_service_config(
|
||||||
|
config: &TopologyBlendConfig,
|
||||||
|
) -> (blend_serde::Config, NodeDeploymentSettings) {
|
||||||
|
let zk_key_id =
|
||||||
|
key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone())));
|
||||||
|
|
||||||
|
let backend_core = &config.backend_core;
|
||||||
|
let backend_edge = &config.backend_edge;
|
||||||
|
|
||||||
|
let user = blend_serde::Config {
|
||||||
|
common: blend_serde::common::Config {
|
||||||
|
non_ephemeral_signing_key: config.private_key.clone(),
|
||||||
|
recovery_path_prefix: PathBuf::from("./recovery/blend"),
|
||||||
|
},
|
||||||
|
core: blend_serde::core::Config {
|
||||||
|
backend: blend_serde::core::BackendConfig {
|
||||||
|
listening_address: backend_core.listening_address.clone(),
|
||||||
|
core_peering_degree: backend_core.core_peering_degree.clone(),
|
||||||
|
edge_node_connection_timeout: backend_core.edge_node_connection_timeout,
|
||||||
|
max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections,
|
||||||
|
max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer,
|
||||||
|
},
|
||||||
|
zk: ZkSettings {
|
||||||
|
secret_key_kms_id: zk_key_id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
edge: blend_serde::edge::Config {
|
||||||
|
backend: blend_serde::edge::BackendConfig {
|
||||||
|
max_dial_attempts_per_peer_per_message: backend_edge
|
||||||
|
.max_dial_attempts_per_peer_per_message,
|
||||||
|
replication_factor: backend_edge.replication_factor,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let deployment_settings = blend_deployment::Settings {
|
||||||
|
common: blend_deployment::CommonSettings {
|
||||||
|
num_blend_layers: NonZeroU64::try_from(1).unwrap(),
|
||||||
|
minimum_network_size: NonZeroU64::try_from(1).unwrap(),
|
||||||
|
timing: TimingSettings {
|
||||||
|
round_duration: Duration::from_secs(1),
|
||||||
|
rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(),
|
||||||
|
rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(),
|
||||||
|
},
|
||||||
|
protocol_name: backend_core.protocol_name.clone(),
|
||||||
|
},
|
||||||
|
core: blend_deployment::CoreSettings {
|
||||||
|
scheduler: SchedulerSettings {
|
||||||
|
cover: CoverTrafficSettings {
|
||||||
|
intervals_for_safety_buffer: 100,
|
||||||
|
message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(),
|
||||||
|
},
|
||||||
|
delayer: MessageDelayerSettings {
|
||||||
|
maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
minimum_messages_coefficient: backend_core.minimum_messages_coefficient,
|
||||||
|
normalization_constant: backend_core.normalization_constant,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let deployment = NodeDeploymentSettings::Custom(CustomDeployment {
|
||||||
|
blend: deployment_settings,
|
||||||
|
network: NetworkDeploymentSettings {
|
||||||
|
identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new(
|
||||||
|
"/integration/nomos/identify/1.0.0",
|
||||||
|
),
|
||||||
|
kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new(
|
||||||
|
"/integration/nomos/kad/1.0.0",
|
||||||
|
),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
(user, deployment)
|
||||||
|
}
|
||||||
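The wallet known_keys block above seeds a HashSet with the leader's public key and then extends it with every pre-funded wallet account key. A small stand-alone sketch of that pattern, using plain strings as stand-ins for the real public-key type:

    use std::collections::HashSet;

    fn main() {
        // Stand-ins for the real public-key type.
        let leader_pk = "leader-pk".to_string();
        let wallet_account_pks = vec!["acct-1".to_string(), "acct-2".to_string()];

        let mut known_keys: HashSet<String> = HashSet::from_iter([leader_pk]);
        known_keys.extend(wallet_account_pks);

        assert_eq!(known_keys.len(), 3); // leader key plus two distinct account keys
    }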
2
testing-framework/configs/src/nodes/mod.rs
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
pub mod executor;
|
||||||
|
pub mod validator;
|
||||||
319
testing-framework/configs/src/nodes/validator.rs
Normal file
@ -0,0 +1,319 @@
|
|||||||
|
use std::{
|
||||||
|
collections::HashSet,
|
||||||
|
num::{NonZeroU64, NonZeroUsize},
|
||||||
|
path::PathBuf,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use chain_leader::LeaderSettings;
|
||||||
|
use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig};
|
||||||
|
use chain_service::{CryptarchiaSettings, StartingState};
|
||||||
|
use cryptarchia_engine::time::SlotConfig;
|
||||||
|
use key_management_system::keys::{Key, ZkKey};
|
||||||
|
use nomos_blend_service::{
|
||||||
|
core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings},
|
||||||
|
settings::TimingSettings,
|
||||||
|
};
|
||||||
|
use nomos_da_network_core::{
|
||||||
|
protocols::sampling::SubnetsConfig, swarm::DAConnectionPolicySettings,
|
||||||
|
};
|
||||||
|
use nomos_da_network_service::{
|
||||||
|
NetworkConfig as DaNetworkConfig, api::http::ApiAdapterSettings,
|
||||||
|
backends::libp2p::common::DaNetworkBackendSettings,
|
||||||
|
};
|
||||||
|
use nomos_da_sampling::{
|
||||||
|
DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings,
|
||||||
|
verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings,
|
||||||
|
};
|
||||||
|
use nomos_da_verifier::{
|
||||||
|
DaVerifierServiceSettings,
|
||||||
|
backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig},
|
||||||
|
storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings,
|
||||||
|
};
|
||||||
|
use nomos_node::{
|
||||||
|
Config as ValidatorConfig, RocksBackendSettings,
|
||||||
|
api::backend::AxumBackendSettings as NodeAxumBackendSettings,
|
||||||
|
config::{
|
||||||
|
blend::{
|
||||||
|
deployment::{self as blend_deployment},
|
||||||
|
serde as blend_serde,
|
||||||
|
},
|
||||||
|
deployment::{CustomDeployment, Settings as NodeDeploymentSettings},
|
||||||
|
mempool::MempoolConfig,
|
||||||
|
network::deployment::Settings as NetworkDeploymentSettings,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use nomos_sdp::SdpSettings;
|
||||||
|
use nomos_time::{
|
||||||
|
TimeServiceSettings,
|
||||||
|
backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings},
|
||||||
|
};
|
||||||
|
use nomos_utils::math::NonNegativeF64;
|
||||||
|
use nomos_wallet::WalletServiceSettings;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
adjust_timeout,
|
||||||
|
common::kms::key_id_for_preload_backend,
|
||||||
|
topology::configs::{
|
||||||
|
GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
#[expect(
|
||||||
|
clippy::too_many_lines,
|
||||||
|
reason = "Validator config wiring aggregates many service settings"
|
||||||
|
)]
|
||||||
|
pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig {
|
||||||
|
let da_policy_settings = config.da_config.policy_settings;
|
||||||
|
let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config);
|
||||||
|
ValidatorConfig {
|
||||||
|
network: config.network_config,
|
||||||
|
blend: blend_user_config,
|
||||||
|
deployment: deployment_settings,
|
||||||
|
cryptarchia: CryptarchiaSettings {
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
starting_state: StartingState::Genesis {
|
||||||
|
genesis_tx: config.consensus_config.genesis_tx,
|
||||||
|
},
|
||||||
|
// Disable on-disk recovery in compose tests to avoid serde errors on
|
||||||
|
// non-string keys and keep services alive.
|
||||||
|
recovery_file: PathBuf::new(),
|
||||||
|
bootstrap: chain_service::BootstrapConfig {
|
||||||
|
prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period,
|
||||||
|
force_bootstrap: false,
|
||||||
|
offline_grace_period: chain_service::OfflineGracePeriodConfig {
|
||||||
|
grace_period: Duration::from_secs(20 * 60),
|
||||||
|
state_recording_interval: Duration::from_secs(60),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
chain_network: ChainNetworkSettings {
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
network_adapter_settings:
|
||||||
|
chain_network::network::adapters::libp2p::LibP2pAdapterSettings {
|
||||||
|
topic: String::from(nomos_node::CONSENSUS_TOPIC),
|
||||||
|
},
|
||||||
|
bootstrap: chain_network::BootstrapConfig {
|
||||||
|
ibd: chain_network::IbdConfig {
|
||||||
|
peers: HashSet::new(),
|
||||||
|
delay_before_new_download: Duration::from_secs(10),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
sync: SyncConfig {
|
||||||
|
orphan: OrphanConfig {
|
||||||
|
max_orphan_cache_size: NonZeroUsize::new(5)
|
||||||
|
.expect("Max orphan cache size must be non-zero"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
cryptarchia_leader: LeaderSettings {
|
||||||
|
transaction_selector_settings: (),
|
||||||
|
config: config.consensus_config.ledger_config.clone(),
|
||||||
|
leader_config: config.consensus_config.leader_config.clone(),
|
||||||
|
blend_broadcast_settings:
|
||||||
|
nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings {
|
||||||
|
topic: String::from(nomos_node::CONSENSUS_TOPIC),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
da_network: DaNetworkConfig {
|
||||||
|
backend: DaNetworkBackendSettings {
|
||||||
|
node_key: config.da_config.node_key,
|
||||||
|
listening_address: config.da_config.listening_address,
|
||||||
|
policy_settings: DAConnectionPolicySettings {
|
||||||
|
min_dispersal_peers: 0,
|
||||||
|
min_replication_peers: da_policy_settings.min_replication_peers,
|
||||||
|
max_dispersal_failures: da_policy_settings.max_dispersal_failures,
|
||||||
|
max_sampling_failures: da_policy_settings.max_sampling_failures,
|
||||||
|
max_replication_failures: da_policy_settings.max_replication_failures,
|
||||||
|
malicious_threshold: da_policy_settings.malicious_threshold,
|
||||||
|
},
|
||||||
|
monitor_settings: config.da_config.monitor_settings,
|
||||||
|
balancer_interval: config.da_config.balancer_interval,
|
||||||
|
redial_cooldown: config.da_config.redial_cooldown,
|
||||||
|
replication_settings: config.da_config.replication_settings,
|
||||||
|
subnets_settings: SubnetsConfig {
|
||||||
|
num_of_subnets: config.da_config.num_samples as usize,
|
||||||
|
shares_retry_limit: config.da_config.retry_shares_limit,
|
||||||
|
commitments_retry_limit: config.da_config.retry_commitments_limit,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
membership: config.da_config.membership.clone(),
|
||||||
|
api_adapter_settings: ApiAdapterSettings {
|
||||||
|
api_port: config.api_config.address.port(),
|
||||||
|
is_secure: false,
|
||||||
|
},
|
||||||
|
subnet_refresh_interval: config.da_config.subnets_refresh_interval,
|
||||||
|
subnet_threshold: config.da_config.num_samples as usize,
|
||||||
|
min_session_members: config.da_config.num_samples as usize,
|
||||||
|
},
|
||||||
|
da_verifier: DaVerifierServiceSettings {
|
||||||
|
share_verifier_settings: KzgrsDaVerifierSettings {
|
||||||
|
global_params_path: config.da_config.global_params_path.clone(),
|
||||||
|
domain_size: config.da_config.num_subnets as usize,
|
||||||
|
},
|
||||||
|
tx_verifier_settings: (),
|
||||||
|
network_adapter_settings: (),
|
||||||
|
storage_adapter_settings: VerifierStorageAdapterSettings {
|
||||||
|
blob_storage_directory: "./".into(),
|
||||||
|
},
|
||||||
|
mempool_trigger_settings: MempoolPublishTriggerConfig {
|
||||||
|
publish_threshold: NonNegativeF64::try_from(0.8).unwrap(),
|
||||||
|
share_duration: Duration::from_secs(5),
|
||||||
|
prune_duration: Duration::from_secs(30),
|
||||||
|
prune_interval: Duration::from_secs(5),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
tracing: config.tracing_config.tracing_settings,
|
||||||
|
http: nomos_api::ApiServiceSettings {
|
||||||
|
backend_settings: NodeAxumBackendSettings {
|
||||||
|
address: config.api_config.address,
|
||||||
|
rate_limit_per_second: 10000,
|
||||||
|
rate_limit_burst: 10000,
|
||||||
|
max_concurrent_requests: 1000,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
da_sampling: DaSamplingServiceSettings {
|
||||||
|
sampling_settings: KzgrsSamplingBackendSettings {
|
||||||
|
num_samples: config.da_config.num_samples,
|
||||||
|
num_subnets: config.da_config.num_subnets,
|
||||||
|
old_blobs_check_interval: config.da_config.old_blobs_check_interval,
|
||||||
|
blobs_validity_duration: config.da_config.blobs_validity_duration,
|
||||||
|
},
|
||||||
|
share_verifier_settings: SamplingVerifierSettings {
|
||||||
|
global_params_path: config.da_config.global_params_path,
|
||||||
|
domain_size: config.da_config.num_subnets as usize,
|
||||||
|
},
|
||||||
|
commitments_wait_duration: Duration::from_secs(1),
|
||||||
|
sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)),
|
||||||
|
},
|
||||||
|
storage: RocksBackendSettings {
|
||||||
|
db_path: "./db".into(),
|
||||||
|
read_only: false,
|
||||||
|
column_family: Some("blocks".into()),
|
||||||
|
},
|
||||||
|
time: TimeServiceSettings {
|
||||||
|
backend_settings: NtpTimeBackendSettings {
|
||||||
|
ntp_server: config.time_config.ntp_server,
|
||||||
|
ntp_client_settings: NTPClientSettings {
|
||||||
|
timeout: config.time_config.timeout,
|
||||||
|
listening_interface: config.time_config.interface,
|
||||||
|
},
|
||||||
|
update_interval: config.time_config.update_interval,
|
||||||
|
slot_config: SlotConfig {
|
||||||
|
slot_duration: config.time_config.slot_duration,
|
||||||
|
chain_start_time: config.time_config.chain_start_time,
|
||||||
|
},
|
||||||
|
epoch_config: config.consensus_config.ledger_config.epoch_config,
|
||||||
|
base_period_length: config.consensus_config.ledger_config.base_period_length(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
mempool: MempoolConfig {
|
||||||
|
pool_recovery_path: "./recovery/mempool.json".into(),
|
||||||
|
},
|
||||||
|
sdp: SdpSettings { declaration: None },
|
||||||
|
wallet: WalletServiceSettings {
|
||||||
|
known_keys: {
|
||||||
|
let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]);
|
||||||
|
keys.extend(
|
||||||
|
config
|
||||||
|
.consensus_config
|
||||||
|
.wallet_accounts
|
||||||
|
.iter()
|
||||||
|
.map(WalletAccount::public_key),
|
||||||
|
);
|
||||||
|
keys
|
||||||
|
},
|
||||||
|
},
|
||||||
|
key_management: config.kms_config,
|
||||||
|
testing_http: nomos_api::ApiServiceSettings {
|
||||||
|
backend_settings: NodeAxumBackendSettings {
|
||||||
|
address: config.api_config.testing_http_address,
|
||||||
|
rate_limit_per_second: 10000,
|
||||||
|
rate_limit_burst: 10000,
|
||||||
|
max_concurrent_requests: 1000,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_blend_service_config(
|
||||||
|
config: &TopologyBlendConfig,
|
||||||
|
) -> (blend_serde::Config, NodeDeploymentSettings) {
|
||||||
|
let zk_key_id =
|
||||||
|
key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone())));
|
||||||
|
|
||||||
|
let backend_core = &config.backend_core;
|
||||||
|
let backend_edge = &config.backend_edge;
|
||||||
|
|
||||||
|
let user = blend_serde::Config {
|
||||||
|
common: blend_serde::common::Config {
|
||||||
|
non_ephemeral_signing_key: config.private_key.clone(),
|
||||||
|
recovery_path_prefix: PathBuf::from("./recovery/blend"),
|
||||||
|
},
|
||||||
|
core: blend_serde::core::Config {
|
||||||
|
backend: blend_serde::core::BackendConfig {
|
||||||
|
listening_address: backend_core.listening_address.clone(),
|
||||||
|
core_peering_degree: backend_core.core_peering_degree.clone(),
|
||||||
|
edge_node_connection_timeout: backend_core.edge_node_connection_timeout,
|
||||||
|
max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections,
|
||||||
|
max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer,
|
||||||
|
},
|
||||||
|
zk: ZkSettings {
|
||||||
|
secret_key_kms_id: zk_key_id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
edge: blend_serde::edge::Config {
|
||||||
|
backend: blend_serde::edge::BackendConfig {
|
||||||
|
max_dial_attempts_per_peer_per_message: backend_edge
|
||||||
|
.max_dial_attempts_per_peer_per_message,
|
||||||
|
replication_factor: backend_edge.replication_factor,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let deployment_settings = blend_deployment::Settings {
|
||||||
|
common: blend_deployment::CommonSettings {
|
||||||
|
num_blend_layers: NonZeroU64::try_from(1).unwrap(),
|
||||||
|
minimum_network_size: NonZeroU64::try_from(1).unwrap(),
|
||||||
|
timing: TimingSettings {
|
||||||
|
round_duration: Duration::from_secs(1),
|
||||||
|
rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(),
|
||||||
|
rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(),
|
||||||
|
epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(),
|
||||||
|
},
|
||||||
|
protocol_name: backend_core.protocol_name.clone(),
|
||||||
|
},
|
||||||
|
core: blend_deployment::CoreSettings {
|
||||||
|
scheduler: SchedulerSettings {
|
||||||
|
cover: CoverTrafficSettings {
|
||||||
|
intervals_for_safety_buffer: 100,
|
||||||
|
message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(),
|
||||||
|
},
|
||||||
|
delayer: MessageDelayerSettings {
|
||||||
|
maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
minimum_messages_coefficient: backend_core.minimum_messages_coefficient,
|
||||||
|
normalization_constant: backend_core.normalization_constant,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let deployment = NodeDeploymentSettings::Custom(CustomDeployment {
|
||||||
|
blend: deployment_settings,
|
||||||
|
network: NetworkDeploymentSettings {
|
||||||
|
identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new(
|
||||||
|
"/integration/nomos/identify/1.0.0",
|
||||||
|
),
|
||||||
|
kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new(
|
||||||
|
"/integration/nomos/kad/1.0.0",
|
||||||
|
),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
(user, deployment)
|
||||||
|
}
|
||||||
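Unlike the executor, the validator rebuilds DAConnectionPolicySettings with min_dispersal_peers pinned to 0 (validators sample and replicate but never disperse) while copying every other field from the shared DaParams. A hypothetical sketch of the same override using struct update syntax on a local stand-in type:

    // Hypothetical mirror of the policy struct, only to illustrate the override.
    #[derive(Clone, Debug, PartialEq)]
    struct PolicySettings {
        min_dispersal_peers: usize,
        min_replication_peers: usize,
        max_sampling_failures: usize,
    }

    fn main() {
        let shared = PolicySettings {
            min_dispersal_peers: 1,
            min_replication_peers: 1,
            max_sampling_failures: 0,
        };
        // Validators keep the shared policy but never require dispersal peers.
        let validator_policy = PolicySettings { min_dispersal_peers: 0, ..shared.clone() };
        assert_eq!(validator_policy.min_replication_peers, shared.min_replication_peers);
    }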
23
testing-framework/configs/src/topology/configs/api.rs
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
use nomos_utils::net::get_available_tcp_port;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct GeneralApiConfig {
|
||||||
|
pub address: SocketAddr,
|
||||||
|
pub testing_http_address: SocketAddr,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_api_configs(ids: &[[u8; 32]]) -> Vec<GeneralApiConfig> {
|
||||||
|
ids.iter()
|
||||||
|
.map(|_| GeneralApiConfig {
|
||||||
|
address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap())
|
||||||
|
.parse()
|
||||||
|
.unwrap(),
|
||||||
|
testing_http_address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap())
|
||||||
|
.parse()
|
||||||
|
.unwrap(),
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
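Each node gets two loopback HTTP endpoints, both bound to OS-assigned free ports. A std-only sketch of the address construction, with a fixed placeholder port instead of get_available_tcp_port:

    use std::net::SocketAddr;

    fn main() {
        let port = 18080u16; // placeholder; the real helper asks the OS for a free port
        let addr: SocketAddr = format!("127.0.0.1:{port}").parse().expect("valid socket address");
        assert!(addr.ip().is_loopback());
        assert_eq!(addr.port(), port);
    }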
72
testing-framework/configs/src/topology/configs/blend.rs
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
use core::time::Duration;
|
||||||
|
use std::{num::NonZeroU64, str::FromStr as _};
|
||||||
|
|
||||||
|
use ed25519_dalek::SigningKey;
|
||||||
|
use nomos_blend_message::crypto::keys::Ed25519PrivateKey;
|
||||||
|
use nomos_blend_service::{
|
||||||
|
core::backends::libp2p::Libp2pBlendBackendSettings as Libp2pCoreBlendBackendSettings,
|
||||||
|
edge::backends::libp2p::Libp2pBlendBackendSettings as Libp2pEdgeBlendBackendSettings,
|
||||||
|
};
|
||||||
|
use nomos_libp2p::{Multiaddr, protocol_name::StreamProtocol};
|
||||||
|
use num_bigint::BigUint;
|
||||||
|
use zksign::SecretKey;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct GeneralBlendConfig {
|
||||||
|
pub backend_core: Libp2pCoreBlendBackendSettings,
|
||||||
|
pub backend_edge: Libp2pEdgeBlendBackendSettings,
|
||||||
|
pub private_key: Ed25519PrivateKey,
|
||||||
|
pub secret_zk_key: SecretKey,
|
||||||
|
pub signer: SigningKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builds blend configs for each node.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Panics if the listening addresses built from the provided ports cannot be parsed into valid `Multiaddr`s
|
||||||
|
/// or if any of the numeric blend parameters are zero, which would make the
|
||||||
|
/// libp2p configuration invalid.
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_blend_configs(ids: &[[u8; 32]], ports: &[u16]) -> Vec<GeneralBlendConfig> {
|
||||||
|
ids.iter()
|
||||||
|
.zip(ports)
|
||||||
|
.map(|(id, port)| {
|
||||||
|
let signer = SigningKey::from_bytes(id);
|
||||||
|
|
||||||
|
let private_key = Ed25519PrivateKey::from(*id);
|
||||||
|
// We need unique ZK secret keys, so we just derive them deterministically from
|
||||||
|
// the generated Ed25519 public keys, which are guaranteed to be unique because
|
||||||
|
// they are in turn derived from the node ID.
|
||||||
|
let secret_zk_key =
|
||||||
|
SecretKey::from(BigUint::from_bytes_le(private_key.public_key().as_bytes()));
|
||||||
|
GeneralBlendConfig {
|
||||||
|
backend_core: Libp2pCoreBlendBackendSettings {
|
||||||
|
listening_address: Multiaddr::from_str(&format!(
|
||||||
|
"/ip4/127.0.0.1/udp/{port}/quic-v1",
|
||||||
|
))
|
||||||
|
.unwrap(),
|
||||||
|
core_peering_degree: 1..=3,
|
||||||
|
minimum_messages_coefficient: NonZeroU64::try_from(1)
|
||||||
|
.expect("Minimum messages coefficient cannot be zero."),
|
||||||
|
normalization_constant: 1.03f64
|
||||||
|
.try_into()
|
||||||
|
.expect("Normalization constant cannot be negative."),
|
||||||
|
edge_node_connection_timeout: Duration::from_secs(1),
|
||||||
|
max_edge_node_incoming_connections: 300,
|
||||||
|
max_dial_attempts_per_peer: NonZeroU64::try_from(3)
|
||||||
|
.expect("Max dial attempts per peer cannot be zero."),
|
||||||
|
protocol_name: StreamProtocol::new("/blend/integration-tests"),
|
||||||
|
},
|
||||||
|
backend_edge: Libp2pEdgeBlendBackendSettings {
|
||||||
|
max_dial_attempts_per_peer_per_message: 1.try_into().unwrap(),
|
||||||
|
protocol_name: StreamProtocol::new("/blend/integration-tests"),
|
||||||
|
replication_factor: 1.try_into().unwrap(),
|
||||||
|
},
|
||||||
|
private_key,
|
||||||
|
secret_zk_key,
|
||||||
|
signer,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
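The per-node ZK secret key above is derived by interpreting the Ed25519 public-key bytes as a little-endian integer, which keeps the keys deterministic and unique per node. A sketch of just that derivation step, assuming only num-bigint and placeholder key bytes (the zksign::SecretKey wrapping is not shown):

    use num_bigint::BigUint;

    fn main() {
        let public_key_bytes = [7u8; 32]; // placeholder for an Ed25519 public key
        let zk_seed = BigUint::from_bytes_le(&public_key_bytes);
        println!("zk seed has {} bits", zk_seed.bits());
    }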
20
testing-framework/configs/src/topology/configs/bootstrap.rs
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct GeneralBootstrapConfig {
|
||||||
|
pub prolonged_bootstrap_period: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const SHORT_PROLONGED_BOOTSTRAP_PERIOD: Duration = Duration::from_secs(1);
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_bootstrap_configs(
|
||||||
|
ids: &[[u8; 32]],
|
||||||
|
prolonged_bootstrap_period: Duration,
|
||||||
|
) -> Vec<GeneralBootstrapConfig> {
|
||||||
|
ids.iter()
|
||||||
|
.map(|_| GeneralBootstrapConfig {
|
||||||
|
prolonged_bootstrap_period,
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
343
testing-framework/configs/src/topology/configs/consensus.rs
Normal file
@ -0,0 +1,343 @@
|
|||||||
|
use std::{num::NonZero, sync::Arc};
|
||||||
|
|
||||||
|
use chain_leader::LeaderConfig;
|
||||||
|
use cryptarchia_engine::EpochConfig;
|
||||||
|
use ed25519_dalek::ed25519::signature::SignerMut as _;
|
||||||
|
use groth16::CompressedGroth16Proof;
|
||||||
|
use nomos_core::{
|
||||||
|
mantle::{
|
||||||
|
MantleTx, Note, OpProof, Utxo,
|
||||||
|
genesis_tx::GenesisTx,
|
||||||
|
ledger::Tx as LedgerTx,
|
||||||
|
ops::{
|
||||||
|
Op,
|
||||||
|
channel::{ChannelId, Ed25519PublicKey, MsgId, inscribe::InscriptionOp},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
sdp::{DeclarationMessage, Locator, ProviderId, ServiceParameters, ServiceType},
|
||||||
|
};
|
||||||
|
use nomos_node::{SignedMantleTx, Transaction as _};
|
||||||
|
use num_bigint::BigUint;
|
||||||
|
use zksign::{PublicKey, SecretKey};
|
||||||
|
|
||||||
|
use super::wallet::{WalletAccount, WalletConfig};
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ConsensusParams {
|
||||||
|
pub n_participants: usize,
|
||||||
|
pub security_param: NonZero<u32>,
|
||||||
|
pub active_slot_coeff: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConsensusParams {
|
||||||
|
#[must_use]
|
||||||
|
pub const fn default_for_participants(n_participants: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
n_participants,
|
||||||
|
// by setting the slot coeff close to 1, we also increase the probability of multiple blocks
|
||||||
|
// (forks) being produced in the same slot. Setting the security
|
||||||
|
// parameter to some value > 1 ensures nodes have some time to sync before
|
||||||
|
// deciding on the longest chain.
|
||||||
|
security_param: NonZero::new(10).unwrap(),
|
||||||
|
// a block should be produced (on average) in almost every slot
|
||||||
|
active_slot_coeff: 0.9,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ProviderInfo {
|
||||||
|
pub service_type: ServiceType,
|
||||||
|
pub provider_sk: ed25519_dalek::SigningKey,
|
||||||
|
pub zk_sk: SecretKey,
|
||||||
|
pub locator: Locator,
|
||||||
|
pub note: ServiceNote,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProviderInfo {
|
||||||
|
#[must_use]
|
||||||
|
pub fn provider_id(&self) -> ProviderId {
|
||||||
|
ProviderId(self.provider_sk.verifying_key())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn zk_id(&self) -> PublicKey {
|
||||||
|
self.zk_sk.to_public_key()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// General consensus configuration for a chosen participant, that later could
|
||||||
|
/// be converted into a specific service or services configuration.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct GeneralConsensusConfig {
|
||||||
|
pub leader_config: LeaderConfig,
|
||||||
|
pub ledger_config: nomos_ledger::Config,
|
||||||
|
pub genesis_tx: GenesisTx,
|
||||||
|
pub utxos: Vec<Utxo>,
|
||||||
|
pub blend_notes: Vec<ServiceNote>,
|
||||||
|
pub da_notes: Vec<ServiceNote>,
|
||||||
|
pub wallet_accounts: Vec<WalletAccount>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ServiceNote {
|
||||||
|
pub pk: PublicKey,
|
||||||
|
pub sk: SecretKey,
|
||||||
|
pub note: Note,
|
||||||
|
pub output_index: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_genesis_tx(utxos: &[Utxo]) -> GenesisTx {
|
||||||
|
// Create a genesis inscription op (similar to config.yaml)
|
||||||
|
let inscription = InscriptionOp {
|
||||||
|
channel_id: ChannelId::from([0; 32]),
|
||||||
|
inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes
|
||||||
|
parent: MsgId::root(),
|
||||||
|
signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create ledger transaction with the utxos as outputs
|
||||||
|
let outputs: Vec<Note> = utxos.iter().map(|u| u.note).collect();
|
||||||
|
let ledger_tx = LedgerTx::new(vec![], outputs);
|
||||||
|
|
||||||
|
// Create the mantle transaction
|
||||||
|
let mantle_tx = MantleTx {
|
||||||
|
ops: vec![Op::ChannelInscribe(inscription)],
|
||||||
|
ledger_tx,
|
||||||
|
execution_gas_price: 0,
|
||||||
|
storage_gas_price: 0,
|
||||||
|
};
|
||||||
|
let signed_mantle_tx = SignedMantleTx {
|
||||||
|
mantle_tx,
|
||||||
|
ops_proofs: vec![OpProof::NoProof],
|
||||||
|
ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Wrap in GenesisTx
|
||||||
|
GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_consensus_configs(
|
||||||
|
ids: &[[u8; 32]],
|
||||||
|
consensus_params: &ConsensusParams,
|
||||||
|
wallet: &WalletConfig,
|
||||||
|
) -> Vec<GeneralConsensusConfig> {
|
||||||
|
let mut leader_keys = Vec::new();
|
||||||
|
let mut blend_notes = Vec::new();
|
||||||
|
let mut da_notes = Vec::new();
|
||||||
|
|
||||||
|
let utxos = create_utxos_for_leader_and_services(
|
||||||
|
ids,
|
||||||
|
&mut leader_keys,
|
||||||
|
&mut blend_notes,
|
||||||
|
&mut da_notes,
|
||||||
|
);
|
||||||
|
let utxos = append_wallet_utxos(utxos, wallet);
|
||||||
|
let genesis_tx = create_genesis_tx(&utxos);
|
||||||
|
let ledger_config = nomos_ledger::Config {
|
||||||
|
epoch_config: EpochConfig {
|
||||||
|
epoch_stake_distribution_stabilization: NonZero::new(3).unwrap(),
|
||||||
|
epoch_period_nonce_buffer: NonZero::new(3).unwrap(),
|
||||||
|
epoch_period_nonce_stabilization: NonZero::new(4).unwrap(),
|
||||||
|
},
|
||||||
|
consensus_config: cryptarchia_engine::Config {
|
||||||
|
security_param: consensus_params.security_param,
|
||||||
|
active_slot_coeff: consensus_params.active_slot_coeff,
|
||||||
|
},
|
||||||
|
sdp_config: nomos_ledger::mantle::sdp::Config {
|
||||||
|
service_params: Arc::new(
|
||||||
|
[
|
||||||
|
(
|
||||||
|
ServiceType::BlendNetwork,
|
||||||
|
ServiceParameters {
|
||||||
|
lock_period: 10,
|
||||||
|
inactivity_period: 20,
|
||||||
|
retention_period: 100,
|
||||||
|
timestamp: 0,
|
||||||
|
session_duration: 1000,
|
||||||
|
},
|
||||||
|
),
|
||||||
|
(
|
||||||
|
ServiceType::DataAvailability,
|
||||||
|
ServiceParameters {
|
||||||
|
lock_period: 10,
|
||||||
|
inactivity_period: 20,
|
||||||
|
retention_period: 100,
|
||||||
|
timestamp: 0,
|
||||||
|
session_duration: 1000,
|
||||||
|
},
|
||||||
|
),
|
||||||
|
]
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
min_stake: nomos_core::sdp::MinStake {
|
||||||
|
threshold: 1,
|
||||||
|
timestamp: 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
leader_keys
|
||||||
|
.into_iter()
|
||||||
|
.map(|(pk, sk)| GeneralConsensusConfig {
|
||||||
|
leader_config: LeaderConfig { pk, sk },
|
||||||
|
ledger_config: ledger_config.clone(),
|
||||||
|
genesis_tx: genesis_tx.clone(),
|
||||||
|
utxos: utxos.clone(),
|
||||||
|
da_notes: da_notes.clone(),
|
||||||
|
blend_notes: blend_notes.clone(),
|
||||||
|
wallet_accounts: wallet.accounts.clone(),
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_utxos_for_leader_and_services(
|
||||||
|
ids: &[[u8; 32]],
|
||||||
|
leader_keys: &mut Vec<(PublicKey, SecretKey)>,
|
||||||
|
blend_notes: &mut Vec<ServiceNote>,
|
||||||
|
da_notes: &mut Vec<ServiceNote>,
|
||||||
|
) -> Vec<Utxo> {
|
||||||
|
let derive_key_material = |prefix: &[u8], id_bytes: &[u8]| -> [u8; 16] {
|
||||||
|
let mut sk_data = [0; 16];
|
||||||
|
let prefix_len = prefix.len();
|
||||||
|
|
||||||
|
sk_data[..prefix_len].copy_from_slice(prefix);
|
||||||
|
let remaining_len = 16 - prefix_len;
|
||||||
|
sk_data[prefix_len..].copy_from_slice(&id_bytes[..remaining_len]);
|
||||||
|
|
||||||
|
sk_data
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut utxos = Vec::new();
|
||||||
|
|
||||||
|
// Track the output index that the ledger tx will assign to each note.
|
||||||
|
let mut output_index = 0;
|
||||||
|
|
||||||
|
// Create notes for leader, Blend and DA declarations.
|
||||||
|
for &id in ids {
|
||||||
|
let sk_leader_data = derive_key_material(b"ld", &id);
|
||||||
|
let sk_leader = SecretKey::from(BigUint::from_bytes_le(&sk_leader_data));
|
||||||
|
let pk_leader = sk_leader.to_public_key();
|
||||||
|
leader_keys.push((pk_leader, sk_leader));
|
||||||
|
utxos.push(Utxo {
|
||||||
|
note: Note::new(1_000, pk_leader),
|
||||||
|
tx_hash: BigUint::from(0u8).into(),
|
||||||
|
output_index: 0,
|
||||||
|
});
|
||||||
|
output_index += 1;
|
||||||
|
|
||||||
|
let sk_da_data = derive_key_material(b"da", &id);
|
||||||
|
let sk_da = SecretKey::from(BigUint::from_bytes_le(&sk_da_data));
|
||||||
|
let pk_da = sk_da.to_public_key();
|
||||||
|
let note_da = Note::new(1, pk_da);
|
||||||
|
da_notes.push(ServiceNote {
|
||||||
|
pk: pk_da,
|
||||||
|
sk: sk_da,
|
||||||
|
note: note_da,
|
||||||
|
output_index,
|
||||||
|
});
|
||||||
|
utxos.push(Utxo {
|
||||||
|
note: note_da,
|
||||||
|
tx_hash: BigUint::from(0u8).into(),
|
||||||
|
output_index: 0,
|
||||||
|
});
|
||||||
|
output_index += 1;
|
||||||
|
|
||||||
|
let sk_blend_data = derive_key_material(b"bn", &id);
|
||||||
|
let sk_blend = SecretKey::from(BigUint::from_bytes_le(&sk_blend_data));
|
||||||
|
let pk_blend = sk_blend.to_public_key();
|
||||||
|
let note_blend = Note::new(1, pk_blend);
|
||||||
|
blend_notes.push(ServiceNote {
|
||||||
|
pk: pk_blend,
|
||||||
|
sk: sk_blend,
|
||||||
|
note: note_blend,
|
||||||
|
output_index,
|
||||||
|
});
|
||||||
|
utxos.push(Utxo {
|
||||||
|
note: note_blend,
|
||||||
|
tx_hash: BigUint::from(0u8).into(),
|
||||||
|
output_index: 0,
|
||||||
|
});
|
||||||
|
output_index += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
utxos
|
||||||
|
}
|
||||||
|
|
||||||
|
fn append_wallet_utxos(mut utxos: Vec<Utxo>, wallet: &WalletConfig) -> Vec<Utxo> {
|
||||||
|
for account in &wallet.accounts {
|
||||||
|
utxos.push(Utxo {
|
||||||
|
note: Note::new(account.value, account.public_key()),
|
||||||
|
tx_hash: BigUint::from(0u8).into(),
|
||||||
|
output_index: 0,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
utxos
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_genesis_tx_with_declarations(
|
||||||
|
ledger_tx: LedgerTx,
|
||||||
|
providers: Vec<ProviderInfo>,
|
||||||
|
) -> GenesisTx {
|
||||||
|
let inscription = InscriptionOp {
|
||||||
|
channel_id: ChannelId::from([0; 32]),
|
||||||
|
inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes
|
||||||
|
parent: MsgId::root(),
|
||||||
|
signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let ledger_tx_hash = ledger_tx.hash();
|
||||||
|
|
||||||
|
let mut ops = vec![Op::ChannelInscribe(inscription)];
|
||||||
|
|
||||||
|
for provider in &providers {
|
||||||
|
let utxo = Utxo {
|
||||||
|
tx_hash: ledger_tx_hash,
|
||||||
|
output_index: provider.note.output_index,
|
||||||
|
note: provider.note.note,
|
||||||
|
};
|
||||||
|
let declaration = DeclarationMessage {
|
||||||
|
service_type: provider.service_type,
|
||||||
|
locators: vec![provider.locator.clone()],
|
||||||
|
provider_id: provider.provider_id(),
|
||||||
|
zk_id: provider.zk_id(),
|
||||||
|
locked_note_id: utxo.id(),
|
||||||
|
};
|
||||||
|
ops.push(Op::SDPDeclare(declaration));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mantle_tx = MantleTx {
|
||||||
|
ops,
|
||||||
|
ledger_tx,
|
||||||
|
execution_gas_price: 0,
|
||||||
|
storage_gas_price: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mantle_tx_hash = mantle_tx.hash();
|
||||||
|
let mut ops_proofs = vec![OpProof::NoProof];
|
||||||
|
|
||||||
|
for mut provider in providers {
|
||||||
|
let zk_sig =
|
||||||
|
SecretKey::multi_sign(&[provider.note.sk, provider.zk_sk], mantle_tx_hash.as_ref())
|
||||||
|
.unwrap();
|
||||||
|
let ed25519_sig = provider
|
||||||
|
.provider_sk
|
||||||
|
.sign(mantle_tx_hash.as_signing_bytes().as_ref());
|
||||||
|
|
||||||
|
ops_proofs.push(OpProof::ZkAndEd25519Sigs {
|
||||||
|
zk_sig,
|
||||||
|
ed25519_sig,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let signed_mantle_tx = SignedMantleTx {
|
||||||
|
mantle_tx,
|
||||||
|
ops_proofs,
|
||||||
|
ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])),
|
||||||
|
};
|
||||||
|
|
||||||
|
GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction")
|
||||||
|
}
|
||||||
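derive_key_material builds a 16-byte seed from a two-byte ASCII role prefix ("ld", "da", "bn") followed by the first 14 bytes of the node id, so each node gets distinct yet deterministic leader, DA, and Blend keys. A self-contained sketch of the same construction:

    fn derive_key_material(prefix: &[u8], id_bytes: &[u8]) -> [u8; 16] {
        let mut sk_data = [0u8; 16];
        let prefix_len = prefix.len();
        sk_data[..prefix_len].copy_from_slice(prefix);
        sk_data[prefix_len..].copy_from_slice(&id_bytes[..16 - prefix_len]);
        sk_data
    }

    fn main() {
        let id = [42u8; 32]; // placeholder node id
        let leader_seed = derive_key_material(b"ld", &id);
        let da_seed = derive_key_material(b"da", &id);
        assert_ne!(leader_seed, da_seed); // role prefixes keep the seeds distinct
    }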
242
testing-framework/configs/src/topology/configs/da.rs
Normal file
@ -0,0 +1,242 @@
|
|||||||
|
use std::{
|
||||||
|
collections::{HashMap, HashSet},
|
||||||
|
env,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
process,
|
||||||
|
str::FromStr as _,
|
||||||
|
sync::LazyLock,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use ed25519_dalek::SigningKey;
|
||||||
|
use nomos_core::sdp::SessionNumber;
|
||||||
|
use nomos_da_network_core::swarm::{
|
||||||
|
DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig,
|
||||||
|
};
|
||||||
|
use nomos_libp2p::{Multiaddr, PeerId, ed25519};
|
||||||
|
use nomos_node::NomosDaMembership;
|
||||||
|
use num_bigint::BigUint;
|
||||||
|
use rand::random;
|
||||||
|
use subnetworks_assignations::{MembershipCreator as _, MembershipHandler as _};
|
||||||
|
use tracing::warn;
|
||||||
|
use zksign::SecretKey;
|
||||||
|
|
||||||
|
use crate::secret_key_to_peer_id;
|
||||||
|
|
||||||
|
pub static GLOBAL_PARAMS_PATH: LazyLock<String> = LazyLock::new(resolve_global_params_path);
|
||||||
|
|
||||||
|
fn resolve_global_params_path() -> String {
|
||||||
|
if let Ok(path) = env::var("NOMOS_KZGRS_PARAMS_PATH") {
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
let workspace_root = env::var("CARGO_WORKSPACE_DIR")
|
||||||
|
.map(PathBuf::from)
|
||||||
|
.ok()
|
||||||
|
.or_else(|| {
|
||||||
|
PathBuf::from(env!("CARGO_MANIFEST_DIR"))
|
||||||
|
.parent()
|
||||||
|
.and_then(Path::parent)
|
||||||
|
.map(Path::to_path_buf)
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")));
|
||||||
|
|
||||||
|
let params_path = workspace_root.join("testing-framework/assets/stack/kzgrs_test_params");
|
||||||
|
match params_path.canonicalize() {
|
||||||
|
Ok(path) => path.to_string_lossy().to_string(),
|
||||||
|
Err(err) => {
|
||||||
|
warn!(
|
||||||
|
?err,
|
||||||
|
path = %params_path.display(),
|
||||||
|
"falling back to non-canonical KZG params path; set NOMOS_KZGRS_PARAMS_PATH to override"
|
||||||
|
);
|
||||||
|
params_path.to_string_lossy().to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct DaParams {
|
||||||
|
pub subnetwork_size: usize,
|
||||||
|
pub dispersal_factor: usize,
|
||||||
|
pub num_samples: u16,
|
||||||
|
pub num_subnets: u16,
|
||||||
|
pub old_blobs_check_interval: Duration,
|
||||||
|
pub blobs_validity_duration: Duration,
|
||||||
|
pub global_params_path: String,
|
||||||
|
pub policy_settings: DAConnectionPolicySettings,
|
||||||
|
pub monitor_settings: DAConnectionMonitorSettings,
|
||||||
|
pub balancer_interval: Duration,
|
||||||
|
pub redial_cooldown: Duration,
|
||||||
|
pub replication_settings: ReplicationConfig,
|
||||||
|
pub subnets_refresh_interval: Duration,
|
||||||
|
pub retry_shares_limit: usize,
|
||||||
|
pub retry_commitments_limit: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for DaParams {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
subnetwork_size: 2,
|
||||||
|
dispersal_factor: 1,
|
||||||
|
num_samples: 1,
|
||||||
|
num_subnets: 2,
|
||||||
|
old_blobs_check_interval: Duration::from_secs(5),
|
||||||
|
blobs_validity_duration: Duration::from_secs(60),
|
||||||
|
global_params_path: GLOBAL_PARAMS_PATH.to_string(),
|
||||||
|
policy_settings: DAConnectionPolicySettings {
|
||||||
|
min_dispersal_peers: 1,
|
||||||
|
min_replication_peers: 1,
|
||||||
|
max_dispersal_failures: 0,
|
||||||
|
max_sampling_failures: 0,
|
||||||
|
max_replication_failures: 0,
|
||||||
|
malicious_threshold: 0,
|
||||||
|
},
|
||||||
|
monitor_settings: DAConnectionMonitorSettings {
|
||||||
|
failure_time_window: Duration::from_secs(5),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
balancer_interval: Duration::from_secs(1),
|
||||||
|
redial_cooldown: Duration::ZERO,
|
||||||
|
replication_settings: ReplicationConfig {
|
||||||
|
seen_message_cache_size: 1000,
|
||||||
|
seen_message_ttl: Duration::from_secs(3600),
|
||||||
|
},
|
||||||
|
subnets_refresh_interval: Duration::from_secs(30),
|
||||||
|
retry_shares_limit: 1,
|
||||||
|
retry_commitments_limit: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct GeneralDaConfig {
|
||||||
|
pub node_key: ed25519::SecretKey,
|
||||||
|
pub signer: SigningKey,
|
||||||
|
pub peer_id: PeerId,
|
||||||
|
pub membership: NomosDaMembership,
|
||||||
|
pub listening_address: Multiaddr,
|
||||||
|
pub blob_storage_directory: PathBuf,
|
||||||
|
pub global_params_path: String,
|
||||||
|
pub verifier_sk: String,
|
||||||
|
pub verifier_index: HashSet<u16>,
|
||||||
|
pub num_samples: u16,
|
||||||
|
pub num_subnets: u16,
|
||||||
|
pub old_blobs_check_interval: Duration,
|
||||||
|
pub blobs_validity_duration: Duration,
|
||||||
|
pub policy_settings: DAConnectionPolicySettings,
|
||||||
|
pub monitor_settings: DAConnectionMonitorSettings,
|
||||||
|
pub balancer_interval: Duration,
|
||||||
|
pub redial_cooldown: Duration,
|
||||||
|
pub replication_settings: ReplicationConfig,
|
||||||
|
pub subnets_refresh_interval: Duration,
|
||||||
|
pub retry_shares_limit: usize,
|
||||||
|
pub retry_commitments_limit: usize,
|
||||||
|
pub secret_zk_key: SecretKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn create_da_configs(
|
||||||
|
ids: &[[u8; 32]],
|
||||||
|
da_params: &DaParams,
|
||||||
|
ports: &[u16],
|
||||||
|
) -> Vec<GeneralDaConfig> {
|
||||||
|
let mut node_keys = vec![];
|
||||||
|
let mut peer_ids = vec![];
|
||||||
|
let mut listening_addresses = vec![];
|
||||||
|
|
||||||
|
for (i, id) in ids.iter().enumerate() {
|
||||||
|
let mut node_key_bytes = *id;
|
||||||
|
let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes)
|
||||||
|
.expect("Failed to generate secret key from bytes");
|
||||||
|
node_keys.push(node_key.clone());
|
||||||
|
|
||||||
|
let peer_id = secret_key_to_peer_id(node_key);
|
||||||
|
peer_ids.push(peer_id);
|
||||||
|
|
||||||
|
let listening_address =
|
||||||
|
Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}/quic-v1", ports[i],))
|
||||||
|
.expect("Failed to create multiaddr");
|
||||||
|
listening_addresses.push(listening_address);
|
||||||
|
}
|
||||||
|
|
||||||
|
let membership = {
|
||||||
|
let template = NomosDaMembership::new(
|
||||||
|
SessionNumber::default(),
|
||||||
|
da_params.subnetwork_size,
|
||||||
|
da_params.dispersal_factor,
|
||||||
|
);
|
||||||
|
let mut assignations: HashMap<u16, HashSet<PeerId>> = HashMap::new();
|
||||||
|
if peer_ids.is_empty() {
|
||||||
|
for id in 0..da_params.subnetwork_size {
|
||||||
|
assignations.insert(u16::try_from(id).unwrap_or_default(), HashSet::new());
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let mut sorted_peers = peer_ids.clone();
|
||||||
|
sorted_peers.sort_unstable();
|
||||||
|
let dispersal = da_params.dispersal_factor.max(1);
|
||||||
|
let mut peer_cycle = sorted_peers.iter().cycle();
|
||||||
|
for id in 0..da_params.subnetwork_size {
|
||||||
|
let mut members = HashSet::new();
|
||||||
|
for _ in 0..dispersal {
|
||||||
|
// cycle() only yields None when the iterator is empty, which we guard against.
|
||||||
|
if let Some(peer) = peer_cycle.next() {
|
||||||
|
members.insert(*peer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assignations.insert(u16::try_from(id).unwrap_or_default(), members);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template.init(SessionNumber::default(), assignations)
|
||||||
|
};
|
||||||
|
|
||||||
|
ids.iter()
|
||||||
|
.zip(node_keys)
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, (id, node_key))| {
|
||||||
|
let blob_storage_directory = env::temp_dir().join(format!(
|
||||||
|
"nomos-da-blob-{}-{i}-{}",
|
||||||
|
process::id(),
|
||||||
|
random::<u64>()
|
||||||
|
));
|
||||||
|
let _ = std::fs::create_dir_all(&blob_storage_directory);
|
||||||
|
let verifier_sk = blst::min_sig::SecretKey::key_gen(id, &[]).unwrap();
|
||||||
|
let verifier_sk_bytes = verifier_sk.to_bytes();
|
||||||
|
let peer_id = peer_ids[i];
|
||||||
|
let signer = SigningKey::from_bytes(id);
|
||||||
|
let subnetwork_ids = membership.membership(&peer_id);
|
||||||
|
|
||||||
|
// We need unique ZK secret keys, so we just derive them deterministically from
|
||||||
|
// the generated Ed25519 public keys, which are guaranteed to be unique because
|
||||||
|
// they are in turn derived from the node ID.
|
||||||
|
let secret_zk_key =
|
||||||
|
SecretKey::from(BigUint::from_bytes_le(signer.verifying_key().as_bytes()));
|
||||||
|
|
||||||
|
GeneralDaConfig {
|
||||||
|
node_key,
|
||||||
|
signer,
|
||||||
|
peer_id,
|
||||||
|
secret_zk_key,
|
||||||
|
membership: membership.clone(),
|
||||||
|
listening_address: listening_addresses[i].clone(),
|
||||||
|
blob_storage_directory,
|
||||||
|
global_params_path: da_params.global_params_path.clone(),
|
||||||
|
verifier_sk: hex::encode(verifier_sk_bytes),
|
||||||
|
verifier_index: subnetwork_ids,
|
||||||
|
num_samples: da_params.num_samples,
|
||||||
|
num_subnets: da_params.num_subnets,
|
||||||
|
old_blobs_check_interval: da_params.old_blobs_check_interval,
|
||||||
|
blobs_validity_duration: da_params.blobs_validity_duration,
|
||||||
|
policy_settings: da_params.policy_settings.clone(),
|
||||||
|
monitor_settings: da_params.monitor_settings.clone(),
|
||||||
|
balancer_interval: da_params.balancer_interval,
|
||||||
|
redial_cooldown: da_params.redial_cooldown,
|
||||||
|
replication_settings: da_params.replication_settings,
|
||||||
|
subnets_refresh_interval: da_params.subnets_refresh_interval,
|
||||||
|
retry_shares_limit: da_params.retry_shares_limit,
|
||||||
|
retry_commitments_limit: da_params.retry_commitments_limit,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
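Illustration (not part of the imported file): a minimal sketch of how `create_da_configs` could be driven for a two-node setup. It assumes `DaParams::default()` and `nomos_utils::net::get_available_udp_port`, both used elsewhere in this crate; the function and assertion names are hypothetical.

// Hypothetical usage sketch; not part of the imported source.
use nomos_utils::net::get_available_udp_port;

fn example_two_node_da_configs() {
    let ids = [[1u8; 32], [2u8; 32]];
    let ports = [
        get_available_udp_port().expect("a free UDP port"),
        get_available_udp_port().expect("a free UDP port"),
    ];

    let configs = create_da_configs(&ids, &DaParams::default(), &ports);

    // One GeneralDaConfig per node id, each with its own peer identity and
    // a loopback QUIC listening address derived from the supplied port.
    assert_eq!(configs.len(), ids.len());
    assert_ne!(configs[0].peer_id, configs[1].peer_id);
}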
67
testing-framework/configs/src/topology/configs/deployment.rs
Normal file
@ -0,0 +1,67 @@
use core::{num::NonZeroU64, time::Duration};

use nomos_blend_service::{
    core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings},
    settings::TimingSettings,
};
use nomos_libp2p::protocol_name::StreamProtocol;
use nomos_node::config::{
    blend::deployment::{
        CommonSettings as BlendCommonSettings, CoreSettings as BlendCoreSettings,
        Settings as BlendDeploymentSettings,
    },
    deployment::{CustomDeployment, Settings as DeploymentSettings},
    network::deployment::Settings as NetworkDeploymentSettings,
};
use nomos_utils::math::NonNegativeF64;

#[must_use]
pub fn default_e2e_deployment_settings() -> DeploymentSettings {
    DeploymentSettings::Custom(CustomDeployment {
        blend: BlendDeploymentSettings {
            common: BlendCommonSettings {
                minimum_network_size: NonZeroU64::try_from(30u64)
                    .expect("Minimum network size cannot be zero."),
                num_blend_layers: NonZeroU64::try_from(3)
                    .expect("Number of blend layers cannot be zero."),
                timing: TimingSettings {
                    round_duration: Duration::from_secs(1),
                    rounds_per_interval: NonZeroU64::try_from(30u64)
                        .expect("Rounds per interval cannot be zero."),
                    // (21,600 blocks * 30s per block) / 1s per round = 648,000 rounds
                    rounds_per_session: NonZeroU64::try_from(648_000u64)
                        .expect("Rounds per session cannot be zero."),
                    rounds_per_observation_window: NonZeroU64::try_from(30u64)
                        .expect("Rounds per observation window cannot be zero."),
                    rounds_per_session_transition_period: NonZeroU64::try_from(30u64)
                        .expect("Rounds per session transition period cannot be zero."),
                    epoch_transition_period_in_slots: NonZeroU64::try_from(2_600)
                        .expect("Epoch transition period in slots cannot be zero."),
                },
                protocol_name: StreamProtocol::new("/blend/integration-tests"),
            },
            core: BlendCoreSettings {
                minimum_messages_coefficient: NonZeroU64::try_from(1)
                    .expect("Minimum messages coefficient cannot be zero."),
                normalization_constant: 1.03f64
                    .try_into()
                    .expect("Normalization constant cannot be negative."),
                scheduler: SchedulerSettings {
                    cover: CoverTrafficSettings {
                        intervals_for_safety_buffer: 100,
                        message_frequency_per_round: NonNegativeF64::try_from(1f64)
                            .expect("Message frequency per round cannot be negative."),
                    },
                    delayer: MessageDelayerSettings {
                        maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64)
                            .expect("Maximum release delay between rounds cannot be zero."),
                    },
                },
            },
        },
        network: NetworkDeploymentSettings {
            identify_protocol_name: StreamProtocol::new("/integration/nomos/identify/1.0.0"),
            kademlia_protocol_name: StreamProtocol::new("/integration/nomos/kad/1.0.0"),
        },
    })
}
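Illustration (not part of the imported file): the 648,000-round session configured above follows the arithmetic in the inline comment. A hedged sketch of that derivation, where the block count and block time are assumed constants mirroring the comment, not values exported by this module:

// Hypothetical sketch of the session-length arithmetic; constants are assumptions
// taken from the inline comment above.
const BLOCKS_PER_SESSION: u64 = 21_600;
const SECONDS_PER_BLOCK: u64 = 30;
const SECONDS_PER_ROUND: u64 = 1;

fn rounds_per_session() -> u64 {
    // 21_600 * 30 / 1 = 648_000 rounds
    (BLOCKS_PER_SESSION * SECONDS_PER_BLOCK) / SECONDS_PER_ROUND
}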
164
testing-framework/configs/src/topology/configs/mod.rs
Normal file
@ -0,0 +1,164 @@
pub mod api;
pub mod blend;
pub mod bootstrap;
pub mod consensus;
pub mod da;
pub mod network;
pub mod time;
pub mod tracing;
pub mod wallet;

use blend::GeneralBlendConfig;
use consensus::{GeneralConsensusConfig, ProviderInfo, create_genesis_tx_with_declarations};
use da::GeneralDaConfig;
use key_management_system::{
    backend::preload::PreloadKMSBackendSettings,
    keys::{Ed25519Key, Key, ZkKey},
};
use network::GeneralNetworkConfig;
use nomos_core::{
    mantle::GenesisTx as _,
    sdp::{Locator, ServiceType},
};
use nomos_utils::net::get_available_udp_port;
use rand::{Rng as _, thread_rng};
use tracing::GeneralTracingConfig;
use wallet::WalletConfig;

use crate::{
    common::kms::key_id_for_preload_backend,
    topology::configs::{
        api::GeneralApiConfig,
        bootstrap::{GeneralBootstrapConfig, SHORT_PROLONGED_BOOTSTRAP_PERIOD},
        consensus::ConsensusParams,
        da::DaParams,
        network::NetworkParams,
        time::GeneralTimeConfig,
    },
};

#[derive(Clone)]
pub struct GeneralConfig {
    pub api_config: GeneralApiConfig,
    pub consensus_config: GeneralConsensusConfig,
    pub bootstrapping_config: GeneralBootstrapConfig,
    pub da_config: GeneralDaConfig,
    pub network_config: GeneralNetworkConfig,
    pub blend_config: GeneralBlendConfig,
    pub tracing_config: GeneralTracingConfig,
    pub time_config: GeneralTimeConfig,
    pub kms_config: PreloadKMSBackendSettings,
}

#[must_use]
pub fn create_general_configs(n_nodes: usize) -> Vec<GeneralConfig> {
    create_general_configs_with_network(n_nodes, &NetworkParams::default())
}

#[must_use]
pub fn create_general_configs_with_network(
    n_nodes: usize,
    network_params: &NetworkParams,
) -> Vec<GeneralConfig> {
    create_general_configs_with_blend_core_subset(n_nodes, n_nodes, network_params)
}

#[must_use]
pub fn create_general_configs_with_blend_core_subset(
    n_nodes: usize,
    // TODO: Instead of this, define a config struct for each node.
    // That would also be useful for non-even token distributions: https://github.com/logos-co/nomos/issues/1888
    n_blend_core_nodes: usize,
    network_params: &NetworkParams,
) -> Vec<GeneralConfig> {
    assert!(
        n_blend_core_nodes <= n_nodes,
        "n_blend_core_nodes({n_blend_core_nodes}) must be less than or equal to n_nodes({n_nodes})",
    );

    // Blend relies on each node declaring a different ZK public key, so we need
    // different IDs to generate different keys.
    let mut ids: Vec<_> = (0..n_nodes).map(|i| [i as u8; 32]).collect();
    let mut da_ports = vec![];
    let mut blend_ports = vec![];

    for id in &mut ids {
        thread_rng().fill(id);
        da_ports.push(get_available_udp_port().unwrap());
        blend_ports.push(get_available_udp_port().unwrap());
    }

    let consensus_params = ConsensusParams::default_for_participants(n_nodes);
    let mut consensus_configs =
        consensus::create_consensus_configs(&ids, &consensus_params, &WalletConfig::default());
    let bootstrap_config =
        bootstrap::create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD);
    let network_configs = network::create_network_configs(&ids, network_params);
    let da_configs = da::create_da_configs(&ids, &DaParams::default(), &da_ports);
    let api_configs = api::create_api_configs(&ids);
    let blend_configs = blend::create_blend_configs(&ids, &blend_ports);
    let tracing_configs = tracing::create_tracing_configs(&ids);
    let time_config = time::default_time_config();

    let providers: Vec<_> = blend_configs
        .iter()
        .enumerate()
        .take(n_blend_core_nodes)
        .map(|(i, blend_conf)| ProviderInfo {
            service_type: ServiceType::BlendNetwork,
            provider_sk: blend_conf.signer.clone(),
            zk_sk: blend_conf.secret_zk_key.clone(),
            locator: Locator(blend_conf.backend_core.listening_address.clone()),
            note: consensus_configs[0].blend_notes[i].clone(),
        })
        .collect();
    let ledger_tx = consensus_configs[0]
        .genesis_tx
        .mantle_tx()
        .ledger_tx
        .clone();
    let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers);
    for c in &mut consensus_configs {
        c.genesis_tx = genesis_tx.clone();
    }

    // Set the Blend and DA keys in the KMS of each node config.
    let kms_configs: Vec<_> = blend_configs
        .iter()
        .map(|blend_conf| {
            let ed_key = Ed25519Key::new(blend_conf.signer.clone());
            let zk_key = ZkKey::new(blend_conf.secret_zk_key.clone());
            PreloadKMSBackendSettings {
                keys: [
                    (
                        key_id_for_preload_backend(&Key::from(ed_key.clone())),
                        Key::from(ed_key),
                    ),
                    (
                        key_id_for_preload_backend(&Key::from(zk_key.clone())),
                        Key::from(zk_key),
                    ),
                ]
                .into(),
            }
        })
        .collect();

    let mut general_configs = vec![];

    for i in 0..n_nodes {
        general_configs.push(GeneralConfig {
            api_config: api_configs[i].clone(),
            consensus_config: consensus_configs[i].clone(),
            bootstrapping_config: bootstrap_config[i].clone(),
            da_config: da_configs[i].clone(),
            network_config: network_configs[i].clone(),
            blend_config: blend_configs[i].clone(),
            tracing_config: tracing_configs[i].clone(),
            time_config: time_config.clone(),
            kms_config: kms_configs[i].clone(),
        });
    }

    general_configs
}
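Illustration (not part of the imported file): how a test might request topologies through the entry points defined above. The node counts are arbitrary; `NetworkParams::default()` comes from the network config module in this tree.

// Hypothetical usage sketch; not part of the imported source.
fn example_topologies() {
    // All nodes run Blend core, default (star) network layout.
    let all_core = create_general_configs(3);
    assert_eq!(all_core.len(), 3);

    // Only the first two of four nodes act as Blend core nodes.
    let mixed =
        create_general_configs_with_blend_core_subset(4, 2, &NetworkParams::default());
    assert_eq!(mixed.len(), 4);
}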
116
testing-framework/configs/src/topology/configs/network.rs
Normal file
@ -0,0 +1,116 @@
use std::time::Duration;

use nomos_libp2p::{
    IdentifySettings, KademliaSettings, Multiaddr, NatSettings, ed25519, gossipsub,
};
use nomos_node::config::network::serde::{BackendSettings, Config, SwarmConfig};
use nomos_utils::net::get_available_udp_port;

use crate::node_address_from_port;

#[derive(Default, Clone)]
pub enum Libp2pNetworkLayout {
    #[default]
    Star,
    Chain,
    Full,
}

#[derive(Default, Clone)]
pub struct NetworkParams {
    pub libp2p_network_layout: Libp2pNetworkLayout,
}

pub type GeneralNetworkConfig = Config;

fn default_swarm_config() -> SwarmConfig {
    SwarmConfig {
        host: std::net::Ipv4Addr::UNSPECIFIED,
        port: 60000,
        node_key: ed25519::SecretKey::generate(),
        gossipsub_config: gossipsub::Config::default(),
        kademlia_config: KademliaSettings::default(),
        identify_config: IdentifySettings::default(),
        chain_sync_config: cryptarchia_sync::Config::default(),
        nat_config: NatSettings::default(),
    }
}

#[must_use]
pub fn create_network_configs(
    ids: &[[u8; 32]],
    network_params: &NetworkParams,
) -> Vec<GeneralNetworkConfig> {
    let swarm_configs: Vec<SwarmConfig> = ids
        .iter()
        .map(|id| {
            let mut node_key_bytes = *id;
            let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes)
                .expect("Failed to generate secret key from bytes");

            SwarmConfig {
                node_key,
                port: get_available_udp_port().unwrap(),
                chain_sync_config: cryptarchia_sync::Config {
                    peer_response_timeout: Duration::from_secs(60),
                },
                ..default_swarm_config()
            }
        })
        .collect();

    let all_initial_peers = initial_peers_by_network_layout(&swarm_configs, network_params);

    swarm_configs
        .iter()
        .zip(all_initial_peers)
        .map(|(swarm_config, initial_peers)| GeneralNetworkConfig {
            backend: BackendSettings {
                initial_peers,
                inner: swarm_config.to_owned(),
            },
        })
        .collect()
}

fn initial_peers_by_network_layout(
    swarm_configs: &[SwarmConfig],
    network_params: &NetworkParams,
) -> Vec<Vec<Multiaddr>> {
    let mut all_initial_peers = vec![];

    match network_params.libp2p_network_layout {
        Libp2pNetworkLayout::Star => {
            // First node is the hub - has no initial peers
            all_initial_peers.push(vec![]);
            let first_addr = node_address_from_port(swarm_configs[0].port);

            // All other nodes connect to the first node
            for _ in 1..swarm_configs.len() {
                all_initial_peers.push(vec![first_addr.clone()]);
            }
        }
        Libp2pNetworkLayout::Chain => {
            // First node has no initial peers
            all_initial_peers.push(vec![]);

            // Each subsequent node connects to the previous one
            for i in 1..swarm_configs.len() {
                let prev_addr = node_address_from_port(swarm_configs[i - 1].port);
                all_initial_peers.push(vec![prev_addr]);
            }
        }
        Libp2pNetworkLayout::Full => {
            // Each node connects to all previous nodes, unidirectional connections
            for i in 0..swarm_configs.len() {
                let mut peers = vec![];
                for swarm_config in swarm_configs.iter().take(i) {
                    peers.push(node_address_from_port(swarm_config.port));
                }
                all_initial_peers.push(peers);
            }
        }
    }

    all_initial_peers
}
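Illustration (not part of the imported file): a hedged sketch of the peer lists produced by the chain layout for three nodes, read back through the public `create_network_configs` entry point. The assertions restate the loop above: node 0 has no initial peers and every later node dials exactly one predecessor.

// Hypothetical usage sketch; not part of the imported source.
fn example_chain_layout() {
    let ids = [[1u8; 32], [2u8; 32], [3u8; 32]];
    let params = NetworkParams {
        libp2p_network_layout: Libp2pNetworkLayout::Chain,
    };

    let configs = create_network_configs(&ids, &params);

    // Node 0 is the head of the chain; nodes 1 and 2 each dial their predecessor.
    assert!(configs[0].backend.initial_peers.is_empty());
    assert_eq!(configs[1].backend.initial_peers.len(), 1);
    assert_eq!(configs[2].backend.initial_peers.len(), 1);
}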
35
testing-framework/configs/src/topology/configs/time.rs
Normal file
@ -0,0 +1,35 @@
use std::{
    net::{IpAddr, Ipv4Addr},
    str::FromStr as _,
    time::Duration,
};

use time::OffsetDateTime;

const DEFAULT_SLOT_TIME: u64 = 2;
const CONSENSUS_SLOT_TIME_VAR: &str = "CONSENSUS_SLOT_TIME";

#[derive(Clone, Debug)]
pub struct GeneralTimeConfig {
    pub slot_duration: Duration,
    pub chain_start_time: OffsetDateTime,
    pub ntp_server: String,
    pub timeout: Duration,
    pub interface: IpAddr,
    pub update_interval: Duration,
}

#[must_use]
pub fn default_time_config() -> GeneralTimeConfig {
    let slot_duration = std::env::var(CONSENSUS_SLOT_TIME_VAR)
        .map(|s| <u64>::from_str(&s).unwrap())
        .unwrap_or(DEFAULT_SLOT_TIME);
    GeneralTimeConfig {
        slot_duration: Duration::from_secs(slot_duration),
        chain_start_time: OffsetDateTime::now_utc(),
        ntp_server: String::from("pool.ntp.org"),
        timeout: Duration::from_secs(5),
        interface: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
        update_interval: Duration::from_secs(16),
    }
}
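Illustration (not part of the imported file): a sketch of the slot-time behaviour above, assuming the process environment is left untouched. CONSENSUS_SLOT_TIME overrides the 2-second default when set to an integer number of seconds.

// Hypothetical usage sketch; not part of the imported source.
use std::time::Duration;

fn example_default_slot_time() {
    // With CONSENSUS_SLOT_TIME unset the 2-second default applies;
    // e.g. CONSENSUS_SLOT_TIME=5 would yield a 5-second slot instead.
    let cfg = default_time_config();
    assert_eq!(cfg.slot_duration, Duration::from_secs(2));
}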
148
testing-framework/configs/src/topology/configs/tracing.rs
Normal file
@ -0,0 +1,148 @@
use std::{env, path::PathBuf};

use nomos_tracing::{
    logging::{local::FileConfig, loki::LokiConfig},
    metrics::otlp::OtlpMetricsConfig,
    tracing::otlp::OtlpTracingConfig,
};
use nomos_tracing_service::{
    ConsoleLayer, FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings,
};
use tracing::Level;

use crate::IS_DEBUG_TRACING;

#[derive(Clone, Default)]
pub struct GeneralTracingConfig {
    pub tracing_settings: TracingSettings,
}

impl GeneralTracingConfig {
    fn local_debug_tracing(id: usize) -> Self {
        let host_identifier = format!("node-{id}");
        let otlp_tracing = otlp_tracing_endpoint()
            .and_then(|endpoint| endpoint.parse().ok())
            .map(|endpoint| {
                TracingLayer::Otlp(OtlpTracingConfig {
                    endpoint,
                    sample_ratio: 0.5,
                    service_name: host_identifier.clone(),
                })
            })
            .unwrap_or(TracingLayer::None);
        let otlp_metrics = otlp_metrics_endpoint()
            .and_then(|endpoint| endpoint.parse().ok())
            .map(|endpoint| {
                MetricsLayer::Otlp(OtlpMetricsConfig {
                    endpoint,
                    host_identifier: host_identifier.clone(),
                })
            })
            .unwrap_or(MetricsLayer::None);

        let filter = file_filter_override().unwrap_or_else(|| {
            nomos_tracing::filter::envfilter::EnvFilterConfig {
                filters: std::iter::once(&("nomos", "debug"))
                    .map(|(k, v)| ((*k).to_owned(), (*v).to_owned()))
                    .collect(),
            }
        });

        Self {
            tracing_settings: TracingSettings {
                logger: LoggerLayer::Loki(LokiConfig {
                    endpoint: "http://localhost:3100".try_into().unwrap(),
                    host_identifier: host_identifier.clone(),
                }),
                tracing: otlp_tracing,
                filter: FilterLayer::EnvFilter(filter),
                metrics: otlp_metrics,
                console: ConsoleLayer::None,
                level: Level::DEBUG,
            },
        }
    }
}

fn otlp_tracing_endpoint() -> Option<String> {
    env::var("NOMOS_OTLP_ENDPOINT").ok()
}

fn otlp_metrics_endpoint() -> Option<String> {
    env::var("NOMOS_OTLP_METRICS_ENDPOINT").ok()
}

#[must_use]
pub fn create_tracing_configs(ids: &[[u8; 32]]) -> Vec<GeneralTracingConfig> {
    if *IS_DEBUG_TRACING {
        create_debug_configs(ids)
    } else {
        create_default_configs(ids)
    }
}

fn create_debug_configs(ids: &[[u8; 32]]) -> Vec<GeneralTracingConfig> {
    ids.iter()
        .enumerate()
        .map(|(i, _)| (i, GeneralTracingConfig::local_debug_tracing(i)))
        .map(|(i, cfg)| apply_file_logger_override(cfg, i))
        .map(maybe_disable_otlp_layers)
        .collect()
}

fn create_default_configs(ids: &[[u8; 32]]) -> Vec<GeneralTracingConfig> {
    ids.iter()
        .enumerate()
        .map(|(i, _)| (i, GeneralTracingConfig::default()))
        .map(|(i, cfg)| apply_file_logger_override(cfg, i))
        .map(maybe_disable_otlp_layers)
        .collect()
}

fn apply_file_logger_override(
    mut cfg: GeneralTracingConfig,
    node_index: usize,
) -> GeneralTracingConfig {
    if let Ok(dir) = std::env::var("NOMOS_LOG_DIR") {
        let directory = PathBuf::from(dir);
        cfg.tracing_settings.logger = LoggerLayer::File(FileConfig {
            directory,
            prefix: Some(format!("nomos-node-{node_index}").into()),
        });
        cfg.tracing_settings.level = file_log_level();
    }
    cfg
}

fn file_log_level() -> Level {
    env::var("NOMOS_LOG_LEVEL")
        .ok()
        .and_then(|raw| raw.parse::<Level>().ok())
        .unwrap_or(Level::INFO)
}

fn file_filter_override() -> Option<nomos_tracing::filter::envfilter::EnvFilterConfig> {
    env::var("NOMOS_LOG_FILTER")
        .ok()
        .map(|raw| nomos_tracing::filter::envfilter::EnvFilterConfig {
            filters: raw
                .split(',')
                .filter_map(|pair| {
                    let mut parts = pair.splitn(2, '=');
                    let target = parts.next()?.trim().to_string();
                    let level = parts.next()?.trim().to_string();
                    (!target.is_empty() && !level.is_empty()).then_some((target, level))
                })
                .collect(),
        })
}

fn maybe_disable_otlp_layers(mut cfg: GeneralTracingConfig) -> GeneralTracingConfig {
    if otlp_tracing_endpoint().is_none() {
        cfg.tracing_settings.tracing = TracingLayer::None;
    }
    if otlp_metrics_endpoint().is_none() {
        cfg.tracing_settings.metrics = MetricsLayer::None;
    }
    cfg
}
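Illustration (not part of the imported file): the `target=level` format accepted by NOMOS_LOG_FILTER in `file_filter_override` above, demonstrated on an arbitrary example value with the same split/trim logic. The raw string is an assumption, not a project default.

// Hypothetical, self-contained sketch of the NOMOS_LOG_FILTER value format.
fn example_filter_pairs() {
    let raw = "nomos=debug,libp2p=info";
    let pairs: Vec<(String, String)> = raw
        .split(',')
        .filter_map(|pair| {
            let mut parts = pair.splitn(2, '=');
            let target = parts.next()?.trim().to_string();
            let level = parts.next()?.trim().to_string();
            (!target.is_empty() && !level.is_empty()).then_some((target, level))
        })
        .collect();

    assert_eq!(
        pairs,
        vec![
            ("nomos".to_owned(), "debug".to_owned()),
            ("libp2p".to_owned(), "info".to_owned()),
        ]
    );
}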
79
testing-framework/configs/src/topology/configs/wallet.rs
Normal file
@ -0,0 +1,79 @@
use std::num::NonZeroUsize;

use num_bigint::BigUint;
use zksign::{PublicKey, SecretKey};

/// Collection of wallet accounts that should be funded at genesis.
#[derive(Clone, Default, Debug, serde::Serialize, serde::Deserialize)]
pub struct WalletConfig {
    pub accounts: Vec<WalletAccount>,
}

impl WalletConfig {
    #[must_use]
    pub const fn new(accounts: Vec<WalletAccount>) -> Self {
        Self { accounts }
    }

    #[must_use]
    pub fn uniform(total_funds: u64, users: NonZeroUsize) -> Self {
        let user_count = users.get() as u64;
        assert!(user_count > 0, "wallet user count must be non-zero");
        assert!(
            total_funds >= user_count,
            "wallet funds must allocate at least 1 token per user"
        );

        let base_allocation = total_funds / user_count;
        let mut remainder = total_funds % user_count;

        let accounts = (0..users.get())
            .map(|idx| {
                let mut amount = base_allocation;
                if remainder > 0 {
                    amount += 1;
                    remainder -= 1;
                }

                WalletAccount::deterministic(idx as u64, amount)
            })
            .collect();

        Self { accounts }
    }
}

/// Wallet account that holds funds in the genesis state.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct WalletAccount {
    pub label: String,
    pub secret_key: SecretKey,
    pub value: u64,
}

impl WalletAccount {
    #[must_use]
    pub fn new(label: impl Into<String>, secret_key: SecretKey, value: u64) -> Self {
        assert!(value > 0, "wallet account value must be positive");
        Self {
            label: label.into(),
            secret_key,
            value,
        }
    }

    #[must_use]
    pub fn deterministic(index: u64, value: u64) -> Self {
        let mut seed = [0u8; 32];
        seed[..2].copy_from_slice(b"wl");
        seed[2..10].copy_from_slice(&index.to_le_bytes());

        let secret_key = SecretKey::from(BigUint::from_bytes_le(&seed));
        Self::new(format!("wallet-user-{index}"), secret_key, value)
    }

    #[must_use]
    pub fn public_key(&self) -> PublicKey {
        self.secret_key.to_public_key()
    }
}
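Illustration (not part of the imported file): how `WalletConfig::uniform` spreads a non-divisible total across accounts, with the remainder going to the earliest accounts. The totals are arbitrary example values.

// Hypothetical usage sketch; not part of the imported source.
use std::num::NonZeroUsize;

fn example_uniform_distribution() {
    // 10 tokens over 3 users: base allocation 3, remainder 1 topping up the first account.
    let config = WalletConfig::uniform(10, NonZeroUsize::new(3).expect("non-zero"));

    let values: Vec<u64> = config.accounts.iter().map(|a| a.value).collect();
    assert_eq!(values, vec![4, 3, 3]);
    assert_eq!(values.iter().sum::<u64>(), 10);
}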
1
testing-framework/configs/src/topology/mod.rs
Normal file
@ -0,0 +1 @@
pub mod configs;
52
testing-framework/core/Cargo.toml
Normal file
@ -0,0 +1,52 @@
[package]
categories.workspace = true
description.workspace = true
edition.workspace = true
keywords.workspace = true
license.workspace = true
name = "testing-framework-core"
readme.workspace = true
repository.workspace = true
version = "0.1.0"

[lints]
workspace = true

[features]
default = []

[dependencies]
anyhow = "1"
async-trait = "0.1"
broadcast-service = { workspace = true }
chain-service = { workspace = true }
common-http-client = { workspace = true }
futures = { default-features = false, version = "0.3" }
groth16 = { workspace = true }
hex = { version = "0.4.3", default-features = false }
key-management-system = { workspace = true }
kzgrs-backend = { workspace = true }
nomos-core = { workspace = true }
nomos-da-network-core = { workspace = true }
nomos-da-network-service = { workspace = true }
nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] }
nomos-http-api-common = { workspace = true }
nomos-libp2p = { workspace = true }
nomos-network = { workspace = true, features = ["libp2p"] }
nomos-node = { workspace = true, default-features = false, features = ["testing"] }
nomos-tracing = { workspace = true }
nomos-tracing-service = { workspace = true }
nomos-utils = { workspace = true }
prometheus-http-query = "0.8"
rand = { workspace = true }
reqwest = { workspace = true, features = ["json"] }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
serde_yaml = { workspace = true }
tempfile = { workspace = true }
testing-framework-config = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["macros", "process", "rt-multi-thread", "time"] }
tracing = { workspace = true }
tx-service = { workspace = true, features = ["libp2p", "mock"] }
19
testing-framework/core/src/lib.rs
Normal file
@ -0,0 +1,19 @@
pub mod nodes;
pub mod scenario;
pub mod topology;

use std::{env, ops::Mul as _, sync::LazyLock, time::Duration};

pub use testing_framework_config::{
    IS_DEBUG_TRACING, node_address_from_port, secret_key_to_peer_id, secret_key_to_provider_id,
    topology::configs::da::GLOBAL_PARAMS_PATH,
};

static IS_SLOW_TEST_ENV: LazyLock<bool> =
    LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true"));

/// In slow test environments like Codecov, use 2x timeout.
#[must_use]
pub fn adjust_timeout(d: Duration) -> Duration {
    if *IS_SLOW_TEST_ENV { d.mul(2) } else { d }
}
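Illustration (not part of the imported file): the effect of `adjust_timeout` under the SLOW_TEST_ENV switch, written as a hedged sketch that covers both outcomes since the environment is decided at process start.

// Hypothetical usage sketch; not part of the imported source.
use std::time::Duration;

fn example_adjusted_timeout() {
    // Unchanged when SLOW_TEST_ENV is unset or not "true"; doubled otherwise.
    let timeout = adjust_timeout(Duration::from_secs(60));
    assert!(timeout == Duration::from_secs(60) || timeout == Duration::from_secs(120));
}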
Some files were not shown because too many files have changed in this diff.