commit e1c2bb2b95640cdb9e8f413684799dd11bb6a61b Author: andrussal Date: Mon Dec 1 12:48:39 2025 +0100 Initial import of Nomos testing framework diff --git a/.cargo-deny.toml b/.cargo-deny.toml new file mode 100644 index 0000000..1bdbd0c --- /dev/null +++ b/.cargo-deny.toml @@ -0,0 +1,54 @@ +# Config file reference can be found at https://embarkstudios.github.io/cargo-deny/checks/cfg.html. + +[graph] +all-features = true +exclude-dev = true +no-default-features = true + +[advisories] +ignore = [ + # Keep local ignores in sync with nomos-node if needed. Unused entries removed. + "RUSTSEC-2024-0370", # proc-macro-error unmaintained; upstream dependency + "RUSTSEC-2024-0384", # instant unmaintained; upstream dependency + "RUSTSEC-2024-0388", # derivative unmaintained; no safe upgrade available upstream + "RUSTSEC-2024-0436", # paste unmaintained; upstream dependency + "RUSTSEC-2025-0012", # backoff unmaintained; upstream workspace still relies on it + "RUSTSEC-2025-0055", # tracing-subscriber ansi escape issue; upstream dependency +] +yanked = "deny" + +[bans] +allow-wildcard-paths = false +multiple-versions = "allow" + +[licenses] +allow = [ + "Apache-2.0 WITH LLVM-exception", + "Apache-2.0", + "BSD-2-Clause", + "BSD-3-Clause", + "BSL-1.0", + "CC0-1.0", + "CDDL-1.0", + "CDLA-Permissive-2.0", + "ISC", + "MIT", + "MPL-2.0", + "Unicode-3.0", + "Zlib", +] +private = { ignore = false } +unused-allowed-license = "deny" + +[[licenses.clarify]] +expression = "MIT AND ISC" +license-files = [{ hash = 0xbd0eed23, path = "LICENSE" }] +name = "ring" + +[sources] +allow-git = ["https://github.com/EspressoSystems/jellyfish.git"] +unknown-git = "deny" +unknown-registry = "deny" + +[sources.allow-org] +github = ["logos-co"] diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..4c21fff --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,4 @@ +[target.'cfg(target_os = "macos")'] +# when using osx, we need to link against some golang libraries, it did 
just work with this missing flags +# from: https://github.com/golang/go/issues/42459 +rustflags = ["-C", "link-args=-framework CoreFoundation -framework Security -framework CoreServices -lresolv"] diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..3cea2d3 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +# General trim for runner images and CI builds +.git +target +.tmp +tests/workflows/.tmp* +book +scripts/build-rapidsnark.sh~ +rust-project-all-in-one.txt +**/*.log diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..a39256e --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,355 @@ +name: Lint + +on: + push: + branches: ["*"] + pull_request: + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: lint-${{ github.ref }} + cancel-in-progress: true + +jobs: + fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits" + echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV" + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + components: rustfmt + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + - uses: actions/cache@v4 + with: + path: target + key: ${{ runner.os }}-target-fmt-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14 + restore-keys: ${{ runner.os }}-target-fmt- + - run: cargo +nightly-2025-09-14 fmt --all -- --check + + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits" + echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV" + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + components: clippy + - uses: 
actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + - uses: actions/cache@v4 + with: + path: target + key: ${{ runner.os }}-target-clippy-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14 + restore-keys: ${{ runner.os }}-target-clippy- + - run: cargo +nightly-2025-09-14 clippy --all --all-targets --all-features -- -D warnings + + deny: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits" + echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV" + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + - name: Install cargo-deny + run: cargo install cargo-deny --locked --version 0.18.2 + - run: cargo deny check --hide-inclusion-graph -c .cargo-deny.toml --show-stats -D warnings + + taplo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - name: Install taplo + run: | + TAPLO_VERSION=0.9.3 + cargo install taplo-cli --locked --version ${TAPLO_VERSION} + - run: taplo fmt --check + - run: taplo lint + + machete: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits" + echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV" + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + - name: Install 
cargo-machete + run: cargo +nightly-2025-09-14 install --git https://github.com/bnjbvr/cargo-machete --locked cargo-machete + - run: cargo machete + + local_smoke: + runs-on: ubuntu-latest + env: + POL_PROOF_DEV_MODE: true + LOCAL_DEMO_RUN_SECS: 120 + LOCAL_DEMO_VALIDATORS: 1 + LOCAL_DEMO_EXECUTORS: 1 + TMPDIR: ${{ runner.temp }} + NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits + steps: + - uses: actions/checkout@v4 + - name: Install system dependencies + run: | + set -euo pipefail + if command -v sudo >/dev/null 2>&1; then + sudo apt-get update + sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + else + apt-get update + apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + fi + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$NOMOS_CIRCUITS" + echo "NOMOS_CIRCUITS=$NOMOS_CIRCUITS" >> "$GITHUB_ENV" + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-target-local-smoke-${{ hashFiles('**/Cargo.lock') }}-nightly-2025-09-14 + restore-keys: ${{ runner.os }}-target-local-smoke- + - name: Build local binaries (nomos-node/executor) + run: | + SRC_DIR="${TMPDIR:-/tmp}/nomos-node-src" + mkdir -p "$SRC_DIR" + if [ ! 
-d "$SRC_DIR/.git" ]; then + git clone https://github.com/logos-co/nomos-node.git "$SRC_DIR" + else + cd "$SRC_DIR" + git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd + git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd + git reset --hard + git clean -fdx + cd - + fi + cd "$SRC_DIR" + git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd + git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd + git reset --hard + git clean -fdx + cargo +nightly-2025-09-14 build --locked --all-features -p nomos-node -p nomos-executor + - name: Run local runner smoke (ignored test) + run: | + cargo +nightly-2025-09-14 test -p runner-examples --test local_runner_bin_smoke -- --ignored --nocapture + + compose_smoke: + runs-on: ubuntu-latest + env: + TMPDIR: ${{ github.workspace }}/.tmp + NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits + NOMOS_TESTNET_IMAGE: nomos-testnet:local + DOCKER_BUILDKIT: 1 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Prepare workspace tmpdir + run: mkdir -p "$TMPDIR" + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - name: Install system dependencies + run: | + set -euo pipefail + if command -v sudo >/dev/null 2>&1; then + sudo apt-get update + sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + else + apt-get update + apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + fi + + - name: Install system dependencies + run: | + set -euo pipefail + if command -v sudo >/dev/null 2>&1; then + sudo apt-get update + sudo apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + else + apt-get update + apt-get install -y clang llvm-dev libclang-dev pkg-config cmake libssl-dev rsync libgmp10 libgmp-dev libgomp1 nasm + fi + + - name: 
Cache cargo registry + if: env.ACT != 'true' + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Cache target directory + if: env.ACT != 'true' + uses: actions/cache@v4 + with: + path: target + key: ${{ runner.os }}-target-compose-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-target-compose- + + - name: Install circuits for host build + env: + NOMOS_CIRCUITS_PLATFORM: linux-x86_64 + NOMOS_CIRCUITS_REBUILD_RAPIDSNARK: "1" + RAPIDSNARK_FORCE_REBUILD: "1" + RAPIDSNARK_BUILD_GMP: "0" + RAPIDSNARK_USE_ASM: "OFF" + run: | + CIRCUITS_DIR="${NOMOS_CIRCUITS}" + chmod +x scripts/setup-nomos-circuits.sh + scripts/setup-nomos-circuits.sh v0.3.1 "$CIRCUITS_DIR" + # Copy into build context so Docker doesn't need network + rm -rf testing-framework/assets/stack/kzgrs_test_params + mkdir -p testing-framework/assets/stack/kzgrs_test_params + if command -v rsync >/dev/null 2>&1; then + rsync -a --delete "$CIRCUITS_DIR"/ testing-framework/assets/stack/kzgrs_test_params/ + else + rm -rf testing-framework/assets/stack/kzgrs_test_params/* + cp -a "$CIRCUITS_DIR"/. 
testing-framework/assets/stack/kzgrs_test_params/ + fi + echo "NOMOS_CIRCUITS=$CIRCUITS_DIR" >> "$GITHUB_ENV" + echo "CIRCUITS_OVERRIDE=testing-framework/assets/stack/kzgrs_test_params" >> "$GITHUB_ENV" + + - name: Build compose test image + env: + DOCKER_CLI_HINTS: "false" + IMAGE_TAG: ${{ env.NOMOS_TESTNET_IMAGE }} + CIRCUITS_OVERRIDE: ${{ env.CIRCUITS_OVERRIDE }} + run: | + chmod +x testing-framework/assets/stack/scripts/build_test_image.sh + testing-framework/assets/stack/scripts/build_test_image.sh + + - name: Run compose mixed workload binary + env: + POL_PROOF_DEV_MODE: "true" + COMPOSE_NODE_PAIRS: "1x1" + NOMOS_TESTNET_IMAGE: ${{ env.NOMOS_TESTNET_IMAGE }} + COMPOSE_RUNNER_HOST: ${{ env.ACT == 'true' && 'host.docker.internal' || '127.0.0.1' }} + RUST_BACKTRACE: "1" + NOMOS_TESTS_TRACING: "true" + NOMOS_LOG_DIR: "${{ github.workspace }}/.tmp/compose-logs" + NOMOS_LOG_LEVEL: "info" + run: | + mkdir -p "$TMPDIR" + if [ "${{ env.ACT }}" = "true" ]; then + export COMPOSE_RUNNER_PRESERVE=1 + fi + cargo run -p runner-examples --bin compose_runner -- --nocapture + + - name: Collect compose logs + if: failure() + run: | + mkdir -p ci-artifacts/compose + if [ -d "${TMPDIR}/compose-logs" ]; then + tar -czf ci-artifacts/compose/node-logs.tgz -C "${TMPDIR}/compose-logs" . 
+ fi + mkdir -p ci-artifacts/compose + docker ps -a --filter "name=nomos-compose-" --format '{{.ID}} {{.Names}} {{.Status}}' > ci-artifacts/compose/containers.txt || true + for id in $(docker ps -a --filter "name=nomos-compose-" -q); do + docker logs "$id" > "ci-artifacts/compose/${id}.log" 2>&1 || true + done + + - name: Upload compose artifacts + if: failure() && env.ACT != 'true' + uses: actions/upload-artifact@v4 + with: + name: compose-mixed-workload-logs + path: ci-artifacts + + - name: Cleanup compose containers + if: always() && env.ACT != 'true' + run: | + ids=$(docker ps -a --filter "name=nomos-compose-" -q) + if [ -n "$ids" ]; then + docker rm -f $ids + fi + + book: + runs-on: ubuntu-latest + env: + RUSTUP_TOOLCHAIN: nightly-2025-09-14 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + - name: Install mdBook toolchain + run: | + MDBOOK_VERSION=0.4.40 + LINKCHECK_VERSION=0.7.7 + MERMAID_VERSION=0.12.6 + cargo +nightly-2025-09-14 install --locked mdbook --version ${MDBOOK_VERSION} + cargo +nightly-2025-09-14 install mdbook-linkcheck --version ${LINKCHECK_VERSION} + cargo +nightly-2025-09-14 install --locked mdbook-mermaid --version ${MERMAID_VERSION} + cargo +nightly-2025-09-14 install --locked typos-cli --version 1.20.11 + - name: Spell check (typos) + run: typos --format brief book/src + - name: Markdown lint + run: npx -y markdownlint-cli2 "book/src/**/*.md" + - name: Build book + run: mdbook build book + - name: Check links + run: mdbook-linkcheck book diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 0000000..efcfb12 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,56 @@ +name: Pre-commit + +on: + workflow_dispatch: + +env: + 
CARGO_TERM_COLOR: always + +concurrency: + group: pre-commit-${{ github.ref }} + cancel-in-progress: true + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install nomos circuits + run: | + ./scripts/setup-nomos-circuits.sh v0.3.1 "$HOME/.nomos-circuits" + echo "NOMOS_CIRCUITS=$HOME/.nomos-circuits" >> "$GITHUB_ENV" + + - name: Set up Rust toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-09-14 + components: rustfmt, clippy + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.x' + + - name: Cache cargo registry + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo- + + - name: Cache pre-commit + uses: actions/cache@v4 + with: + path: ~/.cache/pre-commit + key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} + restore-keys: ${{ runner.os }}-pre-commit- + + - name: Install pre-commit + run: pip install pre-commit + + - name: Run pre-commit + run: pre-commit run --all-files diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4a35d34 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +/target +**/target +.tmp/ +# IDE / OS cruft +.idea/ +.DS_Store + +# Builder/test artifacts +.tmp_check/ +.tmp_docker/ +ci-artifacts/ +tests/kzgrs/circuits_bundle/ +NOMOS_RUST_SOURCES_ONLY.txt +dump.zsh + +# Local test artifacts (kept when NOMOS_TESTS_KEEP_LOGS=1) +tests/workflows/.tmp* +tests/workflows/.tmp*/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..f1c365f --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,40 @@ +repos: + - repo: https://github.com/doublify/pre-commit-rust + rev: v1.0 + hooks: + - id: fmt + # We're running `fmt` with `--all` and `pass_filenames: false` to format the entire workspace at once. 
+ # Otherwise, `pre-commit` passes staged files one by one, which can lead to inconsistent results + # due to, presumably, the lack of full workspace context. + entry: cargo +nightly-2025-09-14 fmt + pass_filenames: false + - id: clippy + args: ["--all", "--all-targets", "--all-features", "--", "-D", "warnings"] + - repo: https://github.com/EmbarkStudios/cargo-deny + rev: 0.18.2 + hooks: + - id: cargo-deny + args: + - check + - --hide-inclusion-graph + - -c + - .cargo-deny.toml + - --show-stats + - -D + - warnings + - repo: https://github.com/ComPWA/taplo-pre-commit + rev: v0.9.3 + hooks: + - id: taplo-format + - id: taplo-lint + - repo: https://github.com/bnjbvr/cargo-machete + rev: ba1bcd4 # No tag yet with .pre-commit-hooks.yml + hooks: + - id: cargo-machete + - repo: local + hooks: + - id: cargo-hack-check + language: script + name: cargo hack check + entry: ./hooks/cargo-hack.sh + stages: [manual] diff --git a/.taplo.toml b/.taplo.toml new file mode 100644 index 0000000..5bd77ee --- /dev/null +++ b/.taplo.toml @@ -0,0 +1,10 @@ +exclude = ["target/**"] + +[formatting] +align_entries = true +allowed_blank_lines = 1 +column_width = 120 +keys = ["build-dependencies", "dependencies", "dev-dependencies"] +reorder_arrays = true +reorder_inline_tables = true +reorder_keys = true diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..08bca06 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,8903 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "getrandom 0.3.4", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "archery" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e0a5f99dfebb87bb342d0f53bb92c81842e100bbb915223e38349580e5441d" +dependencies = [ + "triomphe", +] + +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-bn254" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec 0.5.0", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1f3a13b34da09176a8baba701233fdffbaa7c1b1192ce031a3da4e55ce1f1a56" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-snark", + "ark-std 0.4.0", + "blake2", + "derivative", + "digest", + "sha2", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-groth16" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20ceafa83848c3e390f1cbf124bc3193b3e639b3f02009e0e290809a501b95fc" +dependencies = [ + "ark-crypto-primitives", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + 
"ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", +] + +[[package]] +name = "ark-poly-commit" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a741492629ffcd228337676dc223a28551aa6792eedb8a2a22c767f00df6c89" +dependencies = [ + "ark-crypto-primitives", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest", +] + +[[package]] +name = "ark-relations" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" +dependencies = [ + "ark-ff 0.4.2", + "ark-std 0.4.0", + "tracing", + "tracing-subscriber 0.2.25", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive 0.4.2", + "ark-std 0.4.0", + "digest", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-snark" +version = "0.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84d3cc6833a335bb8a600241889ead68ee89a3cf8448081fb7694c0fe503da63" +dependencies = [ + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom 7.1.3", + "num-traits", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "asn1_der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" + +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-ctrlc" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907279f6e91a51c8ec7cac24711e8308f21da7c10c7700ca2f7e125694ed2df1" +dependencies = [ + "ctrlc", +] + +[[package]] +name = "async-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" +dependencies = [ + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix 1.1.2", + "slab", + "windows-sys 0.61.2", +] + +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", 
+ "syn 2.0.111", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.12", + "log", + "url", +] + +[[package]] +name = "attohttpc" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" +dependencies = [ + "base64 0.22.1", + "http 1.4.0", + "log", + "url", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + 
"serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom 0.2.16", + "instant", + "rand 0.8.5", +] + +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand", + "gloo-timers", + "tokio", +] + +[[package]] +name = "backtrace" +version = "0.3.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-link", +] + +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base256emoji" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" +dependencies = [ + "const-str", + "match-lookup", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.111", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + +[[package]] +name = "blst" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "broadcast-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "derivative", + "futures", + "nomos-core", + "overwatch", + "serde", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "serde", +] + 
+[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "cached" +version = "0.55.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0839c297f8783316fcca9d90344424e968395413f0662a5481f79c6648bbc14" +dependencies = [ + "hashbrown 0.14.5", + "once_cell", + "thiserror 2.0.17", + "web-time", +] + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd405d82c84ff7f35739f175f67d8b9fb7687a0e84ccdc78bd3568839827cf07" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.3", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cfg_eval" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "cfgsync" +version = "0.1.0" +dependencies = [ + "axum", + "clap", + "groth16", + "hex", + "key-management-system", + "nomos-core", + "nomos-da-network-core", + "nomos-executor", + "nomos-libp2p", + "nomos-node", + "nomos-tracing-service", + "nomos-utils", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_path_to_error", + "serde_with", + "serde_yaml", + "subnetworks-assignations", + "testing-framework-config", + "tokio", + "tracing", +] + +[[package]] +name = "chain-common" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "nomos-core", + "serde", +] + +[[package]] +name = "chain-leader" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "chain-common", + "chain-service", + "cryptarchia-engine", + "ed25519-dalek", + "futures", + "nomos-blend-service", + "nomos-core", + "nomos-da-sampling", + "nomos-ledger", + "nomos-time", + "nomos-wallet", + "overwatch", + "serde", + "services-utils", + "thiserror 
1.0.69", + "tokio", + "tracing", + "tracing-futures", + "tx-service", + "zksign", +] + +[[package]] +name = "chain-network" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "chain-common", + "chain-service", + "cryptarchia-engine", + "cryptarchia-sync", + "futures", + "nomos-core", + "nomos-da-sampling", + "nomos-ledger", + "nomos-network", + "nomos-time", + "overwatch", + "rand 0.8.5", + "serde", + "serde_with", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tracing-futures", + "tx-service", +] + +[[package]] +name = "chain-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "bytes", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "futures", + "groth16", + "nomos-core", + "nomos-ledger", + "nomos-network", + "nomos-storage", + "nomos-utils", + "num-bigint", + "overwatch", + "serde", + "serde_with", + "services-utils", + "strum", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-futures", +] + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + +[[package]] +name = "chrono-tz" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93698b29de5e97ad0ae26447b344c482a7284c737d9ddc5f9e52b74a336671bb" +dependencies = [ + "chrono", + "chrono-tz-build", + "phf", +] + +[[package]] +name = "chrono-tz-build" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0c088aee841df9c3041febbb73934cfc39708749bf96dc827e3359cd39ef11b1" +dependencies = [ + "parse-zoneinfo", + "phf", + "phf_codegen", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "circuits-prover" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-utils", + "tempfile", +] + +[[package]] +name = "circuits-utils" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "dirs", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_derive" +version = "4.5.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "clap_lex" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" + +[[package]] +name = "color-eyre" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" +dependencies = [ + "backtrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", +] + +[[package]] +name = "common-http-client" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "broadcast-service", + "futures", + "nomos-core", + "nomos-da-messages", + "nomos-http-api-common", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "url", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-hex" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" 
+dependencies = [ + "cfg-if", + "cpufeatures", + "proptest", + "serde_core", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + +[[package]] +name = "convert_case" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "counter" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f009fcafa949dc1fc46a762dae84d0c2687d3b550906b633c4979d58d2c6ae52" +dependencies = [ + "num-traits", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "cryptarchia-engine" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cfg_eval", + "nomos-utils", + "serde", + "serde_with", + "thiserror 1.0.69", + "time", + "tokio", + "tracing", +] + +[[package]] +name = "cryptarchia-sync" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "bytes", + "cryptarchia-engine", + "futures", + "libp2p", + "libp2p-stream", + "nomos-core", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array 0.14.7", + "typenum", +] + +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + 
"serde", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.111", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "data-encoding-macro" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" 
+version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +dependencies = [ + "data-encoding", + "syn 2.0.111", +] + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "uuid", +] + +[[package]] +name = "default-net" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4898b43aed56499fad6b294d15b3e76a51df68079bf492e5daae38ca084e003" +dependencies = [ + "dlopen2 0.4.1", + "libc", + "memalloc", + "netlink-packet-core 0.5.0", + "netlink-packet-route 0.15.0", + "netlink-sys", + "once_cell", + "system-configuration 0.5.1", + "windows 0.32.0", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom 7.1.3", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_arbitrary" +version = 
"1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "deunicode" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd57806937c9cc163efc8ea3910e00a62e2aeb0b8119f1793a978088f8f6b04" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.2", +] + +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "dlopen2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b121caccfc363e4d9a4589528f3bef7c71b83c6ed01c8dc68cbeeb7fd29ec698" +dependencies = [ + "dlopen2_derive", + "libc", 
+ "once_cell", + "winapi", +] + +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", +] + +[[package]] +name = "dlopen2_derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a09ac8bb8c16a282264c379dffba707b9c998afc7506009137f3c6136888078" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "dtoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "serde", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "either" +version = "1.15.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "event-listener" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener", + "pin-project-lite", +] + +[[package]] +name = "executor-http-client" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "common-http-client", + "futures", + "nomos-core", + "nomos-http-api-common", + "reqwest", + "serde", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] + +[[package]] +name = "fixed" +version = "1.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707070ccf8c4173548210893a0186e29c266901b71ed20cd9e2ca0193dfe95c3" +dependencies = [ + "az", + "bytemuck", + "half", + "serde", + "typenum", +] + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] 
+name = "fork_stream" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc54cf296aa5a82dfffcc911fc7a37b0dcba605725bbb4db486f7b24d7667f9d" +dependencies = [ + "futures", + "pin-project", +] + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror 1.0.69", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-bounded" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +dependencies = [ + "futures-timer", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.35", + "rustls-pki-types", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + 
+[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "1.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf57c49a95fd1fe24b90b3033bee6dc7e8f1288d51494cb44e627c295e38542" +dependencies = [ + "rustversion", + "serde_core", + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "globset" +version = "0.4.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "52dfc19153a48bde0cbd630453615c8151bce3a5adfac7a0aebfbf0a1e1f57e3" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "globwalk" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757" +dependencies = [ + "bitflags 2.10.0", + "ignore", + "walkdir", +] + +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand 0.8.5", + "smallvec", + "spinning_top", +] + +[[package]] +name = "groth16" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-groth16", + "ark-serialize 0.4.2", + "generic-array 1.3.5", + "hex", + "num-bigint", + "serde", + "serde_json", + "thiserror 2.0.17", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.4.0", + "indexmap 2.12.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hickory-proto" +version = "0.25.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d00147af6310f4392a31680db52a3ed45a2e0f68eb18e8c3fe5537ecc96d9e2" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "socket2 0.5.10", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5762f69ebdbd4ddb2e975cd24690bf21fe6b2604039189c26acddbc427f12887" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.2", + "resolv-conf", + "smallvec", + "thiserror 2.0.17", + "tokio", + "tracing", +] + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.4.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http 1.4.0", + "http-body 1.0.1", + "pin-project-lite", +] + +[[package]] +name = 
"http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humansize" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7" +dependencies = [ + "libm", +] + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http 1.4.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "rustls-native-certs", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http 1.4.0", + "hyper 1.8.1", + "hyper-util", + "rustls 0.23.35", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.32", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper 1.8.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "hyper 
1.8.1", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.1", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.62.2", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "if-addrs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "if-watch" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +dependencies = [ + 
"async-io", + "core-foundation", + "fnv", + "futures", + "if-addrs", + "ipnet", + "log", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-proto", + "netlink-sys", + "rtnetlink", + "system-configuration 0.6.1", + "tokio", + "windows 0.53.0", +] + +[[package]] +name = "igd-next" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" +dependencies = [ + "async-trait", + "attohttpc 0.24.1", + "bytes", + "futures", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "log", + "rand 0.8.5", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "igd-next" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" +dependencies = [ + "async-trait", + "attohttpc 0.30.1", + "bytes", + "futures", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "log", + "rand 0.9.2", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "ignore" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3d782a365a015e0f5c04902246139249abf769125006fbe7649e2ee88169b4a" +dependencies = [ + "crossbeam-deque", + "globset", + "log", + "memchr", + "regex-automata", + "same-file", + "walkdir", + "winapi-util", +] + +[[package]] +name = "indenter" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inferno" +version = "0.11.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" +dependencies = [ + "ahash", + "indexmap 2.12.1", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.61.2", +] 
+ +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jf-crhf" +version = "0.1.1" +source = "git+https://github.com/EspressoSystems/jellyfish?tag=jf-crhf-v0.1.1#8f3dce0bc2bd161b4648f6ac029dcc1a23aaf4c5" +dependencies = [ + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "jf-poseidon2" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/jellyfish.git?rev=dc166cf0f803c3e5067f9dfcc21e3dade986a447#dc166cf0f803c3e5067f9dfcc21e3dade986a447" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ff 0.4.2", + "ark-std 0.4.0", + "displaydoc", + "hex", + "jf-crhf", + "lazy_static", + "nimue", + "zeroize", +] + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] 
+name = "js-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "json-patch" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b" +dependencies = [ + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "jsonpath-rust" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cc127b7c3d270be504572364f9569761a180b981919dd0d87693a7f5fb7829" +dependencies = [ + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2", + "signature", +] + +[[package]] +name = "k8s-openapi" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc3606fd16aca7989db2f84bb25684d0270c6d6fa1dbcd0025af7b4130523a6" +dependencies = [ + "base64 0.21.7", + "bytes", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "key-management-system" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "ed25519-dalek", + "groth16", + "key-management-system-macros", + "log", + 
"nomos-blend-message", + "nomos-utils", + "overwatch", + "poq", + "poseidon2", + "serde", + "thiserror 2.0.17", + "tokio", + "tracing", + "zeroize", + "zksign", +] + +[[package]] +name = "key-management-system-macros" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "kube" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3499c8d60c763246c7a213f51caac1e9033f46026904cb89bc8951ae8601f26e" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033450dfa0762130565890dadf2f8835faedf749376ca13345bcd8ecd6b5f29f" +dependencies = [ + "base64 0.21.7", + "bytes", + "chrono", + "either", + "futures", + "home", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-rustls 0.24.2", + "hyper-timeout 0.4.1", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "pin-project", + "rustls 0.21.12", + "rustls-pemfile", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tower 0.4.13", + "tower-http 0.4.4", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5bba93d054786eba7994d03ce522f368ef7d48c88a1826faa28478d85fb63ae" +dependencies = [ + "chrono", + "form_urlencoded", + "http 0.2.12", + "json-patch", + "k8s-openapi", + "once_cell", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "kube-runtime" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d8893eb18fbf6bb6c80ef6ee7dd11ec32b1dc3c034c988ac1b3a84d46a230ae" +dependencies = 
[ + "ahash", + "async-trait", + "backoff", + "derivative", + "futures", + "hashbrown 0.14.5", + "json-patch", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "smallvec", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "kzgrs" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bls12-381", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-poly-commit", + "ark-serialize 0.4.2", + "blake2", + "blst", + "num-bigint", + "num-traits", + "rand 0.8.5", + "thiserror 1.0.69", +] + +[[package]] +name = "kzgrs-backend" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "blake2", + "itertools 0.12.1", + "kzgrs", + "nomos-core", + "rand 0.8.5", + "serde", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libp2p" +version = "0.55.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom 0.2.16", + "libp2p-allow-block-list", + "libp2p-autonat", + "libp2p-connection-limits", + "libp2p-core", + "libp2p-dns", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-mdns", + "libp2p-metrics", + "libp2p-quic", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-upnp", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror 2.0.17", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", +] + +[[package]] +name = "libp2p-autonat" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e297bfc6cabb70c6180707f8fa05661b77ecb9cb67e8e8e1c469301358fa21d0" +dependencies = [ + "async-trait", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "rand_core 0.6.4", + "thiserror 2.0.17", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", +] + +[[package]] +name = "libp2p-core" +version = "0.43.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d28e2d2def7c344170f5c6450c0dbe3dfef655610dbfde2f6ac28a527abbe36" +dependencies = [ + "either", + "fnv", + "futures", + 
"futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "parking_lot", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "thiserror 2.0.17", + "tracing", + "unsigned-varint 0.8.0", + "web-time", +] + +[[package]] +name = "libp2p-dns" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" +dependencies = [ + "async-trait", + "futures", + "hickory-resolver", + "libp2p-core", + "libp2p-identity", + "parking_lot", + "smallvec", + "tracing", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d558548fa3b5a8e9b66392f785921e363c57c05dcadfda4db0d41ae82d313e4a" +dependencies = [ + "async-channel", + "asynchronous-codec", + "base64 0.22.1", + "byteorder", + "bytes", + "either", + "fnv", + "futures", + "futures-timer", + "getrandom 0.2.16", + "hashlink", + "hex_fmt", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "prometheus-client", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "regex", + "sha2", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-identify" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" +dependencies = [ + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "libp2p-identity" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "hkdf", + "k256", + "multihash", + 
"quick-protobuf", + "rand 0.8.5", + "serde", + "sha2", + "thiserror 2.0.17", + "tracing", + "zeroize", +] + +[[package]] +name = "libp2p-kad" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bab0466a27ebe955bcbc27328fae5429c5b48c915fd6174931414149802ec23" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "sha2", + "smallvec", + "thiserror 2.0.17", + "tracing", + "uint", + "web-time", +] + +[[package]] +name = "libp2p-mdns" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" +dependencies = [ + "futures", + "hickory-proto", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "smallvec", + "socket2 0.5.10", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-metrics" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-swarm", + "pin-project", + "prometheus-client", + "web-time", +] + +[[package]] +name = "libp2p-quic" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" +dependencies = [ + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "quinn", + "rand 0.8.5", + "ring", + "rustls 0.23.35", + "socket2 0.5.10", + "thiserror 2.0.17", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-request-response" +version = "0.28.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "548fe44a80ff275d400f1b26b090d441d83ef73efabbeb6415f4ce37e5aed865" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "smallvec", + "tracing", +] + +[[package]] +name = "libp2p-stream" +version = "0.3.0-alpha" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826716f1ee125895f1fb44911413cba023485b552ff96c7a2159bd037ac619bb" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "tracing", +] + +[[package]] +name = "libp2p-swarm" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm-derive", + "lru", + "multistream-select", + "once_cell", + "rand 0.8.5", + "smallvec", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "libp2p-tcp" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" +dependencies = [ + "futures", + "futures-timer", + "if-watch", + "libc", + "libp2p-core", + "socket2 0.5.10", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-tls" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ff65a82e35375cbc31ebb99cacbbf28cb6c4fefe26bf13756ddcf708d40080" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + 
"libp2p-identity", + "rcgen", + "ring", + "rustls 0.23.35", + "rustls-webpki 0.103.8", + "thiserror 2.0.17", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-upnp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" +dependencies = [ + "futures", + "futures-timer", + "igd-next 0.15.1", + "libp2p-core", + "libp2p-swarm", + "tokio", + "tracing", +] + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags 2.10.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.17.3+10.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "libc", + "libz-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "light-poseidon" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47a1ccadd0bb5a32c196da536fd72c59183de24a055f6bf0513bf845fefab862" +dependencies = [ + "ark-bn254 0.5.0", + "ark-ff 0.5.0", + "num-bigint", + "thiserror 1.0.69", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] 
+name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "loki-api" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdc38a304f59a03e6efa3876766a48c70a766a93f88341c3fff4212834b8e327" +dependencies = [ + "prost", + "prost-types", +] + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "match-lookup" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] 
+ +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memalloc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df39d232f5c40b0891c10216992c2f250c054105cb1e56f0fc9032db6203ecc1" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "memmap2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +dependencies = [ + "libc", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = 
"mmr" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "groth16", + "poseidon2", + "rpds", + "serde", +] + +[[package]] +name = "moka" +version = "0.12.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "uuid", +] + +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.8.0", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" +dependencies = [ + "base-x", + "base256emoji", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" +dependencies = [ + "core2", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "multistream-select" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "natpmp" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77366fa8ce34e2e1322dd97da65f11a62f451bd3daae8be6993c00800f61dd07" +dependencies = [ + "async-trait", + "cc", + "netdev", + "tokio", +] + +[[package]] +name = "netdev" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d" +dependencies = [ + "dlopen2 0.5.0", + "ipnet", + "libc", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-sys", + "once_cell", + "system-configuration 0.6.1", + "windows-sys 0.52.0", +] + +[[package]] +name = "netlink-packet-core" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5cf0b54effda4b91615c40ff0fd12d0d4c9a6e0f5116874f03941792ff535a" +dependencies = [ + "anyhow", + "byteorder", + "libc", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +dependencies = [ + "anyhow", + "byteorder", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea993e32c77d87f01236c38f572ecb6c311d592e56a06262a007fd2a6e31253c" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core 0.5.0", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.17.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core 0.7.0", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror 1.0.69", +] + +[[package]] +name = "netlink-proto" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core 0.7.0", + "netlink-sys", + "thiserror 2.0.17", +] + +[[package]] +name = "netlink-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +dependencies = [ + "bytes", + "futures", + "libc", + "log", + "tokio", +] + +[[package]] +name = "nimue" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0dc7d3b2b7bd112c0cecf7d6f4f16a174ee7a98e27615b1d08256d0176588f2" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "digest", + "generic-array 0.14.7", + "hex", + "keccak", + "log", + "rand 0.8.5", + "zeroize", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" 
+dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nom" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" +dependencies = [ + "memchr", +] + +[[package]] +name = "nomos-api" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "bytes", + "chain-service", + "futures", + "kzgrs-backend", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-libp2p", + "nomos-network", + "nomos-sdp", + "nomos-storage", + "overwatch", + "serde", + "serde_json", + "subnetworks-assignations", + "tokio", + "tokio-stream", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", +] + +[[package]] +name = "nomos-blend-message" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "blake2", + "derivative", + "ed25519-dalek", + "generic-array 1.3.5", + "groth16", + "itertools 0.14.0", + "nomos-core", + "nomos-utils", + "num-bigint", + "poq", + "serde", + "serde_with", + "thiserror 1.0.69", + "tracing", + "x25519-dalek", +] + +[[package]] +name = "nomos-blend-network" +version = "0.1.0" 
+source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "either", + "futures", + "futures-timer", + "libp2p", + "nomos-blend-message", + "nomos-blend-scheduling", + "nomos-core", + "nomos-libp2p", + "tracing", +] + +[[package]] +name = "nomos-blend-scheduling" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "derivative", + "fork_stream", + "futures", + "hex", + "multiaddr", + "nomos-blend-message", + "nomos-core", + "rand 0.8.5", + "serde", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-blend-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "chain-service", + "cryptarchia-engine", + "fork_stream", + "futures", + "groth16", + "key-management-system", + "libp2p", + "libp2p-stream", + "nomos-blend-message", + "nomos-blend-network", + "nomos-blend-scheduling", + "nomos-core", + "nomos-ledger", + "nomos-libp2p", + "nomos-network", + "nomos-time", + "nomos-utils", + "overwatch", + "poq", + "rand 0.8.5", + "rs-merkle-tree", + "serde", + "serde_with", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-core" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "async-trait", + "bincode", + "blake2", + "bytes", + "const-hex", + "cryptarchia-engine", + "ed25519", + "ed25519-dalek", + "futures", + "generic-array 1.3.5", + "groth16", + "hex", + "multiaddr", + "nom 
8.0.0", + "num-bigint", + "pol", + "poseidon2", + "serde", + "serde_with", + "strum", + "thiserror 1.0.69", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-da-dispersal" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "ed25519", + "ed25519-dalek", + "futures", + "kzgrs-backend", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-tracing", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "services-utils", + "subnetworks-assignations", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-da-messages" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "blake2", + "futures", + "kzgrs-backend", + "nomos-core", + "serde", + "tokio", +] + +[[package]] +name = "nomos-da-network-core" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cached", + "either", + "fixed", + "futures", + "indexmap 2.12.1", + "kzgrs-backend", + "libp2p", + "libp2p-stream", + "log", + "nomos-core", + "nomos-da-messages", + "nomos-utils", + "rand 0.9.2", + "serde", + "serde_with", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "nomos-da-network-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "arc-swap", + "async-trait", + "bitvec", + "blake2", + "broadcast-service", + "common-http-client", + "futures", + 
"kzgrs-backend", + "libp2p", + "libp2p-identity", + "log", + "multiaddr", + "nomos-core", + "nomos-da-messages", + "nomos-da-network-core", + "nomos-libp2p", + "nomos-sdp", + "nomos-storage", + "nomos-tracing", + "nomos-utils", + "overwatch", + "rand 0.8.5", + "serde", + "services-utils", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "nomos-da-sampling" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "futures", + "hex", + "kzgrs-backend", + "libp2p-identity", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-storage", + "nomos-tracing", + "overwatch", + "rand 0.8.5", + "serde", + "services-utils", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tx-service", +] + +[[package]] +name = "nomos-da-verifier" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "kzgrs-backend", + "libp2p", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-storage", + "nomos-tracing", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "services-utils", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tx-service", +] + +[[package]] +name = "nomos-executor" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "axum", + "broadcast-service", + "clap", + "color-eyre", + "futures", + "kzgrs-backend", + "nomos-api", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + 
"nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-sdp", + "nomos-storage", + "nomos-time", + "overwatch", + "serde", + "serde_yaml", + "services-utils", + "subnetworks-assignations", + "tokio", + "tower 0.4.13", + "tower-http 0.5.2", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", +] + +[[package]] +name = "nomos-http-api-common" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "axum", + "governor", + "nomos-core", + "pprof", + "serde", + "serde_with", + "tokio", + "tower_governor", + "tracing", +] + +[[package]] +name = "nomos-ledger" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cryptarchia-engine", + "ed25519", + "groth16", + "mmr", + "nomos-core", + "nomos-utils", + "num-bigint", + "rand 0.8.5", + "rpds", + "serde", + "thiserror 1.0.69", + "utxotree", + "zksign", +] + +[[package]] +name = "nomos-libp2p" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "backon", + "blake2", + "cryptarchia-sync", + "default-net", + "either", + "futures", + "hex", + "igd-next 0.16.2", + "libp2p", + "multiaddr", + "natpmp", + "netdev", + "nomos-utils", + "num_enum", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tracing", + "zerocopy", +] + +[[package]] +name = "nomos-network" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "cryptarchia-sync", + "futures", 
+ "nomos-core", + "nomos-libp2p", + "overwatch", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-node" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "axum", + "broadcast-service", + "chain-leader", + "chain-network", + "chain-service", + "clap", + "color-eyre", + "derivative", + "futures", + "groth16", + "hex", + "http 1.4.0", + "key-management-system", + "kzgrs-backend", + "nomos-api", + "nomos-blend-message", + "nomos-blend-scheduling", + "nomos-blend-service", + "nomos-core", + "nomos-da-messages", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-sdp", + "nomos-storage", + "nomos-system-sig", + "nomos-time", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "overwatch", + "pol", + "poq", + "serde", + "serde_json", + "serde_with", + "serde_yaml", + "services-utils", + "subnetworks-assignations", + "time", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-http 0.5.2", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", + "zksign", +] + +[[package]] +name = "nomos-sdp" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "ed25519-dalek", + "futures", + "nomos-core", + "overwatch", + "serde", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tracing", + "tx-service", + "zksign", +] + +[[package]] +name = "nomos-storage" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ 
+ "async-trait", + "bytes", + "cryptarchia-engine", + "futures", + "libp2p-identity", + "multiaddr", + "nomos-core", + "overwatch", + "rocksdb", + "serde", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "nomos-system-sig" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-ctrlc", + "async-trait", + "overwatch", + "tracing", +] + +[[package]] +name = "nomos-time" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "cfg_eval", + "cryptarchia-engine", + "futures", + "log", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "sntpc", + "thiserror 2.0.17", + "time", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-tracing" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "opentelemetry", + "opentelemetry-http", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "opentelemetry_sdk", + "rand 0.8.5", + "reqwest", + "serde", + "tokio", + "tracing", + "tracing-appender", + "tracing-gelf", + "tracing-loki", + "tracing-opentelemetry", + "tracing-subscriber 0.3.20", + "url", +] + +[[package]] +name = "nomos-tracing-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "nomos-tracing", + "overwatch", + "serde", + "tracing", + "tracing-appender", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "nomos-utils" +version = "0.1.0" +source = 
"git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "blake2", + "cipher", + "const-hex", + "humantime", + "overwatch", + "rand 0.8.5", + "serde", + "serde_with", + "time", +] + +[[package]] +name = "nomos-wallet" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "chain-service", + "futures", + "groth16", + "hex", + "key-management-system", + "nomos-core", + "nomos-ledger", + "nomos-storage", + "overwatch", + "serde", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tracing", + "wallet", + "zksign", +] + +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-format" +version = "0.4.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec", + "itoa", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + +[[package]] +name = "object" +version = "0.37.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" +dependencies = [ + "memchr", +] + +[[package]] +name = "oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.26.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "570074cc999d1a58184080966e5bd3bf3a9a4af650c3b05047c2621e7405cd17" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror 1.0.69", +] + +[[package]] +name = "opentelemetry-http" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6351496aeaa49d7c267fb480678d85d1cd30c5edb20b497c48c56f62a8c14b99" +dependencies = [ + "async-trait", + "bytes", + "http 1.4.0", + "opentelemetry", + "reqwest", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29e1f9c8b032d4f635c730c0efcf731d5e2530ea13fa8bef7939ddc8420696bd" +dependencies = [ + "async-trait", + "futures-core", + "http 1.4.0", + "opentelemetry", + "opentelemetry-http", + "opentelemetry-proto", + "opentelemetry_sdk", + "prost", + "thiserror 1.0.69", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9d3968ce3aefdcca5c27e3c4ea4391b37547726a70893aab52d3de95d5f8b34" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db945c1eaea8ac6a9677185357480d215bb6999faa9f691d0c4d4d641eab7a09" + +[[package]] +name = "opentelemetry_sdk" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c627d9f4c9cdc1f21a29ee4bfbd6028fcb8bcf2a857b43f3abdf72c9c862f3" +dependencies = [ + "async-trait", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry", + "percent-encoding", + "rand 0.8.5", + "thiserror 1.0.69", + "tokio", + "tokio-stream", +] + +[[package]] +name = "option-ext" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "overwatch" +version = "0.1.0" +source = "git+https://github.com/logos-co/Overwatch?rev=f5a9902#f5a99022f389d65adbd55e51f1e3f9eead62432a" +dependencies = [ + "async-trait", + "futures", + "overwatch-derive", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", +] + +[[package]] +name = "overwatch-derive" +version = "0.1.0" +source = "git+https://github.com/logos-co/Overwatch?rev=f5a9902#f5a99022f389d65adbd55e51f1e3f9eead62432a" +dependencies = [ + "convert_case", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "owo-colors" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "parse-zoneinfo" +version 
= "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" +dependencies = [ + "regex", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64 0.22.1", + "serde_core", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pest" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" +dependencies = [ + "memchr", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f72981ade67b1ca6adc26ec221be9f463f2b5839c7508998daa17c23d94d7f" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee9efd8cdb50d719a80088b76f81aec7c41ed6d522ee750178f83883d271625" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "pest_meta" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf1d70880e76bdc13ba52eafa6239ce793d85c8e43896507e43dd8984ff05b82" +dependencies = [ + "pest", + "sha2", +] + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = 
[ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "pol" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-prover", + "circuits-utils", + "groth16", + "num-bigint", + "num-traits", + "serde", + "serde_json", + "thiserror 2.0.17", + "witness-generator", +] + +[[package]] +name = "polling" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix 1.1.2", + "windows-sys 0.61.2", +] + +[[package]] +name = "poq" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-prover", + "circuits-utils", + "groth16", + "num-bigint", + "pol", + "serde", + 
"serde_json", + "thiserror 2.0.17", + "witness-generator", +] + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "poseidon2" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ff 0.4.2", + "jf-poseidon2", + "num-bigint", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "pprof" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38a01da47675efa7673b032bf8efd8214f1917d89685e07e395ab125ea42b187" +dependencies = [ + "aligned-vec", + "backtrace", + "cfg-if", + "criterion", + "findshlibs", + "inferno", + "libc", + "log", + "nix 0.26.4", + "once_cell", + "protobuf", + "protobuf-codegen", + "smallvec", + "spin", + "symbolic-demangle", + "tempfile", + "thiserror 2.0.17", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version 
= "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prometheus-http-query" +version = 
"0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcebfa99f03ae51220778316b37d24981e36322c82c24848f48c5bd0f64cbdb" +dependencies = [ + "enum-as-inner", + "mime", + "reqwest", + "serde", + "time", + "url", +] + +[[package]] +name = "proptest" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +dependencies = [ + "bitflags 2.10.0", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "protobuf" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-codegen" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d3976825c0014bbd2f3b34f0001876604fe87e0c86cd8fa54251530f1544ace" +dependencies = [ + "anyhow", + "once_cell", + "protobuf", + "protobuf-parse", + "regex", + "tempfile", + "thiserror 1.0.69", +] + +[[package]] +name = 
"protobuf-parse" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4aeaa1f2460f1d348eeaeed86aea999ce98c1bded6f089ff8514c9d9dbdc973" +dependencies = [ + "anyhow", + "indexmap 2.12.1", + "log", + "protobuf", + "protobuf-support", + "tempfile", + "thiserror 1.0.69", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" +dependencies = [ + "thiserror 1.0.69", +] + +[[package]] +name = "quanta" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror 1.0.69", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "quick-xml" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +dependencies = [ + "memchr", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "futures-io", + "pin-project-lite", + "quinn-proto", + 
"quinn-udp", + "rustc-hash", + "rustls 0.23.35", + "socket2 0.6.1", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls 0.23.35", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.1", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 
0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "raw-cpuid" +version = "11.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + 
"crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "redox_users" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 2.0.17", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + 
"hyper-rustls 0.27.7", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "native-tls", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.35", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tokio-rustls 0.26.4", + "tokio-util", + "tower 0.5.2", + "tower-http 0.6.7", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "resolv-conf" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rgb" +version = "0.8.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6a884d2998352bb4daf0183589aec883f16a6da1f4dde84d8e2e9a5409a1ce" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rocksdb" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rpds" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e75f485e819d4d3015e6c0d55d02a4fd3db47c1993d9e603e0361fba2bffb34" +dependencies = [ + "archery", + "serde", +] + +[[package]] +name = "rs-merkle-tree" 
+version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7a3ef170810c387d31b64c0b59734abb0839dac2a8d137909e271bfdec9b1e0" +dependencies = [ + "ark-bn254 0.5.0", + "ark-ff 0.5.0", + "byteorder", + "futures", + "light-poseidon", + "quote", + "rand 0.9.2", + "syn 1.0.109", + "thiserror 2.0.17", + "tiny-keccak", + "tokio", +] + +[[package]] +name = "rtnetlink" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +dependencies = [ + "futures", + "log", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-packet-utils", + "netlink-proto", + "netlink-sys", + "nix 0.26.4", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "runner-examples" +version = "0.1.0" +dependencies = [ + "testing-framework-core", + "testing-framework-runner-compose", + "testing-framework-runner-k8s", + "testing-framework-runner-local", + "testing-framework-workflows", + "tokio", + "tracing", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "rust-embed" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "947d7f3fad52b283d261c4c99a084937e2fe492248cb9a68a8435a861b8798ca" +dependencies = [ + "rust-embed-impl", + "rust-embed-utils", + "walkdir", +] + +[[package]] +name = "rust-embed-impl" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fa2c8c9e8711e10f9c4fd2d64317ef13feaab820a4c51541f1a8c8e2e851ab2" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "syn 2.0.111", + "walkdir", +] + +[[package]] +name = "rust-embed-utils" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b161f275cb337fe0a44d924a5f4df0ed69c2c39519858f931ce61c779d3475" +dependencies = [ + "sha2", + "walkdir", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom 7.1.3", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.23.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.103.8", + 
"subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rw-stream-sink" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +dependencies = [ + "futures", + "pin-project", + "static_assertions", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version 
= "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + 
"memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.12.1", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "services-utils" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "log", + "overwatch", + "serde", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "slug" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882a80f72ee45de3cc9a5afeb2da0331d58df69e4e7d8eeb5d3c7784ae67e724" +dependencies = [ + "deunicode", + "wasm-bindgen", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "sntpc" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78f778a0f82b3cf5d75f858eceee38e84d5292f1d03415e88cc4ec45ca6ba8a2" +dependencies = [ + "cfg-if", + "tokio", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "str_stack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "subnetworks-assignations" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "counter", + "libp2p-identity", + "nomos-core", + "nomos-utils", + "rand 0.8.5", + 
"serde", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "symbolic-common" +version = "12.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3d8046c5674ab857104bc4559d505f4809b8060d57806e45d49737c97afeb60" +dependencies = [ + "debugid", + "memmap2", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-demangle" +version = "12.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1accb6e5c4b0f682de907623912e616b44be1c9e725775155546669dbff720ec" +dependencies = [ + "rustc-demangle", + "symbolic-common", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "system-configuration-sys 0.6.0", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix 1.1.2", + "windows-sys 0.61.2", +] + +[[package]] +name = "tera" +version = "1.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8004bca281f2d32df3bacd59bc67b312cb4c70cea46cbd79dbe8ac5ed206722" +dependencies = [ + "chrono", + "chrono-tz", + "globwalk", + "humansize", + 
"lazy_static", + "percent-encoding", + "pest", + "pest_derive", + "rand 0.8.5", + "regex", + "serde", + "serde_json", + "slug", + "unicode-segmentation", +] + +[[package]] +name = "testing-framework-config" +version = "0.1.0" +dependencies = [ + "blst", + "chain-leader", + "chain-network", + "chain-service", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "groth16", + "hex", + "key-management-system", + "nomos-api", + "nomos-blend-message", + "nomos-blend-service", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-executor", + "nomos-ledger", + "nomos-libp2p", + "nomos-node", + "nomos-sdp", + "nomos-time", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "rand 0.8.5", + "serde", + "subnetworks-assignations", + "time", + "tracing", + "zksign", +] + +[[package]] +name = "testing-framework-core" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "broadcast-service", + "chain-service", + "common-http-client", + "futures", + "groth16", + "hex", + "key-management-system", + "kzgrs-backend", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-executor", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "prometheus-http-query", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_with", + "serde_yaml", + "tempfile", + "testing-framework-config", + "thiserror 2.0.17", + "tokio", + "tracing", + "tx-service", +] + +[[package]] +name = "testing-framework-runner-compose" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "cfgsync", + "groth16", + "nomos-core", + "nomos-ledger", + "nomos-tracing-service", + "reqwest", + "serde", + "tempfile", + "tera", + "testing-framework-core", + "tests", + "thiserror 2.0.17", + "tokio", + "tracing", + 
"url", + "uuid", + "zksign", +] + +[[package]] +name = "testing-framework-runner-k8s" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "k8s-openapi", + "kube", + "reqwest", + "serde", + "serde_yaml", + "tempfile", + "testing-framework-core", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "testing-framework-runner-local" +version = "0.1.0" +dependencies = [ + "async-trait", + "testing-framework-core", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "testing-framework-workflows" +version = "0.1.0" +dependencies = [ + "async-trait", + "ed25519-dalek", + "executor-http-client", + "nomos-core", + "rand 0.8.5", + "testing-framework-config", + "testing-framework-core", + "thiserror 2.0.17", + "tokio", + "tracing", + "zksign", +] + +[[package]] +name = "tests" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "blst", + "broadcast-service", + "chain-leader", + "chain-network", + "chain-service", + "common-http-client", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "executor-http-client", + "futures", + "futures-util", + "groth16", + "hex", + "key-management-system", + "kzgrs-backend", + "nomos-api", + "nomos-blend-message", + "nomos-blend-service", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-executor", + "nomos-http-api-common", + "nomos-ledger", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-sdp", + "nomos-time", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "rand 0.8.5", + "reqwest", + "serde_json", + "serde_yaml", + "serial_test", + "subnetworks-assignations", + "tempfile", + "time", + "tokio", + "tracing", + "tx-service", + "zksign", +] + +[[package]] 
+name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls 0.23.35", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + 
"serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap 2.12.1", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-timeout 0.5.2", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.4.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +dependencies = [ + "base64 0.21.7", + "bitflags 2.10.0", + "bytes", + "futures-core", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "http-range-header", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tower_governor" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3790eac6ad3fb8d9d96c2b040ae06e2517aa24b067545d1078b96ae72f7bb9a7" +dependencies = [ + "axum", + "forwarded-header-value", + "governor", + "http 1.4.0", + "pin-project", + "thiserror 1.0.69", + "tower 0.4.13", + "tracing", +] + +[[package]] +name = "tracing" +version = "0.1.41" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.69", + "time", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-gelf" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c0170f1bf67b749d4377c2da1d99d6e722600051ee53870cfb6f618611e29e" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "hostname", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing-core", + "tracing-futures", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + 
"once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-loki" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3beec919fbdf99d719de8eda6adae3281f8a5b71ae40431f44dc7423053d34" +dependencies = [ + "loki-api", + "reqwest", + "serde", + "serde_json", + "snap", + "tokio", + "tokio-stream", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", + "tracing-subscriber 0.3.20", + "url", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc58af5d3f6c5811462cabb3289aec0093f7338e367e5a33d28c0433b3c7360b" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.20", + "web-time", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "triomphe" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" + +[[package]] +name = "try-lock" +version = "0.2.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tx-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "nomos-core", + "nomos-network", + "nomos-storage", + "overwatch", + "rand 0.8.5", + "serde", + "serde_json", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "utoipa", +] + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utoipa" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" +dependencies = [ + "indexmap 2.12.1", + "serde", + "serde_json", + "utoipa-gen", +] + +[[package]] +name = "utoipa-gen" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20c24e8ab68ff9ee746aad22d39b5535601e6416d1b0feeabf78be986a5c4392" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.111", +] + 
+[[package]] +name = "utoipa-swagger-ui" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "943e0ff606c6d57d410fd5663a4d7c074ab2c5f14ab903b9514565e59fa1189e" +dependencies = [ + "axum", + "mime_guess", + "regex", + "reqwest", + "rust-embed", + "serde", + "serde_json", + "utoipa", + "zip", +] + +[[package]] +name = "utxotree" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "groth16", + "num-bigint", + "poseidon2", + "rpds", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "uuid" +version = "1.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wallet" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + 
"nomos-core", + "nomos-ledger", + "num-bigint", + "rpds", + "thiserror 2.0.17", + "tracing", + "zksign", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.111", + 
"wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.44", +] + +[[package]] +name = "widestring" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 
+dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbedf6db9096bc2364adce0ae0aa636dcd89f3c3f2cd67947062aaf0ca2a10ec" +dependencies = [ + "windows_aarch64_msvc 0.32.0", + "windows_i686_gnu 0.32.0", + "windows_i686_msvc 0.32.0", + "windows_x86_64_gnu 0.32.0", + "windows_x86_64_msvc 0.32.0", +] + +[[package]] +name = "windows" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" +dependencies = [ + "windows-core 0.53.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +dependencies = [ + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result 
0.4.1", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + 
"windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + 
"windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = 
"windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "witness-generator" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-utils", + "tempfile", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "x509-parser" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom 7.1.3", + "oid-registry", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "xml-rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea879c944afe8a2b25fef16bb4ba234f47c694565e97383b36f3a878219065c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf955aa904d6040f70dc8e9384444cb1030aed272ba3cb09bbc4ab9e7c1f34f5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zerofrom" 
+version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zip" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cc23c04387f4da0374be4533ad1208cbb091d5c11d070dfef13676ad6497164" 
+dependencies = [ + "arbitrary", + "crc32fast", + "crossbeam-utils", + "displaydoc", + "flate2", + "indexmap 2.12.1", + "num_enum", + "thiserror 1.0.69", +] + +[[package]] +name = "zksign" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?rev=2f60a0372c228968c3526c341ebc7e58bbd178dd#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "circuits-prover", + "circuits-utils", + "generic-array 1.3.5", + "groth16", + "num-bigint", + "poseidon2", + "serde", + "serde_json", + "thiserror 2.0.17", + "tracing", + "witness-generator", + "zeroize", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..badb04f --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,101 @@ +[workspace] +members = [ + "examples", + "testing-framework/configs", + "testing-framework/core", + "testing-framework/runners/compose", + "testing-framework/runners/k8s", + "testing-framework/runners/local", + "testing-framework/tools/cfgsync", + "testing-framework/workflows", +] +resolver = "2" + +[workspace.package] +categories = [] +description = "Nomos testing framework workspace (split out from nomos-node)" +edition = "2024" +keywords = ["framework", "nomos", "testing"] +license = "MIT OR Apache-2.0" +readme = "README.md" +repository = "https://example.invalid/nomos-testing-local" +version = "0.1.0" + +[workspace.lints.rust] +unsafe_code = "allow" + +[workspace.lints.clippy] +all = "allow" + +[workspace.dependencies] +# Local testing framework crates +testing-framework-config = { default-features = false, path = "testing-framework/configs" } +testing-framework-core = { default-features = false, path = "testing-framework/core" } +testing-framework-runner-compose = { default-features = false, path = "testing-framework/runners/compose" } +testing-framework-runner-k8s = { default-features = false, path = "testing-framework/runners/k8s" } +testing-framework-runner-local = { default-features = false, path = "testing-framework/runners/local" } 
+testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" } + +# Nomos git dependencies (pinned to a specific nomos-node commit) +broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +cfgsync = { default-features = false, path = "testing-framework/tools/cfgsync" } +chain-leader = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd", features = [ + "pol-dev-mode", +] } +chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +executor-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +key-management-system = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +kzgrs-backend = { default-features = false, git = 
"https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-da-dispersal = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-da-network-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-da-network-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-da-sampling = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-da-verifier = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-executor = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-ledger = { default-features = false, git = 
"https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +subnetworks-assignations = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } 
+tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } +zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "2f60a0372c228968c3526c341ebc7e58bbd178dd" } + +# External crates +async-trait = { default-features = false, version = "0.1" } +bytes = { default-features = false, version = "1.3" } +hex = { default-features = false, version = "0.4.3" } +libp2p = { default-features = false, version = "0.55" } +overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" } +overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" } +rand = { default-features = false, version = "0.8" } +reqwest = { default-features = false, version = "0.12" } +serde = { default-features = true, version = "1.0", features = ["derive"] } +serde_json = { default-features = false, version = "1.0" } +serde_with = { default-features = false, version = "3.14.0" } +serde_yaml = { default-features = false, version = "0.9.33" } +tempfile = { default-features = false, version = "3" } +thiserror = { default-features = false, version = "2.0" } +tokio = { default-features = false, version = "1" } +tracing = { default-features = false, version = "0.1" } diff --git a/README.md b/README.md new file mode 100644 index 0000000..3a0a135 --- /dev/null +++ b/README.md @@ -0,0 +1,8 @@ +# Nomos Testing + +This repo is the standalone Nomos testing framework. 
For docs, quick start, and examples, read the mdBook at https://logos-co.github.io/nomos-testing/ (sources in `book/`) — start with: +- What you’ll learn: https://logos-co.github.io/nomos-testing/what-you-will-learn.html +- Quick examples: https://logos-co.github.io/nomos-testing/examples.html and https://logos-co.github.io/nomos-testing/examples-advanced.html +- Runners (compose/k8s/local): https://logos-co.github.io/nomos-testing/runners.html + +Key crates live under `testing-framework/` (core, runners, workflows, configs) with integration tests in `tests/workflows/`. Compose/k8s assets sit in `testing-framework/assets/stack/`. diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 0000000..6a4c4b8 --- /dev/null +++ b/book/book.toml @@ -0,0 +1,13 @@ +[book] +authors = ["Nomos Testing"] +language = "en" +src = "src" +title = "Nomos Testing Book" + +[build] +# Keep book output in target/ to avoid polluting the workspace root. +build-dir = "../target/book" + +[output.html] +additional-js = ["theme/mermaid-init.js"] +default-theme = "light" diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 0000000..6a9f604 --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,31 @@ +# Summary +- [Project Context Primer](project-context-primer.md) +- [What You Will Learn](what-you-will-learn.md) +- [Part I — Foundations](part-i.md) + - [Introduction](introduction.md) + - [Architecture Overview](architecture-overview.md) + - [Testing Philosophy](testing-philosophy.md) + - [Scenario Lifecycle](scenario-lifecycle.md) + - [Design Rationale](design-rationale.md) +- [Part II — User Guide](part-ii.md) + - [Workspace Layout](workspace-layout.md) + - [Annotated Tree](annotated-tree.md) + - [Authoring Scenarios](authoring-scenarios.md) + - [Core Content: Workloads & Expectations](workloads.md) + - [Core Content: ScenarioBuilderExt Patterns](scenario-builder-ext-patterns.md) + - [Best Practices](best-practices.md) + - [Examples](examples.md) + - 
[Advanced & Artificial Examples](examples-advanced.md) + - [Running Scenarios](running-scenarios.md) + - [Runners](runners.md) + - [Operations](operations.md) +- [Part III — Developer Reference](part-iii.md) + - [Scenario Model (Developer Level)](scenario-model.md) + - [Extending the Framework](extending.md) + - [Example: New Workload & Expectation (Rust)](custom-workload-example.md) + - [Internal Crate Reference](internal-crate-reference.md) +- [Part IV — Appendix](part-iv.md) + - [Builder API Quick Reference](dsl-cheat-sheet.md) + - [Troubleshooting Scenarios](troubleshooting.md) + - [FAQ](faq.md) + - [Glossary](glossary.md) diff --git a/book/src/annotated-tree.md b/book/src/annotated-tree.md new file mode 100644 index 0000000..be8627b --- /dev/null +++ b/book/src/annotated-tree.md @@ -0,0 +1,96 @@ +# Annotated Tree + +Directory structure with key paths annotated: + +``` +nomos-testing/ +├─ testing-framework/ # Core library crates +│ ├─ configs/ # Node config builders, topology generation, tracing/logging config +│ ├─ core/ # Scenario model (ScenarioBuilder), runtime (Runner, Deployer), topology, node spawning +│ ├─ workflows/ # Workloads (transactions, DA, chaos), expectations (liveness), builder DSL extensions +│ ├─ runners/ # Deployment backends +│ │ ├─ local/ # LocalDeployer (spawns local processes) +│ │ ├─ compose/ # ComposeDeployer (Docker Compose + Prometheus) +│ │ └─ k8s/ # K8sDeployer (Kubernetes Helm) +│ └─ assets/ # Docker/K8s stack assets +│ └─ stack/ +│ ├─ kzgrs_test_params/ # KZG circuit parameters (fetch via setup-nomos-circuits.sh) +│ ├─ monitoring/ # Prometheus config +│ ├─ scripts/ # Container entrypoints, image builder +│ └─ cfgsync.yaml # Config sync server template +│ +├─ examples/ # PRIMARY ENTRY POINT: runnable binaries +│ └─ src/bin/ +│ ├─ local_runner.rs # Local processes demo (POL_PROOF_DEV_MODE=true) +│ ├─ compose_runner.rs # Docker Compose demo (requires image) +│ └─ k8s_runner.rs # Kubernetes demo (requires cluster + image) +│ +├─ 
scripts/ # Helper utilities +│ └─ setup-nomos-circuits.sh # Fetch KZG circuit parameters +│ +└─ book/ # This documentation (mdBook) +``` + +## Key Directories Explained + +### `testing-framework/` +Core library crates providing the testing API. + +| Crate | Purpose | Key Exports | +|-------|---------|-------------| +| `configs` | Node configuration builders | Topology generation, tracing config | +| `core` | Scenario model & runtime | `ScenarioBuilder`, `Deployer`, `Runner` | +| `workflows` | Workloads & expectations | `ScenarioBuilderExt`, `ChaosBuilderExt` | +| `runners/local` | Local process deployer | `LocalDeployer` | +| `runners/compose` | Docker Compose deployer | `ComposeDeployer` | +| `runners/k8s` | Kubernetes deployer | `K8sDeployer` | + +### `testing-framework/assets/stack/` +Docker/K8s deployment assets: +- **`kzgrs_test_params/`**: Circuit parameters (override via `NOMOS_KZGRS_PARAMS_PATH`) +- **`monitoring/`**: Prometheus config +- **`scripts/`**: Container entrypoints and image builder +- **`cfgsync.yaml`**: Configuration sync server template + +### `examples/` (Start Here!) +**Runnable binaries** demonstrating framework usage: +- `local_runner.rs` — Local processes +- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built) +- `k8s_runner.rs` — Kubernetes (requires cluster + image) + +**Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <binary_name>` + +**All runners require `POL_PROOF_DEV_MODE=true`** to avoid expensive proof generation. 
+ +### `scripts/` +Helper utilities: +- **`setup-nomos-circuits.sh`**: Fetch KZG parameters from releases + +## Observability + +**Compose runner** includes: +- **Prometheus** at `http://localhost:9090` (metrics scraping) +- Node metrics exposed per validator/executor +- Access in expectations: `ctx.telemetry().prometheus_endpoint()` + +**Logging** controlled by: +- `NOMOS_LOG_DIR` — Write per-node log files +- `NOMOS_LOG_LEVEL` — Global log level (error/warn/info/debug/trace) +- `NOMOS_LOG_FILTER` — Target-specific filtering (e.g., `consensus=trace,da=debug`) +- `NOMOS_TESTS_TRACING` — Enable file logging for local runner + +See [Logging and Observability](operations.md#logging-and-observability) for details. + +## Navigation Guide + +| To Do This | Go Here | +|------------|---------| +| **Run an example** | `examples/src/bin/` → `cargo run -p runner-examples --bin ` | +| **Write a custom scenario** | `testing-framework/core/` → Implement using `ScenarioBuilder` | +| **Add a new workload** | `testing-framework/workflows/src/workloads/` → Implement `Workload` trait | +| **Add a new expectation** | `testing-framework/workflows/src/expectations/` → Implement `Expectation` trait | +| **Modify node configs** | `testing-framework/configs/src/topology/configs/` | +| **Extend builder DSL** | `testing-framework/workflows/src/builder/` → Add trait methods | +| **Add a new deployer** | `testing-framework/runners/` → Implement `Deployer` trait | + +For detailed guidance, see [Internal Crate Reference](internal-crate-reference.md). diff --git a/book/src/architecture-overview.md b/book/src/architecture-overview.md new file mode 100644 index 0000000..f954a03 --- /dev/null +++ b/book/src/architecture-overview.md @@ -0,0 +1,139 @@ +# Architecture Overview + +The framework follows a clear flow: **Topology → Scenario → Deployer → Runner → Workloads → Expectations**. + +## Core Flow + +```mermaid +flowchart LR + A(Topology
shape cluster) --> B(Scenario
plan) + B --> C(Deployer
provision & readiness) + C --> D(Runner
orchestrate execution) + D --> E(Workloads
drive traffic) + E --> F(Expectations
verify outcomes) +``` + +### Components + +- **Topology** describes the cluster: how many nodes, their roles, and the high-level network and data-availability parameters they should follow. +- **Scenario** combines that topology with the activities to run and the checks to perform, forming a single plan. +- **Deployer** provisions infrastructure on the chosen backend (local processes, Docker Compose, or Kubernetes), waits for readiness, and returns a Runner. +- **Runner** orchestrates scenario execution: starts workloads, observes signals, evaluates expectations, and triggers cleanup. +- **Workloads** generate traffic and conditions that exercise the system. +- **Expectations** observe the run and judge success or failure once activity completes. + +Each layer has a narrow responsibility so that cluster shape, deployment choice, +traffic generation, and health checks can evolve independently while fitting +together predictably. + +## Entry Points + +The framework is consumed via **runnable example binaries** in `examples/src/bin/`: + +- `local_runner.rs` — Spawns nodes as local processes +- `compose_runner.rs` — Deploys via Docker Compose (requires `NOMOS_TESTNET_IMAGE` built) +- `k8s_runner.rs` — Deploys via Kubernetes Helm (requires cluster + image) + +**Run with:** `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <binary_name>` + +**Important:** All runners require `POL_PROOF_DEV_MODE=true` to avoid expensive Groth16 proof generation that causes timeouts. + +These binaries use the framework API (`ScenarioBuilder`) to construct and execute scenarios. 
+ +## Builder API + +Scenarios are defined using a fluent builder pattern: + +```rust +let mut plan = ScenarioBuilder::topology() + .network_star() // Topology configuration + .validators(3) + .executors(2) + .apply() + .wallets(50) // Wallet seeding + .transactions() // Transaction workload + .rate(5) + .users(20) + .apply() + .da() // DA workload + .channel_rate(1) + .blob_rate(2) + .apply() + .expect_consensus_liveness() // Expectations + .with_run_duration(Duration::from_secs(90)) + .build(); +``` + +**Key API Points:** +- Topology uses `.topology().validators(N).executors(M).apply()` pattern (not `with_node_counts`) +- Workloads are configured via extension traits (`ScenarioBuilderExt`, `ChaosBuilderExt`) +- Chaos workloads require `.enable_node_control()` and a compatible runner + +## Deployers + +Three deployer implementations: + +| Deployer | Backend | Prerequisites | Node Control | +|----------|---------|---------------|--------------| +| `LocalDeployer` | Local processes | Binaries in sibling checkout | No | +| `ComposeDeployer` | Docker Compose | `NOMOS_TESTNET_IMAGE` built | Yes | +| `K8sDeployer` | Kubernetes Helm | Cluster + image loaded | Not yet | + +**Compose-specific features:** +- Includes Prometheus at `http://localhost:9090` (override via `TEST_FRAMEWORK_PROMETHEUS_PORT`) +- Optional OTLP trace/metrics endpoints (`NOMOS_OTLP_ENDPOINT`, `NOMOS_OTLP_METRICS_ENDPOINT`) +- Node control for chaos testing (restart validators/executors) + +## Assets and Images + +### Docker Image +Built via `testing-framework/assets/stack/scripts/build_test_image.sh`: +- Embeds KZG circuit parameters from `testing-framework/assets/stack/kzgrs_test_params/` +- Includes runner scripts: `run_nomos_node.sh`, `run_nomos_executor.sh` +- Tagged as `NOMOS_TESTNET_IMAGE` (default: `nomos-testnet:local`) + +### Circuit Assets +KZG parameters required for DA workloads: +- **Default path:** `testing-framework/assets/stack/kzgrs_test_params/` +- **Override:** 
`NOMOS_KZGRS_PARAMS_PATH=/custom/path` +- **Fetch via:** `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/circuits` + +### Compose Stack +Templates and configs in `testing-framework/runners/compose/assets/`: +- `docker-compose.yml.tera` — Stack template (validators, executors, Prometheus) +- Cfgsync config: `testing-framework/assets/stack/cfgsync.yaml` +- Monitoring: `testing-framework/assets/stack/monitoring/prometheus.yml` + +## Logging Architecture + +**Two separate logging pipelines:** + +| Component | Configuration | Output | +|-----------|--------------|--------| +| **Runner binaries** | `RUST_LOG` | Framework orchestration logs | +| **Node processes** | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER`, `NOMOS_LOG_DIR` | Consensus, DA, mempool logs | + +**Node logging:** +- **Local runner:** Writes to temporary directories by default (cleaned up). Set `NOMOS_TESTS_TRACING=true` + `NOMOS_LOG_DIR` for persistent files. +- **Compose runner:** Default logs to container stdout/stderr (`docker logs`). Optional per-node files if `NOMOS_LOG_DIR` is set and mounted. +- **K8s runner:** Logs to pod stdout/stderr (`kubectl logs`). Optional per-node files if `NOMOS_LOG_DIR` is set and mounted. + +**File naming:** Per-node files use prefix `nomos-node-{index}` or `nomos-executor-{index}` (may include timestamps). 
+ +## Observability + +**Prometheus (Compose only):** +- Exposed at `http://localhost:9090` (configurable) +- Scrapes all validator and executor metrics +- Accessible in expectations: `ctx.telemetry().prometheus_endpoint()` + +**Node APIs:** +- HTTP endpoints per node for consensus info, network status, DA membership +- Accessible in expectations: `ctx.node_clients().validators().get(0)` + +**OTLP (optional):** +- Trace endpoint: `NOMOS_OTLP_ENDPOINT=http://localhost:4317` +- Metrics endpoint: `NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318` +- Disabled by default (no noise if unset) + +For detailed logging configuration, see [Logging and Observability](operations.md#logging-and-observability). diff --git a/book/src/authoring-scenarios.md b/book/src/authoring-scenarios.md new file mode 100644 index 0000000..a7035e0 --- /dev/null +++ b/book/src/authoring-scenarios.md @@ -0,0 +1,20 @@ +# Authoring Scenarios + +Creating a scenario is a declarative exercise: + +1. **Shape the topology**: decide how many validators and executors to run, and + what high-level network and data-availability characteristics matter for the + test. +2. **Attach workloads**: pick traffic generators that align with your goals + (transactions, data-availability blobs, or chaos for resilience probes). +3. **Define expectations**: specify the health signals that must hold when the + run finishes (e.g., consensus liveness, inclusion of submitted activity; see + [Core Content: Workloads & Expectations](workloads.md)). +4. **Set duration**: choose a run window long enough to observe meaningful + block progression and the effects of your workloads. +5. **Choose a runner**: target local processes for fast iteration, Docker + Compose for reproducible multi-node stacks, or Kubernetes for cluster-grade + validation. For environment considerations, see [Operations](operations.md). 
+ +Keep scenarios small and explicit: make the intended behavior and the success +criteria clear so failures are easy to interpret and act upon. diff --git a/book/src/best-practices.md b/book/src/best-practices.md new file mode 100644 index 0000000..7521b81 --- /dev/null +++ b/book/src/best-practices.md @@ -0,0 +1,17 @@ +# Best Practices + +- **State your intent**: document the goal of each scenario (throughput, DA + validation, resilience) so expectation choices are obvious. +- **Keep runs meaningful**: choose durations that allow multiple blocks and make + timing-based assertions trustworthy. +- **Separate concerns**: start with deterministic workloads for functional + checks; add chaos in dedicated resilience scenarios to avoid noisy failures. +- **Reuse patterns**: standardize on shared topology and workload presets so + results are comparable across environments and teams. +- **Observe first, tune second**: rely on liveness and inclusion signals to + interpret outcomes before tweaking rates or topology. +- **Environment fit**: pick runners that match the feedback loop you need—local + for speed (including fast CI smoke tests), compose for reproducible stacks + (recommended for CI), k8s for cluster-grade fidelity. +- **Minimal surprises**: seed only necessary wallets and keep configuration + deltas explicit when moving between CI and developer machines. diff --git a/book/src/chaos.md b/book/src/chaos.md new file mode 100644 index 0000000..83356e0 --- /dev/null +++ b/book/src/chaos.md @@ -0,0 +1,58 @@ +# Chaos Workloads + +Chaos in the framework uses node control to introduce failures and validate +recovery. The built-in restart workload lives in +`testing_framework_workflows::workloads::chaos::RandomRestartWorkload`. + +## How it works +- Requires `NodeControlCapability` (`enable_node_control()` in the scenario + builder) and a runner that provides a `NodeControlHandle`. 
+- Randomly selects nodes (validators, executors) to restart based on your + include/exclude flags. +- Respects min/max delay between restarts and a target cooldown to avoid + flapping the same node too frequently. +- Runs alongside other workloads; expectations should account for the added + disruption. +- Support varies by runner: node control is not provided by the local runner + and is not yet implemented for the k8s runner. Use a runner that advertises + `NodeControlHandle` support (e.g., compose) for chaos workloads. + +## Usage +```rust +use std::time::Duration; +use testing_framework_core::scenario::ScenarioBuilder; +use testing_framework_workflows::workloads::chaos::RandomRestartWorkload; + +let plan = ScenarioBuilder::topology() + .network_star() + .validators(2) + .executors(1) + .apply() + .enable_node_control() + .with_workload( + RandomRestartWorkload::new( + Duration::from_secs(45), // min delay + Duration::from_secs(75), // max delay + Duration::from_secs(120), // target cooldown + true, // include validators + true, // include executors + ) + ) + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(150)) + .build(); +// deploy with a runner that supports node control and run the scenario +``` + +## Expectations to pair +- **Consensus liveness**: ensure blocks keep progressing despite restarts. +- **Height convergence**: optionally check all nodes converge after the chaos + window. +- Any workload-specific inclusion checks if you’re also driving tx/DA traffic. + +## Best practices +- Keep delays/cooldowns realistic; avoid back-to-back restarts that would never + happen in production. +- Limit chaos scope: toggle validators vs executors based on what you want to + test. +- Combine with observability: monitor metrics/logs to explain failures. 
diff --git a/book/src/custom-workload-example.md b/book/src/custom-workload-example.md new file mode 100644 index 0000000..341527b --- /dev/null +++ b/book/src/custom-workload-example.md @@ -0,0 +1,116 @@ +# Example: New Workload & Expectation (Rust) + +A minimal, end-to-end illustration of adding a custom workload and matching +expectation. This shows the shape of the traits and where to plug into the +framework; expand the logic to fit your real test. + +## Workload: simple reachability probe + +Key ideas: +- **name**: identifies the workload in logs. +- **expectations**: workloads can bundle defaults so callers don’t forget checks. +- **init**: derive inputs from the generated topology (e.g., pick a target node). +- **start**: drive async activity using the shared `RunContext`. + +```rust +use std::sync::Arc; +use async_trait::async_trait; +use testing_framework_core::scenario::{ + DynError, Expectation, RunContext, RunMetrics, Workload, +}; +use testing_framework_core::topology::GeneratedTopology; + +pub struct ReachabilityWorkload { + target_idx: usize, + bundled: Vec>, +} + +impl ReachabilityWorkload { + pub fn new(target_idx: usize) -> Self { + Self { + target_idx, + bundled: vec![Box::new(ReachabilityExpectation::new(target_idx))], + } + } +} + +#[async_trait] +impl Workload for ReachabilityWorkload { + fn name(&self) -> &'static str { + "reachability_workload" + } + + fn expectations(&self) -> Vec> { + self.bundled.clone() + } + + fn init( + &mut self, + topology: &GeneratedTopology, + _metrics: &RunMetrics, + ) -> Result<(), DynError> { + if topology.validators().get(self.target_idx).is_none() { + return Err("no validator at requested index".into()); + } + Ok(()) + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { + let client = ctx + .clients() + .validators() + .get(self.target_idx) + .ok_or("missing target client")?; + + // Pseudo-action: issue a lightweight RPC to prove reachability. 
+ client.health_check().await.map_err(|e| e.into()) + } +} +``` + +## Expectation: confirm the target stayed reachable + +Key ideas: +- **start_capture**: snapshot baseline if needed (not used here). +- **evaluate**: assert the condition after workloads finish. + +```rust +use async_trait::async_trait; +use testing_framework_core::scenario::{DynError, Expectation, RunContext}; + +pub struct ReachabilityExpectation { + target_idx: usize, +} + +impl ReachabilityExpectation { + pub fn new(target_idx: usize) -> Self { + Self { target_idx } + } +} + +#[async_trait] +impl Expectation for ReachabilityExpectation { + fn name(&self) -> &str { + "target_reachable" + } + + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { + let client = ctx + .clients() + .validators() + .get(self.target_idx) + .ok_or("missing target client")?; + + client.health_check().await.map_err(|e| { + format!("target became unreachable during run: {e}").into() + }) + } +} +``` + +## How to wire it +- Build your scenario as usual and call `.with_workload(ReachabilityWorkload::new(0))`. +- The bundled expectation is attached automatically; you can add more with + `.with_expectation(...)` if needed. +- Keep the logic minimal and fast for smoke tests; grow it into richer probes + for deeper scenarios. diff --git a/book/src/design-rationale.md b/book/src/design-rationale.md new file mode 100644 index 0000000..94961b6 --- /dev/null +++ b/book/src/design-rationale.md @@ -0,0 +1,7 @@ +# Design Rationale + +- **Modular crates** keep configuration, orchestration, workloads, and runners decoupled so each can evolve without breaking the others. +- **Pluggable runners** let the same scenario run on a laptop, a Docker host, or a Kubernetes cluster, making validation portable across environments. +- **Separated workloads and expectations** clarify intent: what traffic to generate versus how to judge success. This simplifies review and reuse. 
+- **Declarative topology** makes cluster shape explicit and repeatable, reducing surprise when moving between CI and developer machines. +- **Maintainability through predictability**: a clear flow from plan to deployment to verification lowers the cost of extending the framework and interpreting failures. diff --git a/book/src/dsl-cheat-sheet.md b/book/src/dsl-cheat-sheet.md new file mode 100644 index 0000000..064bcb4 --- /dev/null +++ b/book/src/dsl-cheat-sheet.md @@ -0,0 +1,133 @@ +# Builder API Quick Reference + +Quick reference for the scenario builder DSL. All methods are chainable. + +## Imports + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_runner_compose::ComposeDeployer; +use testing_framework_runner_k8s::K8sDeployer; +use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt}; +use std::time::Duration; +``` + +## Topology + +```rust +ScenarioBuilder::topology() + .network_star() // Star topology (all connect to seed node) + .validators(3) // Number of validator nodes + .executors(2) // Number of executor nodes + .apply() // Finish topology configuration +``` + +## Wallets + +```rust +.wallets(50) // Seed 50 funded wallet accounts +``` + +## Transaction Workload + +```rust +.transactions() + .rate(5) // 5 transactions per block + .users(20) // Use 20 of the seeded wallets + .apply() // Finish transaction workload config +``` + +## DA Workload + +```rust +.da() + .channel_rate(1) // 1 channel operation per block + .blob_rate(2) // 2 blob dispersals per block + .apply() // Finish DA workload config +``` + +## Chaos Workload (Requires `enable_node_control()`) + +```rust +.enable_node_control() // Enable node control capability +.chaos() + .restart() // Random restart chaos + .min_delay(Duration::from_secs(30)) // Min time between restarts + .max_delay(Duration::from_secs(60)) // Max time between restarts + 
.target_cooldown(Duration::from_secs(45)) // Cooldown after restart + .apply() // Finish chaos workload config +``` + +## Expectations + +```rust +.expect_consensus_liveness() // Assert blocks are produced continuously +``` + +## Run Duration + +```rust +.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds +``` + +## Build + +```rust +.build() // Construct the final Scenario +``` + +## Deployers + +```rust +// Local processes +let deployer = LocalDeployer::default(); + +// Docker Compose +let deployer = ComposeDeployer::default(); + +// Kubernetes +let deployer = K8sDeployer::default(); +``` + +## Execution + +```rust +let runner = deployer.deploy(&plan).await?; +let _handle = runner.run(&mut plan).await?; +``` + +## Complete Example + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn run_test() -> Result<(), Box<dyn std::error::Error>> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(2) + .apply() + .wallets(50) + .transactions() + .rate(5) // 5 transactions per block + .users(20) + .apply() + .da() + .channel_rate(1) // 1 channel operation per block + .blob_rate(2) // 2 blob dispersals per block + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(90)) + .build(); + + let deployer = LocalDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` diff --git a/book/src/examples-advanced.md b/book/src/examples-advanced.md new file mode 100644 index 0000000..ace3068 --- /dev/null +++ b/book/src/examples-advanced.md @@ -0,0 +1,178 @@ +# Advanced Examples + +Realistic advanced scenarios demonstrating framework capabilities for production testing.
+ +## Summary + +| Example | Topology | Workloads | Deployer | Key Feature | +|---------|----------|-----------|----------|-------------| +| Load Progression | 3 validators + 2 executors | Increasing tx rate | Compose | Dynamic load testing | +| Sustained Load | 4 validators + 2 executors | High tx + DA rate | Compose | Stress testing | +| Aggressive Chaos | 4 validators + 2 executors | Frequent restarts + traffic | Compose | Resilience validation | + +## Load Progression Test + +Test consensus under progressively increasing transaction load: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_compose::ComposeDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn load_progression_test() -> Result<(), Box> { + for rate in [5, 10, 20, 30] { + println!("Testing with rate: {}", rate); + + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(2) + .apply() + .wallets(50) + .transactions() + .rate(rate) + .users(20) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(60)) + .build(); + + let deployer = ComposeDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + } + + Ok(()) +} +``` + +**When to use:** Finding the maximum sustainable transaction rate for a given topology. 
+ +## Sustained Load Test + +Run high transaction and DA load for extended duration: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_compose::ComposeDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn sustained_load_test() -> Result<(), Box> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(4) + .executors(2) + .apply() + .wallets(100) + .transactions() + .rate(15) + .users(50) + .apply() + .da() + .channel_rate(2) + .blob_rate(3) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(300)) + .build(); + + let deployer = ComposeDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use:** Validating stability under continuous high load over extended periods. + +## Aggressive Chaos Test + +Frequent node restarts with active traffic: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_compose::ComposeDeployer; +use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt}; +use std::time::Duration; + +async fn aggressive_chaos_test() -> Result<(), Box> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(4) + .executors(2) + .apply() + .enable_node_control() + .wallets(50) + .transactions() + .rate(10) + .users(20) + .apply() + .chaos() + .restart() + .min_delay(Duration::from_secs(10)) + .max_delay(Duration::from_secs(20)) + .target_cooldown(Duration::from_secs(15)) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(180)) + .build(); + + let deployer = ComposeDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use:** Validating recovery and liveness under aggressive failure conditions. 
+ +**Note:** Requires `ComposeDeployer` for node control support. + +## Extension Ideas + +These scenarios require custom implementations but demonstrate framework extensibility: + +### Network Partition Recovery + +**Concept:** Test consensus recovery after network partitions. + +**Requirements:** +- Needs `block_peer()` / `unblock_peer()` methods in `NodeControlHandle` +- Partition subsets of validators, wait, then restore connectivity +- Verify chain convergence after partition heals + +**Why useful:** Tests the most realistic failure mode in distributed systems. + +**Current blocker:** Node control doesn't yet support network-level actions (only process restarts). + +### Block Timing Consistency + +**Concept:** Verify block production intervals stay within expected bounds. + +**Implementation approach:** +- Custom expectation that consumes `BlockFeed` +- Collect block timestamps during run +- Assert intervals are within `(slot_duration * active_slot_coeff) ± tolerance` + +**Why useful:** Validates consensus timing under various loads. + +### Invalid Transaction Fuzzing + +**Concept:** Submit malformed transactions and verify they're rejected properly. + +**Implementation approach:** +- Custom workload that generates invalid transactions (bad signatures, insufficient funds, malformed structure) +- Expectation verifies mempool rejects them and they never appear in blocks +- Test mempool resilience and filtering + +**Why useful:** Ensures mempool doesn't crash or include invalid transactions under fuzzing. + +### Wallet Balance Verification + +**Concept:** Track wallet balances and verify state consistency. + +**Description:** After transaction workload completes, query all wallet balances via node API and verify total supply is conserved. Requires tracking initial state, submitted transactions, and final balances. Validates that the ledger maintains correctness under load (no funds lost or created). 
This is a **state assertion** expectation that checks correctness, not just liveness. diff --git a/book/src/examples.md b/book/src/examples.md new file mode 100644 index 0000000..56b4c8c --- /dev/null +++ b/book/src/examples.md @@ -0,0 +1,163 @@ +# Examples + +Concrete scenario shapes that illustrate how to combine topologies, workloads, +and expectations. + +**Runnable examples:** The repo includes complete binaries in `examples/src/bin/`: +- `local_runner.rs` — Local processes +- `compose_runner.rs` — Docker Compose (requires `NOMOS_TESTNET_IMAGE` built) +- `k8s_runner.rs` — Kubernetes (requires cluster access and image loaded) + +Run with: `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <binary_name>` + +**All runners require `POL_PROOF_DEV_MODE=true`** to avoid expensive proof generation. + +**Code patterns** below show how to build scenarios. Wrap these in `#[tokio::test]` functions for integration tests, or `#[tokio::main]` for binaries. + +## Simple consensus liveness + +Minimal test that validates basic block production: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn simple_consensus() -> Result<(), Box<dyn std::error::Error>> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(0) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(30)) + .build(); + + let deployer = LocalDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use**: smoke tests for consensus on minimal hardware.
+ +## Transaction workload + +Test consensus under transaction load: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn transaction_workload() -> Result<(), Box> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(2) + .executors(0) + .apply() + .wallets(20) + .transactions() + .rate(5) + .users(10) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(60)) + .build(); + + let deployer = LocalDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use**: validate transaction submission and inclusion. + +## DA + transaction workload + +Combined test stressing both transaction and DA layers: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +async fn da_and_transactions() -> Result<(), Box> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(2) + .apply() + .wallets(30) + .transactions() + .rate(5) + .users(15) + .apply() + .da() + .channel_rate(1) + .blob_rate(2) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(90)) + .build(); + + let deployer = LocalDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use**: end-to-end coverage of transaction and DA layers. 
+ +## Chaos resilience + +Test system resilience under node restarts: + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_compose::ComposeDeployer; +use testing_framework_workflows::{ScenarioBuilderExt, ChaosBuilderExt}; +use std::time::Duration; + +async fn chaos_resilience() -> Result<(), Box> { + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(4) + .executors(2) + .apply() + .enable_node_control() + .wallets(20) + .transactions() + .rate(3) + .users(10) + .apply() + .chaos() + .restart() + .min_delay(Duration::from_secs(20)) + .max_delay(Duration::from_secs(40)) + .target_cooldown(Duration::from_secs(30)) + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(120)) + .build(); + + let deployer = ComposeDeployer::default(); + let runner = deployer.deploy(&plan).await?; + let _handle = runner.run(&mut plan).await?; + + Ok(()) +} +``` + +**When to use**: resilience validation and operational readiness drills. + +**Note**: Chaos tests require `ComposeDeployer` or another runner with node control support. diff --git a/book/src/extending.md b/book/src/extending.md new file mode 100644 index 0000000..f96d9c2 --- /dev/null +++ b/book/src/extending.md @@ -0,0 +1,31 @@ +# Extending the Framework + +## Adding a workload +1) Implement `testing_framework_core::scenario::Workload`: + - Provide a name and any bundled expectations. + - In `init`, derive inputs from `GeneratedTopology` and `RunMetrics`; fail + fast if prerequisites are missing (e.g., wallet data, node addresses). + - In `start`, drive async traffic using the `RunContext` clients. +2) Expose the workload from a module under `testing-framework/workflows` and + consider adding a DSL helper for ergonomic wiring. + +## Adding an expectation +1) Implement `testing_framework_core::scenario::Expectation`: + - Use `start_capture` to snapshot baseline metrics. 
+ - Use `evaluate` to assert outcomes after workloads finish; return all errors + so the runner can aggregate them. +2) Export it from `testing-framework/workflows` if it is reusable. + +## Adding a runner +1) Implement `testing_framework_core::scenario::Deployer` for your backend. + - Produce a `RunContext` with `NodeClients`, metrics endpoints, and optional + `NodeControlHandle`. + - Guard cleanup with `CleanupGuard` to reclaim resources even on failures. +2) Mirror the readiness and block-feed probes used by the existing runners so + workloads can rely on consistent signals. + +## Adding topology helpers +- Extend `testing_framework_core::topology::TopologyBuilder` with new layouts or + configuration presets (e.g., specialized DA parameters). Keep defaults safe: + ensure at least one participant and clamp dispersal factors as the current + helpers do. diff --git a/book/src/faq.md b/book/src/faq.md new file mode 100644 index 0000000..907a62d --- /dev/null +++ b/book/src/faq.md @@ -0,0 +1,33 @@ +# FAQ + +**Why block-oriented timing?** +Slots advance at a fixed rate (NTP-synchronized, 2s by default), so reasoning +about blocks and consensus intervals keeps assertions aligned with protocol +behavior rather than arbitrary wall-clock durations. + +**Can I reuse the same scenario across runners?** +Yes. The plan stays the same; swap runners (local, compose, k8s) to target +different environments. + +**When should I enable chaos workloads?** +Only when testing resilience or operational recovery; keep functional smoke +tests deterministic. 
+ +**How long should runs be?** +The framework enforces a minimum of **2× slot duration** (4 seconds with default 2s slots), but practical recommendations: + +- **Smoke tests**: 30s minimum (~14 blocks with default 2s slots, 0.9 coefficient) +- **Transaction workloads**: 60s+ (~27 blocks) to observe inclusion patterns +- **DA workloads**: 90s+ (~40 blocks) to account for dispersal and sampling +- **Chaos tests**: 120s+ (~54 blocks) to allow recovery after restarts + +Very short runs (< 30s) risk false confidence—one or two lucky blocks don't prove liveness. + +**Do I always need seeded wallets?** +Only for transaction scenarios. Data-availability or pure chaos scenarios may +not require them, but liveness checks still need validators producing blocks. + +**What if expectations fail but workloads “look fine”?** +Trust expectations first—they capture the intended success criteria. Use the +observability signals and runner logs to pinpoint why the system missed the +target. diff --git a/book/src/glossary.md b/book/src/glossary.md new file mode 100644 index 0000000..6c5bf0f --- /dev/null +++ b/book/src/glossary.md @@ -0,0 +1,52 @@ +# Glossary + +- **Validator**: node role responsible for participating in consensus and block + production. +- **Executor**: a validator node with the DA dispersal service enabled. Executors + can submit transactions and disperse blob data to the DA network, in addition + to performing all validator functions. +- **DA (Data Availability)**: subsystem ensuring blobs or channel data are + published and retrievable for validation. +- **Deployer**: component that provisions infrastructure (spawns processes, + creates containers, or launches pods), waits for readiness, and returns a + Runner. Examples: LocalDeployer, ComposeDeployer, K8sDeployer. +- **Runner**: component returned by deployers that orchestrates scenario + execution—starts workloads, observes signals, evaluates expectations, and + triggers cleanup. 
+- **Workload**: traffic or behavior generator that exercises the system during a + scenario run. +- **Expectation**: post-run assertion that judges whether the system met the + intended success criteria. +- **Topology**: declarative description of the cluster shape, roles, and + high-level parameters for a scenario. +- **Scenario**: immutable plan combining topology, workloads, expectations, and + run duration. +- **Blockfeed**: stream of block observations used for liveness or inclusion + signals during a run. +- **Control capability**: the ability for a runner to start, stop, or restart + nodes, used by chaos workloads. +- **Slot duration**: time interval between consensus rounds in Cryptarchia. Blocks + are produced at multiples of the slot duration based on lottery outcomes. +- **Block cadence**: observed rate of block production in a live network, measured + in blocks per second or seconds per block. +- **Cooldown**: waiting period after a chaos action (e.g., node restart) before + triggering the next action, allowing the system to stabilize. +- **Run window**: total duration a scenario executes, specified via + `with_run_duration()`. Framework auto-extends to at least 2× slot duration. +- **Readiness probe**: health check performed by runners to ensure nodes are + reachable and responsive before starting workloads. Prevents false negatives + from premature traffic. +- **Liveness**: property that the system continues making progress (producing + blocks) under specified conditions. Contrasts with safety/correctness which + verifies that state transitions are accurate. +- **State assertion**: expectation that verifies specific values in the system + state (e.g., wallet balances, UTXO sets) rather than just progress signals. + Also called "correctness expectations." +- **Mantle transaction**: transaction type in Nomos that can contain UTXO transfers + (LedgerTx) and operations (Op), including channel data (ChannelBlob). 
+- **Channel**: logical grouping for DA blobs; each blob belongs to a channel and + references a parent blob in the same channel, creating a chain of related data. +- **POL_PROOF_DEV_MODE**: environment variable that disables expensive Groth16 zero-knowledge + proof generation for leader election. **Required for all runners** (local, compose, k8s) + for practical testing—without it, proof generation causes timeouts. Should never be + used in production environments. diff --git a/book/src/internal-crate-reference.md b/book/src/internal-crate-reference.md new file mode 100644 index 0000000..e8f4cee --- /dev/null +++ b/book/src/internal-crate-reference.md @@ -0,0 +1,123 @@ +# Internal Crate Reference + +High-level roles of the crates that make up the framework: + +- **Configs** (`testing-framework/configs/`): Prepares reusable configuration primitives for nodes, networking, tracing, data availability, and wallets, shared by all scenarios and runners. Includes topology generation and circuit asset resolution. + +- **Core scenario orchestration** (`testing-framework/core/`): Houses the topology and scenario model, runtime coordination, node clients, and readiness/health probes. Defines `Deployer` and `Runner` traits, `ScenarioBuilder`, and `RunContext`. + +- **Workflows** (`testing-framework/workflows/`): Packages workloads (transaction, DA, chaos) and expectations (consensus liveness) into reusable building blocks. Offers fluent DSL extensions (`ScenarioBuilderExt`, `ChaosBuilderExt`). + +- **Runners** (`testing-framework/runners/{local,compose,k8s}/`): Implements deployment backends (local host, Docker Compose, Kubernetes) that all consume the same scenario plan. Each provides a `Deployer` implementation (`LocalDeployer`, `ComposeDeployer`, `K8sDeployer`). + +- **Runner Examples** (`examples/runner-examples`): Runnable binaries demonstrating framework usage and serving as living documentation. 
These are the **primary entry point** for running scenarios (`local_runner.rs`, `compose_runner.rs`, `k8s_runner.rs`). + +## Where to Add New Capabilities + +| What You're Adding | Where It Goes | Examples | +|-------------------|---------------|----------| +| **Node config parameter** | `testing-framework/configs/src/topology/configs/` | Slot duration, log levels, DA params | +| **Topology feature** | `testing-framework/core/src/topology/` | New network layouts, node roles | +| **Scenario capability** | `testing-framework/core/src/scenario/` | New capabilities, context methods | +| **Workload** | `testing-framework/workflows/src/workloads/` | New traffic generators | +| **Expectation** | `testing-framework/workflows/src/expectations/` | New success criteria | +| **Builder API** | `testing-framework/workflows/src/builder/` | DSL extensions, fluent methods | +| **Deployer** | `testing-framework/runners/` | New deployment backends | +| **Example scenario** | `examples/src/bin/` | Demonstration binaries | + +## Extension Workflow + +### Adding a New Workload + +1. **Define the workload** in `testing-framework/workflows/src/workloads/your_workload.rs`: + ```rust + use async_trait::async_trait; + use testing_framework_core::scenario::{Workload, RunContext, DynError}; + + pub struct YourWorkload { + // config fields + } + + #[async_trait] + impl Workload for YourWorkload { + fn name(&self) -> &'static str { "your_workload" } + async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { + // implementation + Ok(()) + } + } + ``` + +2. **Add builder extension** in `testing-framework/workflows/src/builder/mod.rs`: + ```rust + pub trait ScenarioBuilderExt { + fn your_workload(self) -> YourWorkloadBuilder; + } + ``` + +3. 
**Use in examples** in `examples/src/bin/your_scenario.rs`: + ```rust + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(0) + .apply() + .your_workload() // Your new DSL method + .apply() + .build(); + ``` + +### Adding a New Expectation + +1. **Define the expectation** in `testing-framework/workflows/src/expectations/your_expectation.rs`: + ```rust + use async_trait::async_trait; + use testing_framework_core::scenario::{Expectation, RunContext, DynError}; + + pub struct YourExpectation { + // config fields + } + + #[async_trait] + impl Expectation for YourExpectation { + fn name(&self) -> &str { "your_expectation" } + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { + // implementation + Ok(()) + } + } + ``` + +2. **Add builder extension** in `testing-framework/workflows/src/builder/mod.rs`: + ```rust + pub trait ScenarioBuilderExt { + fn expect_your_condition(self) -> Self; + } + ``` + +### Adding a New Deployer + +1. **Implement `Deployer` trait** in `testing-framework/runners/your_runner/src/deployer.rs`: + ```rust + use async_trait::async_trait; + use testing_framework_core::scenario::{Deployer, Runner, Scenario}; + + pub struct YourDeployer; + + #[async_trait] + impl Deployer for YourDeployer { + type Error = YourError; + + async fn deploy(&self, scenario: &Scenario) -> Result { + // Provision infrastructure + // Wait for readiness + // Return Runner + } + } + ``` + +2. **Provide cleanup** and handle node control if supported. + +3. **Add example** in `examples/src/bin/your_runner.rs`. + +For detailed examples, see [Extending the Framework](extending.md) and [Custom Workload Example](custom-workload-example.md). 
diff --git a/book/src/introduction.md b/book/src/introduction.md new file mode 100644 index 0000000..d153b67 --- /dev/null +++ b/book/src/introduction.md @@ -0,0 +1,15 @@ +# Introduction + +The Nomos Testing Framework is a purpose-built toolkit for exercising Nomos in +realistic, multi-node environments. It solves the gap between small, isolated +tests and full-system validation by letting teams describe a cluster layout, +drive meaningful traffic, and assert the outcomes in one coherent plan. + +It is for protocol engineers, infrastructure operators, and QA teams who need +repeatable confidence that validators, executors, and data-availability +components work together under network and timing constraints. + +Multi-node integration testing is required because many Nomos behaviors—block +progress, data availability, liveness under churn—only emerge when several +roles interact over real networking and time. This framework makes those checks +declarative, observable, and portable across environments. diff --git a/book/src/node-control.md b/book/src/node-control.md new file mode 100644 index 0000000..9cf953e --- /dev/null +++ b/book/src/node-control.md @@ -0,0 +1,76 @@ +# Node Control & RunContext + +The deployer supplies a `RunContext` that workloads and expectations share. 
It +provides: + +- Topology descriptors (`GeneratedTopology`) +- Client handles (`NodeClients` / `ClusterClient`) for HTTP/RPC calls +- Metrics (`RunMetrics`, `Metrics`) and block feed +- Optional `NodeControlHandle` for managing nodes + +## Current Chaos Capabilities and Limitations + +The framework currently supports **process-level chaos** (node restarts) for +resilience testing: + +**Supported:** +- Restart validators (`restart_validator`) +- Restart executors (`restart_executor`) +- Random restart workload via `.chaos().restart()` + +**Not Yet Supported:** +- Network partitions (blocking peers, packet loss) +- Resource constraints (CPU throttling, memory limits) +- Byzantine behavior injection (invalid blocks, bad signatures) +- Selective peer blocking/unblocking + +For network partition testing, see [Extension Ideas](examples-advanced.md#extension-ideas) +which describes the proposed `block_peer`/`unblock_peer` API (not yet implemented). + +## Accessing node control in workloads/expectations + +Check for control support and use it conditionally: + +```rust +use testing_framework_core::scenario::{Expectation, RunContext, Workload}; + +struct RestartWorkload; + +impl Workload for RestartWorkload { + fn name(&self) -> &'static str { "restart_workload" } + + async fn start(&self, ctx: &RunContext) -> Result<(), Box<dyn std::error::Error>> { + if let Some(control) = ctx.node_control() { + // Restart the first validator (index 0) if supported. + control.restart_validator(0).await?; + } + Ok(()) + } +} +``` + +When chaos workloads need control, require `enable_node_control()` in the +scenario builder and deploy with a runner that supports it.
+ +## Current API surface + +The `NodeControlHandle` trait currently provides: + +```rust +pub trait NodeControlHandle: Send + Sync { + async fn restart_validator(&self, index: usize) -> Result<(), DynError>; + async fn restart_executor(&self, index: usize) -> Result<(), DynError>; +} +``` + +Future extensions may include peer blocking/unblocking or other control +operations. For now, focus on restart-based chaos patterns as shown in the +chaos workload examples. + +## Considerations + +- Always guard control usage: not all runners expose `NodeControlHandle`. +- Treat control as best-effort: failures should surface as test failures, but + workloads should degrade gracefully when control is absent. +- Combine control actions with expectations (e.g., restart then assert height + convergence) to keep scenarios meaningful. diff --git a/book/src/operations.md b/book/src/operations.md new file mode 100644 index 0000000..ba0a03c --- /dev/null +++ b/book/src/operations.md @@ -0,0 +1,412 @@ +# Operations + +Operational readiness focuses on prerequisites, environment fit, and clear +signals: + +- **Prerequisites**: keep a sibling `nomos-node` checkout available; ensure the + chosen runner’s platform needs are met (local binaries for host runs, Docker + for compose, cluster access for k8s). +- **Artifacts**: DA scenarios require KZG parameters (circuit assets) located at + `testing-framework/assets/stack/kzgrs_test_params`. Fetch them via + `scripts/setup-nomos-circuits.sh` or override the path with `NOMOS_KZGRS_PARAMS_PATH`. +- **Environment flags**: `POL_PROOF_DEV_MODE=true` is **required for all runners** + (local, compose, k8s) unless you want expensive Groth16 proof generation that + will cause tests to timeout. Configure logging via `NOMOS_LOG_DIR`, `NOMOS_LOG_LEVEL`, + and `NOMOS_LOG_FILTER` (see [Logging and Observability](#logging-and-observability) + for details). Note that nodes ignore `RUST_LOG` and only respond to `NOMOS_*` variables. 
+- **Readiness checks**: verify runners report node readiness before starting + workloads; this avoids false negatives from starting too early. +- **Failure triage**: map failures to missing prerequisites (wallet seeding, + node control availability), runner platform issues, or unmet expectations. + Start with liveness signals, then dive into workload-specific assertions. + +Treat operational hygiene—assets present, prerequisites satisfied, observability +reachable—as the first step to reliable scenario outcomes. + +## CI Usage + +Both **LocalDeployer** and **ComposeDeployer** work in CI environments: + +**LocalDeployer in CI:** +- Faster (no Docker overhead) +- Good for quick smoke tests +- **Trade-off:** Less isolation (processes share host) + +**ComposeDeployer in CI (recommended):** +- Better isolation (containerized) +- Reproducible environment +- Includes Prometheus/observability +- **Trade-off:** Slower startup (Docker image build) +- **Trade-off:** Requires Docker daemon + +See `.github/workflows/compose-mixed.yml` for a complete CI example using ComposeDeployer. 
+ +## Running Examples + +### Local Runner + +```bash +POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner +``` + +**Optional environment variables:** +- `LOCAL_DEMO_VALIDATORS=3` — Number of validators (default: 1) +- `LOCAL_DEMO_EXECUTORS=2` — Number of executors (default: 1) +- `LOCAL_DEMO_RUN_SECS=120` — Run duration in seconds (default: 60) +- `NOMOS_TESTS_TRACING=true` — Enable persistent file logging (required with `NOMOS_LOG_DIR`) +- `NOMOS_LOG_DIR=/tmp/logs` — Directory for per-node log files (only with `NOMOS_TESTS_TRACING=true`) +- `NOMOS_LOG_LEVEL=debug` — Set log level (default: info) +- `NOMOS_LOG_FILTER=consensus=trace,da=debug` — Fine-grained per-module log filtering + +**Note:** The default `local_runner` example includes DA workload, so circuit assets in `testing-framework/assets/stack/kzgrs_test_params/` are required (fetch via `scripts/setup-nomos-circuits.sh`). + +### Compose Runner + +**Prerequisites:** +1. **Docker daemon running** +2. **Circuit assets** in `testing-framework/assets/stack/kzgrs_test_params` (fetched via `scripts/setup-nomos-circuits.sh`) +3. 
**Test image built** (see below) + +**Build the test image:** +```bash +# Fetch circuit assets first +chmod +x scripts/setup-nomos-circuits.sh +scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits +cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ + +# Build image (embeds assets) +chmod +x testing-framework/assets/stack/scripts/build_test_image.sh +testing-framework/assets/stack/scripts/build_test_image.sh +``` + +**Run the example:** +```bash +NOMOS_TESTNET_IMAGE=nomos-testnet:local \ +POL_PROOF_DEV_MODE=true \ +cargo run -p runner-examples --bin compose_runner +``` + +**Required environment variables:** +- `NOMOS_TESTNET_IMAGE=nomos-testnet:local` — Image tag (must match built image) +- `POL_PROOF_DEV_MODE=true` — **Critical:** Without this, proof generation is CPU-intensive and tests will timeout + +**Optional environment variables:** +- `COMPOSE_NODE_PAIRS=1x1` — Topology: "validators×executors" (default varies by example) +- `TEST_FRAMEWORK_PROMETHEUS_PORT=9091` — Override Prometheus port (default: 9090) +- `COMPOSE_RUNNER_HOST=127.0.0.1` — Host address for port mappings (default: 127.0.0.1) +- `COMPOSE_RUNNER_PRESERVE=1` — Keep containers running after test (for debugging) +- `NOMOS_LOG_DIR=/tmp/compose-logs` — Write logs to files inside containers (requires copy-out or volume mount) +- `NOMOS_LOG_LEVEL=debug` — Set log level + +**Compose-specific features:** +- **Node control support**: Only runner that supports chaos testing (`.enable_node_control()` + `.chaos()` workloads) +- **Prometheus observability**: Metrics at `http://localhost:9090` + +**Important:** Chaos workloads (random restarts) **only work with ComposeDeployer**. LocalDeployer and K8sDeployer do not support node control. + +### K8s Runner + +**Prerequisites:** +1. **Kubernetes cluster** with `kubectl` configured and working +2. **Circuit assets** in `testing-framework/assets/stack/kzgrs_test_params` +3. 
**Test image built** (same as Compose: `testing-framework/assets/stack/scripts/build_test_image.sh`) +4. **Image available in cluster** (loaded via `kind`, `minikube`, or pushed to registry) +5. **POL_PROOF_DEV_MODE=true** environment variable set + +**Load image into cluster:** +```bash +# For kind clusters +export NOMOS_TESTNET_IMAGE=nomos-testnet:local +kind load docker-image nomos-testnet:local + +# For minikube +minikube image load nomos-testnet:local + +# For remote clusters (push to registry) +docker tag nomos-testnet:local your-registry/nomos-testnet:local +docker push your-registry/nomos-testnet:local +export NOMOS_TESTNET_IMAGE=your-registry/nomos-testnet:local +``` + +**Run the example:** +```bash +export NOMOS_TESTNET_IMAGE=nomos-testnet:local +export POL_PROOF_DEV_MODE=true +cargo run -p runner-examples --bin k8s_runner +``` + +**Important:** +- K8s runner mounts `testing-framework/assets/stack/kzgrs_test_params` as a hostPath volume. Ensure this directory exists and contains circuit assets on the node where pods will be scheduled. +- **No node control support yet**: Chaos workloads (`.enable_node_control()`) will fail. Use ComposeDeployer for chaos testing. + +## Circuit Assets (KZG Parameters) + +DA workloads require KZG cryptographic parameters for polynomial commitment schemes. 
+ +### Asset Location + +**Default path:** `testing-framework/assets/stack/kzgrs_test_params` + +**Override:** Set `NOMOS_KZGRS_PARAMS_PATH` to use a custom location: +```bash +NOMOS_KZGRS_PARAMS_PATH=/path/to/custom/params cargo run -p runner-examples --bin local_runner +``` + +### Getting Circuit Assets + +**Option 1: Use helper script** (recommended): +```bash +# From the repository root +chmod +x scripts/setup-nomos-circuits.sh +scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits + +# Copy to default location +cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ +``` + +**Option 2: Build locally** (advanced): +```bash +# Requires Go, Rust, and circuit build tools +make kzgrs_test_params +``` + +### CI Workflow + +The CI automatically fetches and places assets: +```yaml +- name: Install circuits for host build + run: | + scripts/setup-nomos-circuits.sh v0.3.1 "$TMPDIR/nomos-circuits" + cp -a "$TMPDIR/nomos-circuits"/. testing-framework/assets/stack/kzgrs_test_params/ +``` + +### When Are Assets Needed? + +| Runner | When Required | +|--------|---------------| +| **Local** | Always (for DA workloads) | +| **Compose** | During image build (baked into `NOMOS_TESTNET_IMAGE`) | +| **K8s** | During image build + deployed to cluster via hostPath volume | + +**Error without assets:** +``` +Error: missing KZG parameters at testing-framework/assets/stack/kzgrs_test_params +``` + +## Logging and Observability + +### Node Logging vs Framework Logging + +**Critical distinction:** Node logs and framework logs use different configuration mechanisms. 
+ +| Component | Controlled By | Purpose | +|-----------|--------------|---------| +| **Framework binaries** (`cargo run -p runner-examples --bin local_runner`) | `RUST_LOG` | Runner orchestration, deployment logs | +| **Node processes** (validators, executors spawned by runner) | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER`, `NOMOS_LOG_DIR` | Consensus, DA, mempool, network logs | + +**Common mistake:** Setting `RUST_LOG=debug` only increases verbosity of the runner binary itself. Node logs remain at their default level unless you also set `NOMOS_LOG_LEVEL=debug`. + +**Example:** +```bash +# This only makes the RUNNER verbose, not the nodes: +RUST_LOG=debug cargo run -p runner-examples --bin local_runner + +# This makes the NODES verbose: +NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner + +# Both verbose (typically not needed): +RUST_LOG=debug NOMOS_LOG_LEVEL=debug cargo run -p runner-examples --bin local_runner +``` + +### Logging Environment Variables + +| Variable | Default | Effect | +|----------|---------|--------| +| `NOMOS_LOG_DIR` | None (console only) | Directory for per-node log files. If unset, logs go to stdout/stderr. 
| 
| `NOMOS_LOG_LEVEL` | `info` | Global log level: `error`, `warn`, `info`, `debug`, `trace` |
| `NOMOS_LOG_FILTER` | None | Fine-grained target filtering (e.g., `consensus=trace,da=debug`) |
| `NOMOS_TESTS_TRACING` | `false` | Enable tracing subscriber for local runner file logging |
| `NOMOS_OTLP_ENDPOINT` | None | OTLP trace endpoint (optional, disables OTLP noise if unset) |
| `NOMOS_OTLP_METRICS_ENDPOINT` | None | OTLP metrics endpoint (optional) |

**Example:** Full debug logging to files:
```bash
NOMOS_TESTS_TRACING=true \
NOMOS_LOG_DIR=/tmp/test-logs \
NOMOS_LOG_LEVEL=debug \
NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug" \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
```

### Per-Node Log Files

When `NOMOS_LOG_DIR` is set, each node writes logs to separate files:

**File naming pattern:**
- **Validators**: Prefix `nomos-node-0`, `nomos-node-1`, etc. (may include timestamp suffix)
- **Executors**: Prefix `nomos-executor-0`, `nomos-executor-1`, etc. (may include timestamp suffix)

**Local runner caveat:** By default, the local runner writes logs to system temporary directories (not the working directory). These are automatically cleaned up after tests complete. To preserve logs, you MUST set both `NOMOS_TESTS_TRACING=true` AND `NOMOS_LOG_DIR=/path/to/logs`. 

### Filter Target Names

Common target prefixes for `NOMOS_LOG_FILTER`:

| Target Prefix | Subsystem |
|---------------|-----------|
| `nomos_consensus` | Consensus (Cryptarchia) |
| `nomos_da_sampling` | DA sampling service |
| `nomos_da_dispersal` | DA dispersal service |
| `nomos_da_verifier` | DA verification |
| `nomos_mempool` | Transaction mempool |
| `nomos_blend` | Mix network/privacy layer |
| `chain_network` | P2P networking |
| `chain_leader` | Leader election |

**Example filter:**
```bash
NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug,chain_network=info"
```

### Accessing Logs Per Runner

#### Local Runner

**Default (temporary directories, auto-cleanup):**
```bash
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
# Logs written to system temporary directories (not the working directory)
# Automatically cleaned up after test completes
```

**Persistent file output:**
```bash
NOMOS_TESTS_TRACING=true \
NOMOS_LOG_DIR=/tmp/local-logs \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner

# After test completes:
ls /tmp/local-logs/
# Files with prefix: nomos-node-0*, nomos-node-1*, nomos-executor-0*
# May include timestamps in filename
```

**Both flags required:** You MUST set both `NOMOS_TESTS_TRACING=true` (enables tracing file sink) AND `NOMOS_LOG_DIR` (specifies directory) to get persistent logs.

#### Compose Runner

**Via Docker logs (default, recommended):**
```bash
# List containers (note the UUID prefix in names)
docker ps --filter "name=nomos-compose-"

# Stream logs from specific container
docker logs -f <container-id>

# Or use name pattern matching:
docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1)
```

**Via file collection (advanced):**

Setting `NOMOS_LOG_DIR` writes files **inside the container**. To access them, you must either:

1. 
**Copy files out after the run:** +```bash +NOMOS_LOG_DIR=/logs \ +NOMOS_TESTNET_IMAGE=nomos-testnet:local \ +POL_PROOF_DEV_MODE=true \ +cargo run -p runner-examples --bin compose_runner + +# After test, copy files from containers: +docker ps --filter "name=nomos-compose-" +docker cp :/logs/nomos-node-0* /tmp/ +``` + +2. **Mount a host volume** (requires modifying compose template): +```yaml +volumes: + - /tmp/host-logs:/logs # Add to docker-compose.yml.tera +``` + +**Recommendation:** Use `docker logs` by default. File collection inside containers is complex and rarely needed. + +**Keep containers for debugging:** +```bash +COMPOSE_RUNNER_PRESERVE=1 \ +NOMOS_TESTNET_IMAGE=nomos-testnet:local \ +cargo run -p runner-examples --bin compose_runner +# Containers remain running after test—inspect with docker logs or docker exec +``` + +**Note:** Container names follow pattern `nomos-compose-{uuid}-validator-{index}-1` where `{uuid}` changes per run. + +#### K8s Runner + +**Via kubectl logs (use label selectors):** +```bash +# List pods +kubectl get pods + +# Stream logs using label selectors (recommended) +kubectl logs -l app=nomos-validator -f +kubectl logs -l app=nomos-executor -f + +# Stream logs from specific pod +kubectl logs -f nomos-validator-0 + +# Previous logs from crashed pods +kubectl logs --previous -l app=nomos-validator +``` + +**Download logs for offline analysis:** +```bash +# Using label selectors +kubectl logs -l app=nomos-validator --tail=1000 > all-validators.log +kubectl logs -l app=nomos-executor --tail=1000 > all-executors.log + +# Specific pods +kubectl logs nomos-validator-0 > validator-0.log +kubectl logs nomos-executor-1 > executor-1.log +``` + +**Specify namespace (if not using default):** +```bash +kubectl logs -n my-namespace -l app=nomos-validator -f +``` + +### OTLP and Telemetry + +**OTLP exporters are optional.** If you see errors about unreachable OTLP endpoints, it's safe to ignore them unless you're actively collecting 
traces/metrics. + +**To enable OTLP:** +```bash +NOMOS_OTLP_ENDPOINT=http://localhost:4317 \ +NOMOS_OTLP_METRICS_ENDPOINT=http://localhost:4318 \ +cargo run -p runner-examples --bin local_runner +``` + +**To silence OTLP errors:** Simply leave these variables unset (the default). + +### Observability: Prometheus and Node APIs + +Runners expose metrics and node HTTP endpoints for expectation code and debugging: + +**Prometheus (Compose only):** +- Default: `http://localhost:9090` +- Override: `TEST_FRAMEWORK_PROMETHEUS_PORT=9091` +- Access from expectations: `ctx.telemetry().prometheus_endpoint()` + +**Node APIs:** +- Access from expectations: `ctx.node_clients().validators().get(0)` +- Endpoints: consensus info, network info, DA membership, etc. +- See `testing-framework/core/src/nodes/api_client.rs` for available methods + +```mermaid +flowchart TD + Expose[Runner exposes endpoints/ports] --> Collect[Runtime collects block/health signals] + Collect --> Consume[Expectations consume signals
decide pass/fail] + Consume --> Inspect[Operators inspect logs/metrics
when failures arise] +``` diff --git a/book/src/part-i.md b/book/src/part-i.md new file mode 100644 index 0000000..74e4ac6 --- /dev/null +++ b/book/src/part-i.md @@ -0,0 +1,4 @@ +# Part I — Foundations + +Conceptual chapters that establish the mental model for the framework and how +it approaches multi-node testing. diff --git a/book/src/part-ii.md b/book/src/part-ii.md new file mode 100644 index 0000000..36eb205 --- /dev/null +++ b/book/src/part-ii.md @@ -0,0 +1,4 @@ +# Part II — User Guide + +Practical guidance for shaping scenarios, combining workloads and expectations, +and running them across different environments. diff --git a/book/src/part-iii.md b/book/src/part-iii.md new file mode 100644 index 0000000..107c890 --- /dev/null +++ b/book/src/part-iii.md @@ -0,0 +1,4 @@ +# Part III — Developer Reference + +Deep dives for contributors who extend the framework, evolve its abstractions, +or maintain the crate set. diff --git a/book/src/part-iv.md b/book/src/part-iv.md new file mode 100644 index 0000000..51b08b6 --- /dev/null +++ b/book/src/part-iv.md @@ -0,0 +1,4 @@ +# Part IV — Appendix + +Quick-reference material and supporting guidance to keep scenarios discoverable, +debuggable, and consistent. diff --git a/book/src/project-context-primer.md b/book/src/project-context-primer.md new file mode 100644 index 0000000..14a844c --- /dev/null +++ b/book/src/project-context-primer.md @@ -0,0 +1,16 @@ +# Project Context Primer + +This book focuses on the Nomos Testing Framework. It assumes familiarity with +the Nomos architecture, but for completeness, here is a short primer. + +- **Nomos** is a modular blockchain protocol composed of validators, executors, + and a data-availability (DA) subsystem. +- **Validators** participate in consensus and produce blocks. +- **Executors** are validators with the DA dispersal service enabled. They perform + all validator functions plus submit blob data to the DA network. 
+- **Data Availability (DA)** ensures that blob data submitted via channel operations + in transactions is published and retrievable by the network. + +These roles interact tightly, which is why meaningful testing must be performed +in multi-node environments that include real networking, timing, and DA +interaction. diff --git a/book/src/quickstart.md b/book/src/quickstart.md new file mode 100644 index 0000000..d076f8b --- /dev/null +++ b/book/src/quickstart.md @@ -0,0 +1,187 @@ +# Quickstart + +Get a working example running quickly. + +## Prerequisites + +- Rust toolchain (nightly) +- Sibling `nomos-node` checkout built and available +- This repository cloned +- Unix-like system (tested on Linux and macOS) + +## Your First Test + +The framework ships with runnable example binaries in `examples/src/bin/`. Let's start with the local runner: + +```bash +# From the nomos-testing directory +POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner +``` + +This runs a complete scenario with **defaults**: 1 validator + 1 executor, mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration. 
+ +**Core API Pattern** (simplified example): + +```rust +use testing_framework_core::scenario::{Deployer, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ScenarioBuilderExt; +use std::time::Duration; + +// Define the scenario (1 validator + 1 executor, tx + DA workload) +let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(1) + .executors(1) + .apply() + .wallets(64) + .transactions() + .rate(5) // 5 transactions per block + .users(8) + .apply() + .da() + .channel_rate(1) // 1 channel operation per block + .blob_rate(1) // 1 blob dispersal per block + .apply() + .expect_consensus_liveness() + .with_run_duration(Duration::from_secs(60)) + .build(); + +// Deploy and run +let deployer = LocalDeployer::default(); +let runner = deployer.deploy(&plan).await?; +let _handle = runner.run(&mut plan).await?; +``` + +**Note:** The examples are binaries with `#[tokio::main]`, not test functions. If you want to write integration tests, wrap this pattern in `#[tokio::test]` functions in your own test suite. + +**Important:** `POL_PROOF_DEV_MODE=true` disables expensive Groth16 zero-knowledge proof generation for leader election. Without it, proof generation is CPU-intensive and tests will timeout. **This is required for all runners** (local, compose, k8s) for practical testing. Never use in production. + +**What you should see:** +- Nodes spawn as local processes +- Consensus starts producing blocks +- Scenario runs for the configured duration +- Node logs written to temporary directories in working directory (auto-cleaned up after test) +- To persist logs: set `NOMOS_TESTS_TRACING=true` and `NOMOS_LOG_DIR=/path/to/logs` (files will have prefix like `nomos-node-0*`, may include timestamps) + +## What Just Happened? + +Let's unpack the code: + +### 1. 
Topology Configuration + +```rust +ScenarioBuilder::topology() + .network_star() // Star topology: all nodes connect to seed + .validators(1) // 1 validator node + .executors(1) // 1 executor node (validator + DA dispersal) + .apply() +``` + +This defines **what** your test network looks like. + +### 2. Wallet Seeding + +```rust +.wallets(64) // Seed 64 funded wallet accounts +``` + +Provides funded accounts for transaction submission. + +### 3. Workloads + +```rust +.transactions() + .rate(5) // 5 transactions per block + .users(8) // Use 8 of the 64 wallets + .apply() +.da() + .channel_rate(1) // 1 channel operation per block + .blob_rate(1) // 1 blob dispersal per block + .apply() +``` + +Generates both transaction and DA traffic to stress both subsystems. + +### 4. Expectation + +```rust +.expect_consensus_liveness() +``` + +This says **what success means**: blocks must be produced continuously. + +### 5. Run Duration + +```rust +.with_run_duration(Duration::from_secs(60)) +``` + +Run for 60 seconds (~27 blocks with default 2s slots, 0.9 coefficient). Framework ensures this is at least 2× the consensus slot duration. + +### 6. Deploy and Execute + +```rust +let deployer = LocalDeployer::default(); // Use local process deployer +let runner = deployer.deploy(&plan).await?; // Provision infrastructure +let _handle = runner.run(&mut plan).await?; // Execute workloads & expectations +``` + +**Deployer** provisions the infrastructure. **Runner** orchestrates execution. 
+ +## Adjust the Topology + +The binary accepts environment variables to adjust defaults: + +```bash +# Scale up to 3 validators + 2 executors, run for 2 minutes +LOCAL_DEMO_VALIDATORS=3 \ +LOCAL_DEMO_EXECUTORS=2 \ +LOCAL_DEMO_RUN_SECS=120 \ +POL_PROOF_DEV_MODE=true \ +cargo run -p runner-examples --bin local_runner +``` + +## Try Docker Compose + +Use the same API with a different deployer for reproducible containerized environment: + +```bash +# Build the test image first (includes circuit assets) +chmod +x scripts/setup-nomos-circuits.sh +scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits +cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/ + +chmod +x testing-framework/assets/stack/scripts/build_test_image.sh +testing-framework/assets/stack/scripts/build_test_image.sh + +# Run with Compose +NOMOS_TESTNET_IMAGE=nomos-testnet:local \ +POL_PROOF_DEV_MODE=true \ +cargo run -p runner-examples --bin compose_runner +``` + +**Benefit:** Reproducible containerized environment with Prometheus at `http://localhost:9090`. + +**In code:** Just swap the deployer: + +```rust +use testing_framework_runner_compose::ComposeDeployer; + +// ... same scenario definition ... 
+ +let deployer = ComposeDeployer::default(); // Use Docker Compose +let runner = deployer.deploy(&plan).await?; +let _handle = runner.run(&mut plan).await?; +``` + +## Next Steps + +Now that you have a working test: + +- **Understand the philosophy**: [Testing Philosophy](testing-philosophy.md) +- **Learn the architecture**: [Architecture Overview](architecture-overview.md) +- **See more examples**: [Examples](examples.md) +- **API reference**: [Builder API Quick Reference](dsl-cheat-sheet.md) +- **Debug failures**: [Troubleshooting](troubleshooting.md) + diff --git a/book/src/runners.md b/book/src/runners.md new file mode 100644 index 0000000..54698d4 --- /dev/null +++ b/book/src/runners.md @@ -0,0 +1,44 @@ +# Runners + +Runners turn a scenario plan into a live environment while keeping the plan +unchanged. Choose based on feedback speed, reproducibility, and fidelity. For +environment and operational considerations, see [Operations](operations.md). + +**Important:** All runners require `POL_PROOF_DEV_MODE=true` to avoid expensive Groth16 proof generation that causes timeouts. + +## Local runner +- Launches node processes directly on the host. +- Fastest feedback loop and minimal orchestration overhead. +- Best for development-time iteration and debugging. +- **Can run in CI** for fast smoke tests. +- **Node control:** Not supported (chaos workloads not available) + +## Docker Compose runner +- Starts nodes in containers to provide a reproducible multi-node stack on a + single machine. +- Discovers service ports and wires observability for convenient inspection. +- Good balance between fidelity and ease of setup. +- **Recommended for CI pipelines** (isolated environment, reproducible). +- **Node control:** Supported (can restart nodes for chaos testing) + +## Kubernetes runner +- Deploys nodes onto a cluster for higher-fidelity, longer-running scenarios. +- Suits CI with cluster access or shared test environments where cluster behavior + and scheduling matter. 
+- **Node control:** Not supported yet (chaos workloads not available) + +### Common expectations +- All runners require at least one validator and, for transaction scenarios, + access to seeded wallets. +- Readiness probes gate workload start so traffic begins only after nodes are + reachable. +- Environment flags can relax timeouts or increase tracing when diagnostics are + needed. + +```mermaid +flowchart TD + Plan[Scenario Plan] --> RunSel{Runner
(local | compose | k8s)} + RunSel --> Provision[Provision & readiness] + Provision --> Runtime[Runtime + observability] + Runtime --> Exec[Workloads & Expectations execute] +``` diff --git a/book/src/running-scenarios.md b/book/src/running-scenarios.md new file mode 100644 index 0000000..6c7d598 --- /dev/null +++ b/book/src/running-scenarios.md @@ -0,0 +1,18 @@ +# Running Scenarios + +Running a scenario follows the same conceptual flow regardless of environment: + +1. Select or author a scenario plan that pairs a topology with workloads, + expectations, and a suitable run window. +2. Choose a deployer aligned with your environment (local, compose, or k8s) and + ensure its prerequisites are available. +3. Deploy the plan through the deployer, which provisions infrastructure and + returns a runner. +4. The runner orchestrates workload execution for the planned duration; keep + observability signals visible so you can correlate outcomes. +5. The runner evaluates expectations and captures results as the primary + pass/fail signal. + +Use the same plan across different deployers to compare behavior between local +development and CI or cluster settings. For environment prerequisites and +flags, see [Operations](operations.md). diff --git a/book/src/scenario-builder-ext-patterns.md b/book/src/scenario-builder-ext-patterns.md new file mode 100644 index 0000000..e365e72 --- /dev/null +++ b/book/src/scenario-builder-ext-patterns.md @@ -0,0 +1,17 @@ +# Core Content: ScenarioBuilderExt Patterns + +Patterns that keep scenarios readable and reusable: + +- **Topology-first**: start by shaping the cluster (counts, layout) so later + steps inherit a clear foundation. +- **Bundle defaults**: use the DSL helpers to attach common expectations (like + liveness) whenever you add a matching workload, reducing forgotten checks. +- **Intentional rates**: express traffic in per-block terms to align with + protocol timing rather than wall-clock assumptions. 
+- **Opt-in chaos**: enable restart patterns only in scenarios meant to probe + resilience; keep functional smoke tests deterministic. +- **Wallet clarity**: seed only the number of actors you need; it keeps + transaction scenarios deterministic and interpretable. + +These patterns make scenario definitions self-explanatory while staying aligned +with the framework’s block-oriented timing model. diff --git a/book/src/scenario-lifecycle.md b/book/src/scenario-lifecycle.md new file mode 100644 index 0000000..839fbd5 --- /dev/null +++ b/book/src/scenario-lifecycle.md @@ -0,0 +1,18 @@ +# Scenario Lifecycle + +1. **Build the plan**: Declare a topology, attach workloads and expectations, and set the run window. The plan is the single source of truth for what will happen. +2. **Deploy**: Hand the plan to a deployer. It provisions the environment on the chosen backend, waits for nodes to signal readiness, and returns a runner. +3. **Drive workloads**: The runner starts traffic and behaviors (transactions, data-availability activity, restarts) for the planned duration. +4. **Observe blocks and signals**: Track block progression and other high-level metrics during or after the run window to ground assertions in protocol time. +5. **Evaluate expectations**: Once activity stops (and optional cooldown completes), the runner checks liveness and workload-specific outcomes to decide pass or fail. +6. **Cleanup**: Tear down resources so successive runs start fresh and do not inherit leaked state. + +```mermaid +flowchart LR + P[Plan
topology + workloads + expectations] --> D[Deploy
deployer provisions] + D --> R[Runner
orchestrates execution] + R --> W[Drive Workloads] + W --> O[Observe
blocks/metrics] + O --> E[Evaluate Expectations] + E --> C[Cleanup] +``` diff --git a/book/src/scenario-model.md b/book/src/scenario-model.md new file mode 100644 index 0000000..ea742c0 --- /dev/null +++ b/book/src/scenario-model.md @@ -0,0 +1,23 @@ +# Scenario Model (Developer Level) + +The scenario model defines clear, composable responsibilities: + +- **Topology**: a declarative description of the cluster—how many nodes, their + roles, and the broad network and data-availability characteristics. It + represents the intended shape of the system under test. +- **Scenario**: a plan combining topology, workloads, expectations, and a run + window. Building a scenario validates prerequisites (like seeded wallets) and + ensures the run lasts long enough to observe meaningful block progression. +- **Workloads**: asynchronous tasks that generate traffic or conditions. They + use shared context to interact with the deployed cluster and may bundle + default expectations. +- **Expectations**: post-run assertions. They can capture baselines before + workloads start and evaluate success once activity stops. +- **Runtime**: coordinates workloads and expectations for the configured + duration, enforces cooldowns when control actions occur, and ensures cleanup + so runs do not leak resources. + +Developers extending the model should keep these boundaries strict: topology +describes, scenarios assemble, deployers provision, runners orchestrate, +workloads drive, and expectations judge outcomes. For guidance on adding new +capabilities, see [Extending the Framework](extending.md). diff --git a/book/src/testing-philosophy.md b/book/src/testing-philosophy.md new file mode 100644 index 0000000..4169333 --- /dev/null +++ b/book/src/testing-philosophy.md @@ -0,0 +1,155 @@ +# Testing Philosophy + +This framework embodies specific principles that shape how you author and run +scenarios. Understanding these principles helps you write effective tests and +interpret results correctly. 
+ +## Declarative over Imperative + +Describe **what** you want to test, not **how** to orchestrate it: + +```rust +// Good: declarative +ScenarioBuilder::topology() + .network_star() + .validators(2) + .executors(1) + .apply() + .transactions() + .rate(5) // 5 transactions per block + .apply() + .expect_consensus_liveness() + .build(); + +// Bad: imperative (framework doesn't work this way) +// spawn_validator(); spawn_executor(); +// loop { submit_tx(); check_block(); } +``` + +**Why it matters:** The framework handles deployment, readiness, and cleanup. +You focus on test intent, not infrastructure orchestration. + +## Protocol Time, Not Wall Time + +Reason in **blocks** and **consensus intervals**, not wall-clock seconds. + +**Consensus defaults:** +- Slot duration: 2 seconds (NTP-synchronized, configurable via `CONSENSUS_SLOT_TIME`) +- Active slot coefficient: 0.9 (90% block probability per slot) +- Expected rate: ~27 blocks per minute + +```rust +// Good: protocol-oriented thinking +let plan = ScenarioBuilder::topology() + .network_star() + .validators(2) + .executors(1) + .apply() + .transactions() + .rate(5) // 5 transactions per block + .apply() + .with_run_duration(Duration::from_secs(60)) // Let framework calculate expected blocks + .expect_consensus_liveness() // "Did we produce the expected blocks?" + .build(); + +// Bad: wall-clock assumptions +// "I expect exactly 30 blocks in 60 seconds" +// This breaks on slow CI where slot timing might drift +``` + +**Why it matters:** Slot timing is fixed (2s by default, NTP-synchronized), so the +expected number of blocks is predictable: ~27 blocks in 60s with the default +0.9 active slot coefficient. The framework calculates expected blocks from slot +duration and run window, making assertions protocol-based rather than tied to +specific wall-clock expectations. Assert on "blocks produced relative to slots" +not "blocks produced in exact wall-clock seconds". 
+ +## Determinism First, Chaos When Needed + +**Default scenarios are repeatable:** +- Fixed topology +- Predictable traffic rates +- Deterministic checks + +**Chaos is opt-in:** +```rust +// Separate: functional test (deterministic) +let plan = ScenarioBuilder::topology() + .network_star() + .validators(2) + .executors(1) + .apply() + .transactions() + .rate(5) // 5 transactions per block + .apply() + .expect_consensus_liveness() + .build(); + +// Separate: chaos test (introduces randomness) +let chaos_plan = ScenarioBuilder::topology() + .network_star() + .validators(3) + .executors(2) + .apply() + .enable_node_control() + .chaos() + .restart() + .apply() + .transactions() + .rate(5) // 5 transactions per block + .apply() + .expect_consensus_liveness() + .build(); +``` + +**Why it matters:** Mixing determinism with chaos creates noisy, hard-to-debug +failures. Separate concerns make failures actionable. + +## Observable Health Signals + +Prefer **user-facing signals** over internal state: + +**Good checks:** +- Blocks progressing at expected rate (liveness) +- Transactions included within N blocks (inclusion) +- DA blobs retrievable (availability) + +**Avoid internal checks:** +- Memory pool size +- Internal service state +- Cache hit rates + +**Why it matters:** User-facing signals reflect actual system health. +Internal state can be "healthy" while the system is broken from a user +perspective. 
+ +## Minimum Run Windows + +Always run long enough for **meaningful block production**: + +```rust +// Bad: too short +.with_run_duration(Duration::from_secs(5)) // ~2 blocks (with default 2s slots, 0.9 coeff) + +// Good: enough blocks for assertions +.with_run_duration(Duration::from_secs(60)) // ~27 blocks (with default 2s slots, 0.9 coeff) +``` + +**Note:** Block counts assume default consensus parameters: +- Slot duration: 2 seconds (configurable via `CONSENSUS_SLOT_TIME`) +- Active slot coefficient: 0.9 (90% block probability per slot) +- Formula: `blocks ≈ (duration / slot_duration) × active_slot_coeff` + +If upstream changes these parameters, adjust your duration expectations accordingly. + +The framework enforces minimum durations (at least 2× slot duration), but be explicit. Very short runs risk false confidence—one lucky block doesn't prove liveness. + +## Summary + +These principles keep scenarios: +- **Portable** across environments (protocol time, declarative) +- **Debuggable** (determinism, separation of concerns) +- **Meaningful** (observable signals, sufficient duration) + +When authoring scenarios, ask: "Does this test the protocol behavior or +my local environment quirks?" diff --git a/book/src/topology-chaos.md b/book/src/topology-chaos.md new file mode 100644 index 0000000..4cf25e8 --- /dev/null +++ b/book/src/topology-chaos.md @@ -0,0 +1,33 @@ +# Topology & Chaos Patterns + +This page focuses on cluster manipulation: node control, chaos patterns, and +what the tooling supports today. + +## Node control availability +- **Supported**: restart/peer control via `NodeControlHandle` (compose runner). +- **Not supported**: local runner does not expose node control; k8s runner does + not support it yet. + +## Chaos patterns to consider +- **Restarts**: random restarts with minimum delay/cooldown to test recovery. +- **Partitions**: block/unblock peers to simulate partial isolation, then assert + height convergence after healing. 
+- **Validator churn**: stop one validator and start another (new key) mid-run to + test membership changes; expect convergence. +- **Load SLOs**: push tx/DA rates and assert inclusion/availability budgets + instead of only liveness. +- **API probes**: poll HTTP/RPC endpoints during chaos to ensure external + contracts stay healthy (shape + latency). + +## Expectations to pair +- **Liveness/height convergence** after chaos windows. +- **SLO checks**: inclusion latency, DA responsiveness, API latency/shape. +- **Recovery checks**: ensure nodes that were isolated or restarted catch up to + cluster height within a timeout. + +## Guidance +- Keep chaos realistic: avoid flapping or patterns you wouldn't operate in prod. +- Scope chaos: choose validators vs executors intentionally; don't restart all + nodes at once unless you're testing full outages. +- Combine chaos with observability: capture block feed/metrics and API health so + failures are diagnosable. diff --git a/book/src/troubleshooting.md b/book/src/troubleshooting.md new file mode 100644 index 0000000..f07a466 --- /dev/null +++ b/book/src/troubleshooting.md @@ -0,0 +1,249 @@ +# Troubleshooting Scenarios + +**Prerequisites for All Runners:** +- **`POL_PROOF_DEV_MODE=true`** MUST be set for all runners (local, compose, k8s) to avoid expensive Groth16 proof generation that causes timeouts +- **KZG circuit assets** must be present at `testing-framework/assets/stack/kzgrs_test_params/` for DA workloads (fetch via `scripts/setup-nomos-circuits.sh`) + +## Quick Symptom Guide + +Common symptoms and likely causes: + +- **No or slow block progression**: missing `POL_PROOF_DEV_MODE=true`, missing KZG circuit assets for DA workloads, too-short run window, port conflicts, or resource exhaustion—set required env vars, verify assets, extend duration, check node logs for startup errors. 
- **Transactions not included**: unfunded or misconfigured wallets (check `.wallets(N)` vs `.users(M)`), or a transaction rate that exceeds block capacity or block production speed—reduce the rate, increase the wallet count, and verify wallet setup in logs.
By default, rely on `docker logs` or `kubectl logs`. +- **File naming**: Log files use prefix `nomos-node-{index}*` or `nomos-executor-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix). +- **Container names**: Compose containers include project UUID, e.g., `nomos-compose--validator-0-1` where `` is randomly generated per run + +### Accessing Node Logs by Runner + +#### Local Runner + +**Console output (default):** +```bash +POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner 2>&1 | tee test.log +``` + +**Persistent file output:** +```bash +NOMOS_TESTS_TRACING=true \ +NOMOS_LOG_DIR=/tmp/debug-logs \ +NOMOS_LOG_LEVEL=debug \ +POL_PROOF_DEV_MODE=true \ +cargo run -p runner-examples --bin local_runner + +# Inspect logs (note: filenames include timestamps): +ls /tmp/debug-logs/ +# Example: nomos-node-0.2024-12-01T10-30-45.log +tail -f /tmp/debug-logs/nomos-node-0* # Use wildcard to match timestamp +``` + +#### Compose Runner + +**Stream live logs:** +```bash +# List running containers (note the UUID prefix in names) +docker ps --filter "name=nomos-compose-" + +# Find your container ID or name from the list, then: +docker logs -f + +# Or filter by name pattern: +docker logs -f $(docker ps --filter "name=nomos-compose-.*-validator-0" -q | head -1) + +# Show last 100 lines +docker logs --tail 100 +``` + +**Keep containers for post-mortem debugging:** +```bash +COMPOSE_RUNNER_PRESERVE=1 \ +NOMOS_TESTNET_IMAGE=nomos-testnet:local \ +cargo run -p runner-examples --bin compose_runner + +# After test failure, containers remain running: +docker ps --filter "name=nomos-compose-" +docker exec -it /bin/sh +docker logs > debug.log +``` + +**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1` or `nomos-compose-{uuid}-executor-{index}-1`, where `{uuid}` is randomly generated per run. 
+ +#### K8s Runner + +**Important:** Always verify your namespace and use label selectors instead of assuming pod names. + +**Stream pod logs (use label selectors):** +```bash +# Check your namespace first +kubectl config view --minify | grep namespace + +# All validator pods (add -n if not using default) +kubectl logs -l app=nomos-validator -f + +# All executor pods +kubectl logs -l app=nomos-executor -f + +# Specific pod by name (find exact name first) +kubectl get pods -l app=nomos-validator # Find the exact pod name +kubectl logs -f # Then use it + +# With explicit namespace +kubectl logs -n my-namespace -l app=nomos-validator -f +``` + +**Download logs from crashed pods:** +```bash +# Previous logs from crashed pod +kubectl get pods -l app=nomos-validator # Find crashed pod name first +kubectl logs --previous > crashed-validator.log + +# Or use label selector for all crashed validators +for pod in $(kubectl get pods -l app=nomos-validator -o name); do + kubectl logs --previous $pod > $(basename $pod)-previous.log 2>&1 +done +``` + +**Access logs from all pods:** +```bash +# All pods in current namespace +for pod in $(kubectl get pods -o name); do + echo "=== $pod ===" + kubectl logs $pod +done > all-logs.txt + +# Or use label selectors (recommended) +kubectl logs -l app=nomos-validator --tail=500 > validators.log +kubectl logs -l app=nomos-executor --tail=500 > executors.log + +# With explicit namespace +kubectl logs -n my-namespace -l app=nomos-validator --tail=500 > validators.log +``` + +## Debugging Workflow + +When a test fails, follow this sequence: + +### 1. Check Framework Output +Start with the test harness output—did expectations fail? Was there a deployment error? + +**Look for:** +- Expectation failure messages +- Timeout errors +- Deployment/readiness failures + +### 2. Verify Node Readiness +Ensure all nodes started successfully and became ready before workloads began. 
+ +**Commands:** +```bash +# Local: check process list +ps aux | grep nomos + +# Compose: check container status (note UUID in names) +docker ps -a --filter "name=nomos-compose-" + +# K8s: check pod status (use label selectors, add -n if needed) +kubectl get pods -l app=nomos-validator +kubectl get pods -l app=nomos-executor +kubectl describe pod # Get name from above first +``` + +### 3. Inspect Node Logs +Focus on the first node that exhibited problems or the node with the highest index (often the last to start). + +**Common error patterns:** +- "Failed to bind address" → port conflict +- "Connection refused" → peer not ready or network issue +- "Proof verification failed" or "Proof generation timeout" → missing `POL_PROOF_DEV_MODE=true` (REQUIRED for all runners) +- "Failed to load KZG parameters" or "Circuit file not found" → missing KZG circuit assets at `testing-framework/assets/stack/kzgrs_test_params/` +- "Insufficient funds" → wallet seeding issue (increase `.wallets(N)` or reduce `.users(M)`) + +### 4. Check Log Levels +If logs are too sparse, increase verbosity: + +```bash +NOMOS_LOG_LEVEL=debug \ +NOMOS_LOG_FILTER="nomos_consensus=trace,nomos_da_sampling=debug" \ +cargo run -p runner-examples --bin local_runner +``` + +### 5. Verify Observability Endpoints +If expectations report observability issues: + +**Prometheus (Compose):** +```bash +curl http://localhost:9090/-/healthy +``` + +**Node HTTP APIs:** +```bash +curl http://localhost:18080/consensus/info # Adjust port per node +``` + +### 6. Compare with Known-Good Scenario +Run a minimal baseline test (e.g., 2 validators, consensus liveness only). If it passes, the issue is in your workload or topology configuration. + +## Common Error Messages + +### "Consensus liveness expectation failed" +- **Cause**: Not enough blocks produced during run window, missing `POL_PROOF_DEV_MODE=true` (causes slow proof generation), or missing KZG assets for DA workloads +- **Fix**: + 1. 
Verify `POL_PROOF_DEV_MODE=true` is set (REQUIRED for all runners) + 2. Verify KZG assets exist at `testing-framework/assets/stack/kzgrs_test_params/` (for DA workloads) + 3. Extend `with_run_duration()` to allow more blocks + 4. Check node logs for proof generation or DA errors + 5. Reduce transaction/DA rate if nodes are overwhelmed + +### "Wallet seeding failed" +- **Cause**: Topology doesn't have enough funded wallets for the workload +- **Fix**: Increase `.wallets(N)` count or reduce `.users(M)` in transaction workload (ensure N ≥ M) + +### "Node control not available" +- **Cause**: Runner doesn't support node control (only ComposeDeployer does), or `enable_node_control()` wasn't called +- **Fix**: + 1. Use ComposeDeployer for chaos tests (LocalDeployer and K8sDeployer don't support node control) + 2. Ensure `.enable_node_control()` is called in scenario before `.chaos()` + +### "Readiness timeout" +- **Cause**: Nodes didn't become responsive within expected time (often due to missing prerequisites) +- **Fix**: + 1. **Verify `POL_PROOF_DEV_MODE=true` is set** (REQUIRED for all runners—without it, proof generation is too slow) + 2. Check node logs for startup errors (port conflicts, missing assets) + 3. Verify network connectivity between nodes + 4. For DA workloads, ensure KZG circuit assets are present + +### "Port already in use" +- **Cause**: Previous test didn't clean up, or another process holds the port +- **Fix**: Kill orphaned processes (`pkill nomos-node`), wait for Docker cleanup (`docker compose down`), or restart Docker + +### "Image not found: nomos-testnet:local" +- **Cause**: Docker image not built for Compose/K8s runners, or KZG assets not baked into image +- **Fix**: + 1. Fetch KZG assets: `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits` + 2. Copy to assets: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/` + 3. 
Build image: `testing-framework/assets/stack/scripts/build_test_image.sh` + +### "Failed to load KZG parameters" or "Circuit file not found" +- **Cause**: DA workload requires KZG circuit assets that aren't present +- **Fix**: + 1. Fetch assets: `scripts/setup-nomos-circuits.sh v0.3.1 /tmp/nomos-circuits` + 2. Copy to expected path: `cp -r /tmp/nomos-circuits/* testing-framework/assets/stack/kzgrs_test_params/` + 3. For Compose/K8s: rebuild image with assets baked in + +For detailed logging configuration and observability setup, see [Operations](operations.md). diff --git a/book/src/usage-patterns.md b/book/src/usage-patterns.md new file mode 100644 index 0000000..d95ba77 --- /dev/null +++ b/book/src/usage-patterns.md @@ -0,0 +1,7 @@ +# Usage Patterns + +- **Shape a topology, pick a runner**: choose local for quick iteration, compose for reproducible multi-node stacks with observability, or k8s for cluster-grade validation. +- **Compose workloads deliberately**: pair transactions and data-availability traffic for end-to-end coverage; add chaos only when assessing recovery and resilience. +- **Align expectations with goals**: use liveness-style checks to confirm the system keeps up with planned activity, and add workload-specific assertions for inclusion or availability. +- **Reuse plans across environments**: keep the scenario constant while swapping runners to compare behavior between developer machines and CI clusters. +- **Iterate with clear signals**: treat expectation outcomes as the primary pass/fail indicator, and adjust topology or workloads based on what those signals reveal. 
diff --git a/book/src/what-you-will-learn.md b/book/src/what-you-will-learn.md new file mode 100644 index 0000000..294339b --- /dev/null +++ b/book/src/what-you-will-learn.md @@ -0,0 +1,6 @@ +# What You Will Learn + +This book gives you a clear mental model for Nomos multi-node testing, shows how +to author scenarios that pair realistic workloads with explicit expectations, +and guides you to run them across local, containerized, and cluster environments +without changing the plan. diff --git a/book/src/workloads.md b/book/src/workloads.md new file mode 100644 index 0000000..145df58 --- /dev/null +++ b/book/src/workloads.md @@ -0,0 +1,30 @@ +# Core Content: Workloads & Expectations + +Workloads describe the activity a scenario generates; expectations describe the +signals that must hold when that activity completes. Both are pluggable so +scenarios stay readable and purpose-driven. + +## Workloads +- **Transaction workload**: submits user-level transactions at a configurable + rate and can limit how many distinct actors participate. +- **Data-availability workload**: drives blob and channel activity to exercise + data-availability paths. +- **Chaos workload**: triggers controlled node restarts to test resilience and + recovery behaviors (requires a runner that can control nodes). + +## Expectations +- **Consensus liveness**: verifies the system continues to produce blocks in + line with the planned workload and timing window. +- **Workload-specific checks**: each workload can attach its own success + criteria (e.g., inclusion of submitted activity) so scenarios remain concise. + +Together, workloads and expectations let you express both the pressure applied +to the system and the definition of “healthy” for that run. + +```mermaid +flowchart TD + I[Inputs
topology + wallets + rates] --> Init[Workload init] + Init --> Drive[Drive traffic] + Drive --> Collect[Collect signals] + Collect --> Eval[Expectations evaluate] +``` diff --git a/book/src/workspace-layout.md b/book/src/workspace-layout.md new file mode 100644 index 0000000..366900a --- /dev/null +++ b/book/src/workspace-layout.md @@ -0,0 +1,20 @@ +# Workspace Layout + +The workspace focuses on multi-node integration testing and sits alongside a +`nomos-node` checkout. Its crates separate concerns to keep scenarios +repeatable and portable: + +- **Configs**: prepares high-level node, network, tracing, and wallet settings + used across test environments. +- **Core scenario orchestration**: the engine that holds topology descriptions, + scenario plans, runtimes, workloads, and expectations. +- **Workflows**: ready-made workloads (transactions, data-availability, chaos) + and reusable expectations assembled into a user-facing DSL. +- **Runners**: deployment backends for local processes, Docker Compose, and + Kubernetes, all consuming the same scenario plan. +- **Runner Examples** (`examples/runner-examples`): runnable binaries + (`local_runner.rs`, `compose_runner.rs`, `k8s_runner.rs`) that demonstrate + complete scenario execution with each deployer. + +This split keeps configuration, orchestration, reusable traffic patterns, and +deployment adapters loosely coupled while sharing one mental model for tests. 
diff --git a/book/theme/highlight-github.css b/book/theme/highlight-github.css new file mode 100644 index 0000000..56916f7 --- /dev/null +++ b/book/theme/highlight-github.css @@ -0,0 +1,45 @@ +/* GitHub-like light highlighting */ +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + background: #f6f8fa; + color: #24292e; +} +.hljs-comment, +.hljs-quote { + color: #6a737d; + font-style: italic; +} +.hljs-keyword, +.hljs-selector-tag, +.hljs-type { + color: #d73a49; +} +.hljs-string, +.hljs-title, +.hljs-name, +.hljs-attr, +.hljs-symbol, +.hljs-bullet { + color: #005cc5; +} +.hljs-number, +.hljs-literal { + color: #005cc5; +} +.hljs-section, +.hljs-selector-id, +.hljs-selector-class { + color: #22863a; +} +.hljs-built_in, +.hljs-type { + color: #6f42c1; +} +.hljs-emphasis { + font-style: italic; +} +.hljs-strong { + font-weight: bold; +} diff --git a/book/theme/highlight-init.js b/book/theme/highlight-init.js new file mode 100644 index 0000000..db22591 --- /dev/null +++ b/book/theme/highlight-init.js @@ -0,0 +1,17 @@ +(function () { + const highlight = (attempt = 0) => { + if (window.hljs) { + window.hljs.highlightAll(); + return; + } + if (attempt < 10) { + setTimeout(() => highlight(attempt + 1), 100); + } + }; + + if (document.readyState === "loading") { + document.addEventListener("DOMContentLoaded", () => highlight()); + } else { + highlight(); + } +})(); diff --git a/book/theme/mermaid-init.js b/book/theme/mermaid-init.js new file mode 100644 index 0000000..d129a17 --- /dev/null +++ b/book/theme/mermaid-init.js @@ -0,0 +1,43 @@ +// Lightweight client-side Mermaid rendering for mdBook. 
+(function () { + const CDN = "https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.min.js"; + + function loadMermaid(cb) { + if (window.mermaid) { + cb(); + return; + } + const script = document.createElement("script"); + script.src = CDN; + script.onload = cb; + script.onerror = () => console.warn("Failed to load mermaid from CDN:", CDN); + document.head.appendChild(script); + } + + function renderMermaidBlocks() { + const codeBlocks = Array.from( + document.querySelectorAll("pre code.language-mermaid") + ); + if (codeBlocks.length === 0) { + return; + } + + codeBlocks.forEach((codeBlock, idx) => { + const pre = codeBlock.parentElement; + const container = document.createElement("div"); + container.className = "mermaid"; + container.textContent = codeBlock.textContent; + container.id = `mermaid-diagram-${idx}`; + pre.replaceWith(container); + }); + + if (window.mermaid) { + window.mermaid.initialize({ startOnLoad: false }); + window.mermaid.run(); + } + } + + document.addEventListener("DOMContentLoaded", () => { + loadMermaid(renderMermaidBlocks); + }); +})(); diff --git a/book/theme/mermaid-overlay.js b/book/theme/mermaid-overlay.js new file mode 100644 index 0000000..c9a8d04 --- /dev/null +++ b/book/theme/mermaid-overlay.js @@ -0,0 +1,57 @@ +(function () { + const openOverlay = (svg) => { + const overlay = document.createElement("div"); + overlay.className = "mermaid-overlay"; + + const content = document.createElement("div"); + content.className = "mermaid-overlay__content"; + + const clone = svg.cloneNode(true); + clone.removeAttribute("width"); + clone.removeAttribute("height"); + clone.style.width = "95vw"; + clone.style.maxWidth = "1400px"; + clone.style.height = "auto"; + clone.style.display = "block"; + clone.style.margin = "0 auto"; + + content.appendChild(clone); + overlay.appendChild(content); + document.body.appendChild(overlay); + + const close = () => overlay.remove(); + overlay.addEventListener("click", close); + document.addEventListener( + 
"keydown", + (e) => { + if (e.key === "Escape") { + close(); + } + }, + { once: true } + ); + }; + + const bind = () => { + document.querySelectorAll(".mermaid svg").forEach((svg) => { + if (svg.dataset.overlayBound === "true") { + return; + } + svg.style.cursor = "zoom-in"; + svg.addEventListener("click", () => openOverlay(svg)); + svg.dataset.overlayBound = "true"; + }); + }; + + const init = () => { + bind(); + // Mermaid renders asynchronously; bind again after a short delay. + setTimeout(bind, 500); + }; + + if (document.readyState === "loading") { + document.addEventListener("DOMContentLoaded", init); + } else { + init(); + } +})(); diff --git a/book/theme/mermaid.css b/book/theme/mermaid.css new file mode 100644 index 0000000..6fd2b3f --- /dev/null +++ b/book/theme/mermaid.css @@ -0,0 +1,38 @@ +.mermaid { + max-width: 100%; + overflow-x: auto; +} + +.mermaid svg { + width: 100% !important; + height: auto !important; +} + +.mermaid-overlay { + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.75); + display: flex; + align-items: center; + justify-content: center; + z-index: 9999; + cursor: zoom-out; +} + +.mermaid-overlay__content { + background: #fff; + padding: 16px; + max-width: 95vw; + max-height: 95vh; + overflow: auto; + border-radius: 8px; + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.25); +} + +.mermaid-overlay__content svg { + width: 95vw !important; + max-width: 1400px; + height: auto !important; + display: block; + margin: 0 auto; +} diff --git a/examples/Cargo.toml b/examples/Cargo.toml new file mode 100644 index 0000000..5e278dc --- /dev/null +++ b/examples/Cargo.toml @@ -0,0 +1,23 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "runner-examples" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[dependencies] +testing-framework-core = { workspace = true } +testing-framework-runner-compose = { workspace = 
true } +testing-framework-runner-k8s = { workspace = true } +testing-framework-runner-local = { workspace = true } +testing-framework-workflows = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] } +tracing = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } + +[lints] +workspace = true diff --git a/examples/src/bin/compose_runner.rs b/examples/src/bin/compose_runner.rs new file mode 100644 index 0000000..e76786f --- /dev/null +++ b/examples/src/bin/compose_runner.rs @@ -0,0 +1,98 @@ +use std::time::Duration; + +use runner_examples::{ChaosBuilderExt as _, ScenarioBuilderExt as _}; +use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder}; +use testing_framework_runner_compose::{ComposeDeployer, ComposeRunnerError}; +use tracing::{info, warn}; + +const DEFAULT_VALIDATORS: usize = 1; +const DEFAULT_EXECUTORS: usize = 1; +const DEFAULT_RUN_SECS: u64 = 60; +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::init(); + + let validators = read_env("COMPOSE_DEMO_VALIDATORS", DEFAULT_VALIDATORS); + let executors = read_env("COMPOSE_DEMO_EXECUTORS", DEFAULT_EXECUTORS); + let run_secs = read_env("COMPOSE_DEMO_RUN_SECS", DEFAULT_RUN_SECS); + info!( + validators, + executors, run_secs, "starting compose runner demo" + ); + + if let Err(err) = run_compose_case(validators, executors, Duration::from_secs(run_secs)).await { + warn!("compose runner demo failed: {err}"); + std::process::exit(1); + } +} + +#[rustfmt::skip] +async fn run_compose_case( + validators: usize, + executors: usize, + run_duration: Duration, +) -> Result<(), Box> { + info!( + validators, + executors, + duration_secs = run_duration.as_secs(), + "building scenario plan" + ); + + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(validators) + 
.executors(executors) + .apply() + .enable_node_control() + .chaos() + .restart() + // Keep chaos restarts outside the test run window to avoid crash loops on restart. + .min_delay(Duration::from_secs(120)) + .max_delay(Duration::from_secs(180)) + .target_cooldown(Duration::from_secs(240)) + .apply() + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply() + .with_run_duration(run_duration) + .expect_consensus_liveness() + .build(); + + let deployer = ComposeDeployer::new(); + info!("deploying compose stack"); + let runner: Runner = match deployer.deploy(&plan).await { + Ok(runner) => runner, + Err(ComposeRunnerError::DockerUnavailable) => { + warn!("Docker is unavailable; cannot run compose demo"); + return Ok(()); + } + Err(err) => return Err(err.into()), + }; + if !runner.context().telemetry().is_configured() { + warn!("compose runner should expose prometheus metrics"); + } + + info!("running scenario"); + runner.run(&mut plan).await.map(|_| ()).map_err(Into::into) +} + +fn read_env(key: &str, default: T) -> T +where + T: std::str::FromStr + Copy, +{ + std::env::var(key) + .ok() + .and_then(|raw| raw.parse::().ok()) + .unwrap_or(default) +} diff --git a/examples/src/bin/k8s_runner.rs b/examples/src/bin/k8s_runner.rs new file mode 100644 index 0000000..1236c9e --- /dev/null +++ b/examples/src/bin/k8s_runner.rs @@ -0,0 +1,109 @@ +use std::time::Duration; + +use runner_examples::ScenarioBuilderExt as _; +use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder}; +use testing_framework_runner_k8s::{K8sDeployer, K8sRunnerError}; +use tracing::{info, warn}; + +const DEFAULT_RUN_SECS: u64 = 60; +const DEFAULT_VALIDATORS: usize = 1; +const DEFAULT_EXECUTORS: usize = 1; +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; + +#[tokio::main] +async fn main() { + 
tracing_subscriber::fmt::init(); + + let validators = read_env("K8S_DEMO_VALIDATORS", DEFAULT_VALIDATORS); + let executors = read_env("K8S_DEMO_EXECUTORS", DEFAULT_EXECUTORS); + let run_secs = read_env("K8S_DEMO_RUN_SECS", DEFAULT_RUN_SECS); + info!(validators, executors, run_secs, "starting k8s runner demo"); + + if let Err(err) = run_k8s_case(validators, executors, Duration::from_secs(run_secs)).await { + warn!("k8s runner demo failed: {err}"); + std::process::exit(1); + } +} + +#[rustfmt::skip] +async fn run_k8s_case( + validators: usize, + executors: usize, + run_duration: Duration, +) -> Result<(), Box> { + info!( + validators, + executors, + duration_secs = run_duration.as_secs(), + "building scenario plan" + ); + let mut plan = ScenarioBuilder::topology() + .network_star() + .validators(validators) + .executors(executors) + .apply() + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply() + .with_run_duration(run_duration) + .expect_consensus_liveness() + .build(); + + let deployer = K8sDeployer::new(); + info!("deploying k8s stack"); + let runner: Runner = match deployer.deploy(&plan).await { + Ok(runner) => runner, + Err(K8sRunnerError::ClientInit { source }) => { + warn!("Kubernetes cluster unavailable ({source}); skipping"); + return Ok(()); + } + Err(err) => return Err(err.into()), + }; + + if !runner.context().telemetry().is_configured() { + warn!("k8s runner should expose prometheus metrics"); + } + + let validator_clients = runner.context().node_clients().validator_clients().to_vec(); + + info!("running scenario"); + let _handle = runner + .run(&mut plan) + .await + .map(|_| ()) + .map_err(|err| format!("k8s scenario failed: {err}"))?; + + for (idx, client) in validator_clients.iter().enumerate() { + let info = client + .consensus_info() + .await + .map_err(|err| format!("validator {idx} consensus_info failed: {err}"))?; + if info.height < 5 
{ + return Err(format!( + "validator {idx} height {} should reach at least 5 blocks", + info.height + ) + .into()); + } + } + + Ok(()) +} + +fn read_env(key: &str, default: T) -> T +where + T: std::str::FromStr + Copy, +{ + std::env::var(key) + .ok() + .and_then(|raw| raw.parse::().ok()) + .unwrap_or(default) +} diff --git a/examples/src/bin/local_runner.rs b/examples/src/bin/local_runner.rs new file mode 100644 index 0000000..0e796ff --- /dev/null +++ b/examples/src/bin/local_runner.rs @@ -0,0 +1,84 @@ +use std::time::Duration; + +use runner_examples::ScenarioBuilderExt as _; +use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder}; +use testing_framework_runner_local::LocalDeployer; +use tracing::{info, warn}; + +const DEFAULT_VALIDATORS: usize = 1; +const DEFAULT_EXECUTORS: usize = 1; +const DEFAULT_RUN_SECS: u64 = 60; +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::init(); + + if std::env::var("POL_PROOF_DEV_MODE").is_err() { + warn!("POL_PROOF_DEV_MODE=true is required for the local runner demo"); + std::process::exit(1); + } + + let validators = read_env("LOCAL_DEMO_VALIDATORS", DEFAULT_VALIDATORS); + let executors = read_env("LOCAL_DEMO_EXECUTORS", DEFAULT_EXECUTORS); + let run_secs = read_env("LOCAL_DEMO_RUN_SECS", DEFAULT_RUN_SECS); + info!( + validators, + executors, run_secs, "starting local runner demo" + ); + + if let Err(err) = run_local_case(validators, executors, Duration::from_secs(run_secs)).await { + warn!("local runner demo failed: {err}"); + std::process::exit(1); + } +} + +#[rustfmt::skip] +async fn run_local_case( + validators: usize, + executors: usize, + run_duration: Duration, +) -> Result<(), Box> { + info!( + validators, + executors, + duration_secs = run_duration.as_secs(), + "building scenario plan" + ); + let mut plan = ScenarioBuilder::topology() + .network_star() + 
.validators(validators) + .executors(executors) + .apply() + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply() + .with_run_duration(run_duration) + .expect_consensus_liveness() + .build(); + + let deployer = LocalDeployer::default(); + info!("deploying local nodes"); + let runner: Runner = deployer.deploy(&plan).await?; + info!("running scenario"); + runner.run(&mut plan).await.map(|_| ())?; + Ok(()) +} + +fn read_env(key: &str, default: T) -> T +where + T: std::str::FromStr + Copy, +{ + std::env::var(key) + .ok() + .and_then(|raw| raw.parse::().ok()) + .unwrap_or(default) +} diff --git a/examples/src/lib.rs b/examples/src/lib.rs new file mode 100644 index 0000000..8738278 --- /dev/null +++ b/examples/src/lib.rs @@ -0,0 +1,11 @@ +use testing_framework_core::scenario::Metrics; +pub use testing_framework_workflows::{ + builder::{ChaosBuilderExt, ScenarioBuilderExt}, + expectations, util, workloads, +}; + +/// Metrics are currently disabled in this branch; return a stub handle. +#[must_use] +pub const fn configure_prometheus_metrics() -> Metrics { + Metrics::empty() +} diff --git a/examples/tests/local_runner_bin_smoke.rs b/examples/tests/local_runner_bin_smoke.rs new file mode 100644 index 0000000..2ba0ef5 --- /dev/null +++ b/examples/tests/local_runner_bin_smoke.rs @@ -0,0 +1,28 @@ +use std::process::Command; + +// Manually run the local runner binary as a smoke test. 
+// This spins up real nodes and should be invoked explicitly: +// POL_PROOF_DEV_MODE=true cargo test -p runner-examples --test +// local_runner_bin_smoke -- --ignored --nocapture +#[test] +#[ignore = "runs local_runner binary (~2min) and requires local assets/binaries"] +fn local_runner_bin_smoke() { + let status = Command::new("cargo") + .args([ + "run", + "-p", + "runner-examples", + "--bin", + "local_runner", + "--", + "--nocapture", + ]) + .env("POL_PROOF_DEV_MODE", "true") + .env("LOCAL_DEMO_RUN_SECS", "120") + .env("LOCAL_DEMO_VALIDATORS", "1") + .env("LOCAL_DEMO_EXECUTORS", "1") + .status() + .expect("failed to spawn cargo run"); + + assert!(status.success(), "local runner binary exited with {status}"); +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..1b79eb8 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,12 @@ +[toolchain] +# Keep this version in sync also in the following places: +# * Dockerfile +# * flake.nix +# * testing-framework/assets/stack/Dockerfile +# Also, update the version of the nightly toolchain to the latest nightly of the new version specified in the following places: +# * .github/workflows/code-check.yml (fmt job) +# * .pre-commit-config.yml (fmt hook) +# Then, if there is any new allow-by-default rustc lint introduced/stabilized, add it to the respective entry in our `config.toml`. +channel = "nightly-2025-09-14" +# Even if clippy should be included in the default profile, in some cases it is not installed. So we force it with an explicit declaration. 
+components = ["clippy", "rustfmt"] diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..4efac2b --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,5 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Crate" +reorder_imports = true +reorder_modules = true +wrap_comments = true diff --git a/scripts/build-rapidsnark.sh b/scripts/build-rapidsnark.sh new file mode 100755 index 0000000..ab0e8b8 --- /dev/null +++ b/scripts/build-rapidsnark.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# +# Rebuild the rapidsnark prover for the current architecture. +# +# Usage: ./scripts/build-rapidsnark.sh + +set -euo pipefail + +if [ $# -lt 1 ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +TARGET_ARCH="$(uname -m)" +CIRCUITS_DIR="$1" +RAPIDSNARK_REPO="${RAPIDSNARK_REPO:-https://github.com/iden3/rapidsnark.git}" +RAPIDSNARK_REF="${RAPIDSNARK_REF:-main}" +FORCE_REBUILD="${RAPIDSNARK_FORCE_REBUILD:-0}" +BUILD_DIR="" +PACKAGE_DIR="" +CMAKE_TARGET_PLATFORM="" +USE_ASM="${RAPIDSNARK_USE_ASM:-ON}" +CMAKE_C_FLAGS="${RAPIDSNARK_C_FLAGS:-}" +CMAKE_CXX_FLAGS="${RAPIDSNARK_CXX_FLAGS:-}" + +if [ ! -d "$CIRCUITS_DIR" ]; then + echo "circuits directory '$CIRCUITS_DIR' does not exist" >&2 + exit 1 +fi + +system_gmp_package() { + local multiarch arch + arch="${1:-${TARGET_ARCH}}" + multiarch="$(gcc -print-multiarch 2>/dev/null || echo "${arch}-linux-gnu")" + local lib_path="/usr/lib/${multiarch}/libgmp.a" + if [ ! -f "$lib_path" ]; then + echo "system libgmp.a not found at $lib_path" >&2 + return 1 + fi + mkdir -p "depends/gmp/package_${arch}/lib" "depends/gmp/package_${arch}/include" + cp "$lib_path" "depends/gmp/package_${arch}/lib/" + # Headers are small; copy the public ones the build expects. 
+ cp /usr/include/gmp*.h "depends/gmp/package_${arch}/include/" || true +} + +case "$TARGET_ARCH" in + arm64 | aarch64) + CMAKE_TARGET_PLATFORM="aarch64" + BUILD_DIR="build_prover_arm64" + PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_arm64}" + ;; + x86_64) + if [ "$FORCE_REBUILD" != "1" ]; then + echo "rapidsnark rebuild skipped for architecture '$TARGET_ARCH' (set RAPIDSNARK_FORCE_REBUILD=1 to override)" >&2 + exit 0 + fi + if [ -z "$CMAKE_C_FLAGS" ]; then + # Keep CPU requirements minimal so binaries run under emulation (e.g. act on Apple hosts). + CMAKE_C_FLAGS="-march=x86-64 -mno-avx -mno-avx2 -mno-sse4.2" + fi + if [ -z "$CMAKE_CXX_FLAGS" ]; then + CMAKE_CXX_FLAGS="$CMAKE_C_FLAGS" + fi + # Assembly paths assume modern CPU features; disable by default for x86_64 unless overridden. + if [ "${RAPIDSNARK_USE_ASM:-}" = "" ]; then + USE_ASM="OFF" + fi + CMAKE_TARGET_PLATFORM="x86_64" + BUILD_DIR="build_prover_x86_64" + PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_x86_64}" + ;; + *) + if [ "$FORCE_REBUILD" != "1" ]; then + echo "rapidsnark rebuild skipped for unsupported architecture '$TARGET_ARCH'" >&2 + exit 0 + fi + CMAKE_TARGET_PLATFORM="$TARGET_ARCH" + BUILD_DIR="build_prover_${TARGET_ARCH}" + PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_${TARGET_ARCH}}" + ;; +esac + +workdir="$(mktemp -d)" +trap 'rm -rf "$workdir"' EXIT + +echo "Building rapidsnark ($RAPIDSNARK_REF) for $TARGET_ARCH..." 
>&2 +git clone --depth 1 --branch "$RAPIDSNARK_REF" "$RAPIDSNARK_REPO" "$workdir/rapidsnark" >&2 +cd "$workdir/rapidsnark" +git submodule update --init --recursive >&2 + +if [ "${RAPIDSNARK_BUILD_GMP:-1}" = "1" ]; then + if [ -z "${RAPIDSNARK_GMP_TARGET:-}" ]; then + if [ "$CMAKE_TARGET_PLATFORM" = "x86_64" ]; then + GMP_TARGET="host" + else + GMP_TARGET="$CMAKE_TARGET_PLATFORM" + fi + else + GMP_TARGET="$RAPIDSNARK_GMP_TARGET" + fi + ./build_gmp.sh "$GMP_TARGET" >&2 +else + echo "Using system libgmp to satisfy rapidsnark dependencies" >&2 + system_gmp_package "$CMAKE_TARGET_PLATFORM" +fi + +rm -rf "$BUILD_DIR" +mkdir "$BUILD_DIR" +cd "$BUILD_DIR" +cmake .. \ + -DTARGET_PLATFORM="$CMAKE_TARGET_PLATFORM" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="../${PACKAGE_DIR}" \ + -DBUILD_SHARED_LIBS=OFF \ + -DUSE_ASM="$USE_ASM" \ + ${CMAKE_C_FLAGS:+-DCMAKE_C_FLAGS="$CMAKE_C_FLAGS"} \ + ${CMAKE_CXX_FLAGS:+-DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS"} >&2 +cmake --build . --target prover verifier -- -j"$(nproc)" >&2 + +install -m 0755 "src/prover" "$CIRCUITS_DIR/prover" +install -m 0755 "src/verifier" "$CIRCUITS_DIR/verifier" +echo "rapidsnark prover installed to $CIRCUITS_DIR/prover" >&2 diff --git a/scripts/setup-nomos-circuits.sh b/scripts/setup-nomos-circuits.sh new file mode 100755 index 0000000..bbd2dd1 --- /dev/null +++ b/scripts/setup-nomos-circuits.sh @@ -0,0 +1,226 @@ +#!/bin/bash +# +# Setup script for nomos-circuits +# +# Usage: ./setup-nomos-circuits.sh [VERSION] [INSTALL_DIR] +# +# Arguments: +# VERSION - Optional. Version to install (default: v0.3.1) +# INSTALL_DIR - Optional. 
Installation directory (default: $HOME/.nomos-circuits) +# +# Examples: +# ./setup-nomos-circuits.sh # Install default version to default location +# ./setup-nomos-circuits.sh v0.2.0 # Install specific version to default location +# ./setup-nomos-circuits.sh v0.2.0 /opt/circuits # Install to custom location + +set -e + +# Default values +VERSION="${1:-v0.3.1}" +DEFAULT_INSTALL_DIR="$HOME/.nomos-circuits" +INSTALL_DIR="${2:-$DEFAULT_INSTALL_DIR}" +REPO="logos-co/nomos-circuits" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +print_info() { + echo -e "${BLUE}ℹ${NC} $1" +} + +print_success() { + echo -e "${GREEN}✓${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}⚠${NC} $1" +} + +print_error() { + echo -e "${RED}✗${NC} $1" +} + +# Detect OS and architecture +detect_platform() { + local os="" + local arch="" + + # Detect OS + case "$(uname -s)" in + Linux*) os="linux";; + Darwin*) os="macos";; + MINGW*|MSYS*|CYGWIN*) os="windows";; + *) print_error "Unsupported operating system: $(uname -s)"; exit 1;; + esac + + # Detect architecture + case "$(uname -m)" in + x86_64) arch="x86_64";; + aarch64) arch="aarch64";; + arm64) arch="aarch64";; + *) print_error "Unsupported architecture: $(uname -m)"; exit 1;; + esac + + echo "${os}-${arch}" +} + +# Check if installation directory exists and get confirmation +check_existing_installation() { + if [ -d "$INSTALL_DIR" ]; then + print_warning "Installation directory already exists: $INSTALL_DIR" + + # Check if it has a VERSION file + if [ -f "$INSTALL_DIR/VERSION" ]; then + local current_version=$(cat "$INSTALL_DIR/VERSION") + print_info "Currently installed version: $current_version" + fi + + # In non-interactive environments (CI), automatically overwrite + if [ ! -t 0 ]; then + print_info "Non-interactive environment detected, automatically overwriting..." 
+ else + # Interactive environment - ask for confirmation + echo + read -p "Do you want to overwrite it? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + print_info "Installation cancelled." + exit 0 + fi + fi + + print_info "Removing existing installation..." + rm -rf "$INSTALL_DIR" + fi +} + +# Download and extract the release +download_release() { + local platform="$1" + local artifact="nomos-circuits-${VERSION}-${platform}.tar.gz" + local url="https://github.com/${REPO}/releases/download/${VERSION}/${artifact}" + local temp_dir=$(mktemp -d) + + print_info "Downloading nomos-circuits ${VERSION} for ${platform}..." + print_info "URL: $url" + + # Build curl command with optional authentication + local curl_cmd="curl -L" + if [ -n "$GITHUB_TOKEN" ]; then + curl_cmd="$curl_cmd --header 'authorization: Bearer ${GITHUB_TOKEN}'" + fi + curl_cmd="$curl_cmd -o ${temp_dir}/${artifact} $url" + + if ! eval "$curl_cmd"; then + print_error "Failed to download release artifact" + print_error "Please check that version ${VERSION} exists for platform ${platform}" + print_error "Available releases: https://github.com/${REPO}/releases" + rm -rf "$temp_dir" + return 1 + fi + + print_success "Download complete" + + print_info "Extracting to ${INSTALL_DIR}..." + mkdir -p "$INSTALL_DIR" + + if ! tar -xzf "${temp_dir}/${artifact}" -C "$INSTALL_DIR" --strip-components=1; then + print_error "Failed to extract archive" + rm -rf "$temp_dir" + return 1 + fi + + rm -rf "$temp_dir" + print_success "Extraction complete" +} + +# Handle macOS code signing/quarantine issues +handle_macos_quarantine() { + print_info "macOS detected: Removing quarantine attributes from executables..." 
+ + # Remove quarantine attribute from all executable files + if find "$INSTALL_DIR" -type f -perm +111 -exec xattr -d com.apple.quarantine {} \; 2>/dev/null; then + print_success "Quarantine attributes removed" + else + print_warning "Could not remove quarantine attributes (they may not exist)" + fi +} + +# Main installation process +main() { + print_info "Setting up nomos-circuits ${VERSION}" + print_info "Installation directory: $INSTALL_DIR" + echo + + # Detect platform (allow override via NOMOS_CIRCUITS_PLATFORM) + local platform_override="${NOMOS_CIRCUITS_PLATFORM:-}" + local platform + if [ -n "$platform_override" ]; then + platform="$platform_override" + print_info "Using overridden platform: $platform" + else + platform=$(detect_platform) + print_info "Detected platform: $platform" + fi + + # Check existing installation + check_existing_installation + + # Download and extract (retry with x86_64 bundle on aarch64 if needed) + if ! download_release "$platform"; then + if [[ "$platform" == linux-aarch64 ]]; then + print_warning "Falling back to linux-x86_64 circuits bundle; will rebuild prover for aarch64." + rm -rf "$INSTALL_DIR" + if ! download_release "linux-x86_64"; then + exit 1 + fi + else + exit 1 + fi + fi + + # Handle macOS quarantine if needed + if [[ "$platform" == macos-* ]]; then + echo + handle_macos_quarantine + fi + + if [[ "${NOMOS_CIRCUITS_REBUILD_RAPIDSNARK:-0}" == "1" || "$platform" == *"aarch64" ]]; then + echo + print_info "Rebuilding rapidsnark prover for ${platform}..." + "${SCRIPT_DIR}/build-rapidsnark.sh" "$INSTALL_DIR" + fi + + echo + print_success "Installation complete!" 
+ echo + print_info "nomos-circuits ${VERSION} is now installed at: $INSTALL_DIR" + print_info "The following circuits are available:" + + # Discover circuits by finding directories that contain a witness_generator + for dir in "$INSTALL_DIR"/*/; do + if [ -d "$dir" ]; then + local circuit_name + circuit_name=$(basename "$dir") + if [ -f "$dir/witness_generator" ]; then + echo " • $circuit_name" + fi + fi + done + + # Only show export instructions if not using the default location + if [ "$INSTALL_DIR" != "$DEFAULT_INSTALL_DIR" ]; then + echo + print_info "Since you're using a custom installation directory, set the environment variable:" + print_info " export NOMOS_CIRCUITS=$INSTALL_DIR" + echo + fi +} + +# Run main +main diff --git a/testing-framework/assets/stack/Dockerfile b/testing-framework/assets/stack/Dockerfile new file mode 100644 index 0000000..d11400a --- /dev/null +++ b/testing-framework/assets/stack/Dockerfile @@ -0,0 +1,125 @@ +# syntax=docker/dockerfile:1 +# check=skip=SecretsUsedInArgOrEnv +# Ignore warnings about sensitive information as this is test data. + +ARG VERSION=v0.3.1 +ARG CIRCUITS_OVERRIDE + +# =========================== +# BUILD IMAGE +# =========================== + +FROM rust:1.91.0-slim-bookworm AS builder + +ARG VERSION +ARG CIRCUITS_OVERRIDE + +LABEL maintainer="augustinas@status.im" \ + source="https://github.com/logos-co/nomos-node" \ + description="Nomos testnet build image" + +WORKDIR /workspace +COPY . . + +# Reduce debug artifact size. +ENV CARGO_PROFILE_DEV_DEBUG=0 + +# Install dependencies needed for building RocksDB. +RUN apt-get update && apt-get install -yq \ + git gcc g++ clang make cmake m4 xz-utils libgmp-dev libssl-dev pkg-config ca-certificates curl wget file + +RUN mkdir -p /opt/circuits && \ + select_circuits_source() { \ + # Prefer an explicit override when it exists (file or directory). 
\ + if [ -n "$CIRCUITS_OVERRIDE" ] && [ -e "/workspace/${CIRCUITS_OVERRIDE}" ]; then \ + echo "/workspace/${CIRCUITS_OVERRIDE}"; \ + return 0; \ + fi; \ + # Fall back to the workspace bundle shipped with the repo. \ + if [ -e "/workspace/tests/kzgrs/kzgrs_test_params" ]; then \ + echo "/workspace/tests/kzgrs/kzgrs_test_params"; \ + return 0; \ + fi; \ + return 1; \ + }; \ + if CIRCUITS_PATH="$(select_circuits_source)"; then \ + echo "Using prebuilt circuits bundle from ${CIRCUITS_PATH#/workspace/}"; \ + if [ -d "$CIRCUITS_PATH" ]; then \ + cp -R "${CIRCUITS_PATH}/." /opt/circuits; \ + else \ + cp "${CIRCUITS_PATH}" /opt/circuits/; \ + fi; \ + fi; \ + TARGET_ARCH="$(uname -m)"; \ + if [ -f "/opt/circuits/prover" ]; then \ + PROVER_INFO="$(file -b /opt/circuits/prover || true)"; \ + case "$TARGET_ARCH" in \ + x86_64) EXPECT_ARCH="x86-64" ;; \ + aarch64|arm64) EXPECT_ARCH="aarch64" ;; \ + *) EXPECT_ARCH="$TARGET_ARCH" ;; \ + esac; \ + if [ -n "$PROVER_INFO" ] && ! echo "$PROVER_INFO" | grep -qi "$EXPECT_ARCH"; then \ + echo "Circuits prover architecture ($PROVER_INFO) does not match target ${TARGET_ARCH}; rebuilding rapidsnark binaries"; \ + chmod +x scripts/build-rapidsnark.sh && \ + RAPIDSNARK_FORCE_REBUILD=1 \ + scripts/build-rapidsnark.sh /opt/circuits; \ + fi; \ + fi; \ + if [ ! -f "/opt/circuits/pol/verification_key.json" ]; then \ + echo "Local circuits missing pol artifacts; downloading ${VERSION} bundle and rebuilding"; \ + chmod +x scripts/setup-nomos-circuits.sh && \ + NOMOS_CIRCUITS_REBUILD_RAPIDSNARK=1 \ + RAPIDSNARK_BUILD_GMP=1 \ + scripts/setup-nomos-circuits.sh "$VERSION" "/opt/circuits"; \ + fi + +ENV NOMOS_CIRCUITS=/opt/circuits +ENV CARGO_TARGET_DIR=/workspace/target + +# Fetch the nomos-node sources pinned in Cargo.lock and build the runtime binaries. +RUN if [ ! 
-d /workspace/nomos-node ]; then \ + git clone https://github.com/logos-co/nomos-node.git /workspace/nomos-node; \ + fi && \ + cd /workspace/nomos-node && \ + git fetch --depth 1 origin 2f60a0372c228968c3526c341ebc7e58bbd178dd && \ + git checkout 2f60a0372c228968c3526c341ebc7e58bbd178dd && \ + git reset --hard && git clean -fdx && \ + cargo build --locked --all-features --bins && \ + rm -rf /workspace/nomos-node/target/debug/incremental + +# Build cfgsync binaries from this workspace. +RUN cargo build --locked --all-features --manifest-path /workspace/testing-framework/tools/cfgsync/Cargo.toml --bins + +# =========================== +# NODE IMAGE +# =========================== + +FROM debian:bookworm-slim + +ARG VERSION + +LABEL maintainer="augustinas@status.im" \ + source="https://github.com/logos-co/nomos-node" \ + description="Nomos node image" + +RUN apt-get update && apt-get install -yq \ + libstdc++6 \ + libgmp10 \ + libgomp1 \ + libssl3 \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /opt/circuits /opt/circuits + +COPY --from=builder /workspace/target/debug/nomos-node /usr/bin/nomos-node +COPY --from=builder /workspace/target/debug/nomos-executor /usr/bin/nomos-executor +COPY --from=builder /workspace/target/debug/nomos-cli /usr/bin/nomos-cli +COPY --from=builder /workspace/target/debug/cfgsync-server /usr/bin/cfgsync-server +COPY --from=builder /workspace/target/debug/cfgsync-client /usr/bin/cfgsync-client + +ENV NOMOS_CIRCUITS=/opt/circuits + +EXPOSE 3000 8080 9000 60000 + +ENTRYPOINT ["/usr/bin/nomos-node"] diff --git a/testing-framework/assets/stack/README.md b/testing-framework/assets/stack/README.md new file mode 100644 index 0000000..3559b9f --- /dev/null +++ b/testing-framework/assets/stack/README.md @@ -0,0 +1,60 @@ +# Docker Compose Testnet for Nomos + +The Nomos Docker Compose Testnet contains four distinct service types: + +- **Nomos Node Services**: Multiple dynamically spawned Nomos nodes that synchronizes their 
configuration via cfgsync utility. + +## Building + +Upon making modifications to the codebase or the Dockerfile, the Nomos images must be rebuilt: + +```bash +docker compose build +``` + +## Configuring + +Configuration of the Docker testnet is accomplished using the `.env` file. An example configuration can be found in `.env.example`. + +To adjust the count of Nomos nodes, modify the variable: + +```bash +DOCKER_COMPOSE_LIBP2P_REPLICAS=100 +``` + +## Running + +Initiate the testnet by executing the following command: + +```bash +docker compose up +``` + +This command will merge all output logs and display them in Stdout. For a more refined output, it's recommended to first run: + +```bash +docker compose up -d +``` + +Followed by: + +```bash +docker compose logs -f nomos-node +``` + +## Using testnet + +Bootstrap node is accessible from the host via `3000` and `18080` ports. To expose other nomos nodes, please update `nomos-node` service in the `compose.yml` file with this configuration: + +```bash + nomos-node-0: + ports: + - "3001-3010:3000" # Use range depending on the number of nomos node replicas. 
+ - "18081-18190:18080" +``` + +After running `docker compose up`, the randomly assigned ports can be viewed with `ps` command: + +```bash +docker compose ps +``` diff --git a/testing-framework/assets/stack/cfgsync.yaml b/testing-framework/assets/stack/cfgsync.yaml new file mode 100644 index 0000000..8cd48e5 --- /dev/null +++ b/testing-framework/assets/stack/cfgsync.yaml @@ -0,0 +1,49 @@ +port: 4400 +n_hosts: 4 +timeout: 10 + +# ConsensusConfig related parameters +security_param: 10 +active_slot_coeff: 0.9 + +# DaConfig related parameters +subnetwork_size: 2 +dispersal_factor: 2 +num_samples: 1 +num_subnets: 2 +old_blobs_check_interval: "5.0" +blobs_validity_duration: "60.0" +global_params_path: "/kzgrs_test_params" +min_dispersal_peers: 1 +min_replication_peers: 1 +monitor_failure_time_window: "5.0" +balancer_interval: "5.0" +# Dispersal mempool publish strategy +mempool_publish_strategy: !SampleSubnetworks + sample_threshold: 2 + timeout: "2.0" + cooldown: "0.0001" + +replication_settings: + seen_message_cache_size: 204800 + seen_message_ttl: "900.0" +retry_shares_limit: 5 +retry_commitments_limit: 5 + +# Tracing +tracing_settings: + logger: !Loki + endpoint: http://loki:3100/ + host_identifier: node + tracing: !Otlp + endpoint: http://tempo:4317/ + sample_ratio: 0.5 + service_name: node + filter: !EnvFilter + filters: + nomos: debug + metrics: !Otlp + endpoint: http://prometheus:9090/api/v1/otlp/v1/metrics + host_identifier: node + console: None + level: INFO diff --git a/testing-framework/assets/stack/kzgrs_test_params b/testing-framework/assets/stack/kzgrs_test_params new file mode 100644 index 0000000..0389d61 Binary files /dev/null and b/testing-framework/assets/stack/kzgrs_test_params differ diff --git a/testing-framework/assets/stack/monitoring/grafana/dashboards.yml b/testing-framework/assets/stack/monitoring/grafana/dashboards.yml new file mode 100644 index 0000000..95a6604 --- /dev/null +++ 
b/testing-framework/assets/stack/monitoring/grafana/dashboards.yml @@ -0,0 +1,8 @@ +apiVersion: 1 +providers: + - name: 'default' + orgId: 1 + folder: '' + type: 'file' + options: + path: '/var/lib/grafana/dashboards' diff --git a/testing-framework/assets/stack/monitoring/grafana/dashboards/testnet_metrics.json b/testing-framework/assets/stack/monitoring/grafana/dashboards/testnet_metrics.json new file mode 100644 index 0000000..454c260 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/grafana/dashboards/testnet_metrics.json @@ -0,0 +1,237 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + 
"legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "disableTextWrap": false, + "editorMode": "builder", + "expr": "da_mempool_pending_items", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Mempool: Pending DA blobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "consensus_processed_blocks", + "fullMetaSearch": false, + 
"includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Consensus: Processed Blocks", + "type": "timeseries" + } + ], + "preload": false, + "schemaVersion": 40, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Testnet Metrics", + "uid": "ce6ebepwk737kf", + "version": 5, + "weekStart": "" +} diff --git a/testing-framework/assets/stack/monitoring/grafana/datasources.yaml b/testing-framework/assets/stack/monitoring/grafana/datasources.yaml new file mode 100644 index 0000000..945f148 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/grafana/datasources.yaml @@ -0,0 +1,37 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + org_id: 1 + url: http://prometheus:9090 + is_default: true + version: 1 + editable: true + + - name: Tempo + type: tempo + access: proxy + org_id: 1 + url: http://tempo:3200 + is_default: false + version: 1 + editable: true + uid: tempods + + - name: Loki + type: loki + access: proxy + org_id: 1 + url: http://loki:3100 + is_default: false + version: 1 + editable: true + jsonData: + derivedFields: + - name: trace_id + matcherRegex: "\"trace_id\":\"(\\w+)\"" + url: "$${__value.raw}" + datasourceUid: tempods + diff --git a/testing-framework/assets/stack/monitoring/grafana/grafana.ini b/testing-framework/assets/stack/monitoring/grafana/grafana.ini new file mode 100644 index 0000000..3c60e13 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/grafana/grafana.ini @@ -0,0 +1,51 @@ +instance_name = nomos dashboard + +;[dashboards.json] +;enabled = true +;path = /home/git/grafana/grafana-dashboards/dashboards + + +#################################### Auth ########################## +[auth] +disable_login_form = false + +#################################### Anonymous Auth ########################## +[auth.anonymous] 
+# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +;org_name = Public + +# specify role for unauthenticated users +; org_role = Admin +org_role = Viewer + +;[security] +;admin_user = ocr +;admin_password = ocr + +;[users] +# disable user signup / registration +;allow_sign_up = false + +# Set to true to automatically assign new users to the default organization (id 1) +;auto_assign_org = true + +# Default role new users will be automatically assigned (if disabled above is set to true) +;auto_assign_org_role = Viewer + +#################################### SMTP / Emailing ########################## +;[smtp] +;enabled = false +;host = localhost:25 +;user = +;password = +;cert_file = +;key_file = +;skip_verify = false +;from_address = admin@grafana.localhost + +;[emails] +;welcome_email_on_sign_up = false + diff --git a/testing-framework/assets/stack/monitoring/grafana/plugins.env b/testing-framework/assets/stack/monitoring/grafana/plugins.env new file mode 100644 index 0000000..2a4b487 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/grafana/plugins.env @@ -0,0 +1 @@ +GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel diff --git a/testing-framework/assets/stack/monitoring/prometheus.yml b/testing-framework/assets/stack/monitoring/prometheus.yml new file mode 100644 index 0000000..7f91b55 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/prometheus.yml @@ -0,0 +1,4 @@ +global: + evaluation_interval: 15s + external_labels: + monitor: "Monitoring" diff --git a/testing-framework/assets/stack/monitoring/tempo.yaml b/testing-framework/assets/stack/monitoring/tempo.yaml new file mode 100644 index 0000000..22473b5 --- /dev/null +++ b/testing-framework/assets/stack/monitoring/tempo.yaml @@ -0,0 +1,53 @@ +stream_over_http_enabled: true +server: + 
http_listen_port: 3200 + log_level: info + +query_frontend: + search: + duration_slo: 5s + throughput_bytes_slo: 1.073741824e+09 + trace_by_id: + duration_slo: 5s + +distributor: + receivers: # this configuration will listen on all ports and protocols that tempo is capable of. + otlp: + protocols: + grpc: + endpoint: "0.0.0.0:4317" + +ingester: + max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally + +compactor: + compaction: + block_retention: 24h + +metrics_generator: + registry: + external_labels: + source: tempo + cluster: docker-compose + storage: + path: /var/tempo/generator/wal + remote_write: + - url: http://prometheus:9090/api/v1/write + send_exemplars: true + traces_storage: + path: /var/tempo/generator/traces + +storage: + trace: + backend: local # backend configuration to use + wal: + path: /var/tempo/wal # where to store the wal locally + local: + path: /var/tempo/blocks + +overrides: + defaults: + metrics_generator: + processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator + generate_native_histograms: both + diff --git a/testing-framework/assets/stack/scripts/build_test_image.sh b/testing-framework/assets/stack/scripts/build_test_image.sh new file mode 100755 index 0000000..63636b7 --- /dev/null +++ b/testing-framework/assets/stack/scripts/build_test_image.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -euo pipefail + +# Builds the testnet image with circuits. Prefers a local circuits bundle +# (tests/kzgrs/kzgrs_test_params) or a custom override; otherwise downloads +# from logos-co/nomos-circuits. + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." 
&& pwd)" +DOCKERFILE_PATH="${ROOT_DIR}/testing-framework/assets/stack/Dockerfile" +IMAGE_TAG="${IMAGE_TAG:-nomos-testnet:local}" +VERSION="${VERSION:-v0.3.1}" +CIRCUITS_OVERRIDE="${CIRCUITS_OVERRIDE:-testing-framework/assets/stack/kzgrs_test_params}" + +echo "Workspace root: ${ROOT_DIR}" +echo "Image tag: ${IMAGE_TAG}" +echo "Circuits override: ${CIRCUITS_OVERRIDE:-}" +echo "Circuits version (fallback download): ${VERSION}" + +build_args=( + -f "${DOCKERFILE_PATH}" + -t "${IMAGE_TAG}" + "${ROOT_DIR}" +) + +# Pass override/version args to the Docker build. +if [ -n "${CIRCUITS_OVERRIDE}" ]; then + build_args+=(--build-arg "CIRCUITS_OVERRIDE=${CIRCUITS_OVERRIDE}") +fi +build_args+=(--build-arg "VERSION=${VERSION}") + +echo "Running: docker build ${build_args[*]}" +docker build "${build_args[@]}" + +cat <&2; exit 1 ;; + esac + case "$(uname -m)" in + x86_64) arch="x86_64" ;; + aarch64|arm64) arch="aarch64" ;; + *) echo "Unsupported architecture: $(uname -m)" >&2; exit 1 ;; + esac + echo "${os}-${arch}" +} + +download_release() { + local platform="$1" + local artifact="nomos-circuits-${VERSION}-${platform}.tar.gz" + local url="https://github.com/${REPO}/releases/download/${VERSION}/${artifact}" + local temp_dir + temp_dir=$(mktemp -d) + + echo "Downloading nomos-circuits ${VERSION} for ${platform}..." + if [ -n "${GITHUB_TOKEN:-}" ]; then + auth_header="Authorization: Bearer ${GITHUB_TOKEN}" + else + auth_header="" + fi + + if ! curl -L ${auth_header:+-H "$auth_header"} -o "${temp_dir}/${artifact}" "${url}"; then + echo "Failed to download release artifact from ${url}" >&2 + rm -rf "${temp_dir}" + exit 1 + fi + + echo "Extracting to ${INSTALL_DIR}..." + rm -rf "${INSTALL_DIR}" + mkdir -p "${INSTALL_DIR}" + if ! 
tar -xzf "${temp_dir}/${artifact}" -C "${INSTALL_DIR}" --strip-components=1; then + echo "Failed to extract ${artifact}" >&2 + rm -rf "${temp_dir}" + exit 1 + fi + rm -rf "${temp_dir}" +} + +platform=$(detect_platform) +echo "Setting up nomos-circuits ${VERSION} for ${platform}" +echo "Installing to ${INSTALL_DIR}" + +download_release "${platform}" + +echo "Installation complete. Circuits installed at: ${INSTALL_DIR}" +echo "If using a custom directory, set NOMOS_CIRCUITS=${INSTALL_DIR}" diff --git a/testing-framework/configs/Cargo.toml b/testing-framework/configs/Cargo.toml new file mode 100644 index 0000000..3cc8f2f --- /dev/null +++ b/testing-framework/configs/Cargo.toml @@ -0,0 +1,51 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-config" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[dependencies] +blst = "0.3.11" +chain-leader = { workspace = true } +chain-network = { workspace = true } +chain-service = { workspace = true } +cryptarchia-engine = { workspace = true, features = ["serde"] } +cryptarchia-sync = { workspace = true } +ed25519-dalek = { version = "2.2.0", features = ["rand_core", "serde"] } +groth16 = { workspace = true } +hex = { version = "0.4.3", default-features = false } +key-management-system = { workspace = true } +nomos-api = { workspace = true } +nomos-blend-message = { workspace = true } +nomos-blend-service = { workspace = true, features = ["libp2p"] } +nomos-core = { workspace = true } +nomos-da-dispersal = { workspace = true } +nomos-da-network-core = { workspace = true } +nomos-da-network-service = { workspace = true } +nomos-da-sampling = { workspace = true } +nomos-da-verifier = { workspace = true } +nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] } +nomos-ledger = { workspace = true, features = ["serde"] } +nomos-libp2p = { 
workspace = true } +nomos-node = { workspace = true, default-features = false, features = ["testing"] } +nomos-sdp = { workspace = true } +nomos-time = { workspace = true } +nomos-tracing = { workspace = true } +nomos-tracing-service = { workspace = true } +nomos-utils = { workspace = true } +nomos-wallet = { workspace = true } +num-bigint = { version = "0.4", default-features = false } +rand = { workspace = true } +serde = { workspace = true, features = ["derive"] } +subnetworks-assignations = { workspace = true } +time = { version = "0.3", default-features = true } +tracing = { workspace = true } +zksign = { workspace = true } + +[lints] +workspace = true diff --git a/testing-framework/configs/src/common/kms.rs b/testing-framework/configs/src/common/kms.rs new file mode 100644 index 0000000..cea5d6a --- /dev/null +++ b/testing-framework/configs/src/common/kms.rs @@ -0,0 +1,14 @@ +use groth16::fr_to_bytes; +use key_management_system::{ + backend::preload::KeyId, + keys::{Key, secured_key::SecuredKey as _}, +}; + +#[must_use] +pub fn key_id_for_preload_backend(key: &Key) -> KeyId { + let key_id_bytes = match key { + Key::Ed25519(ed25519_secret_key) => ed25519_secret_key.as_public_key().to_bytes(), + Key::Zk(zk_secret_key) => fr_to_bytes(zk_secret_key.as_public_key().as_fr()), + }; + hex::encode(key_id_bytes) +} diff --git a/testing-framework/configs/src/common/mod.rs b/testing-framework/configs/src/common/mod.rs new file mode 100644 index 0000000..5550fd5 --- /dev/null +++ b/testing-framework/configs/src/common/mod.rs @@ -0,0 +1 @@ +pub mod kms; diff --git a/testing-framework/configs/src/lib.rs b/testing-framework/configs/src/lib.rs new file mode 100644 index 0000000..de9fb03 --- /dev/null +++ b/testing-framework/configs/src/lib.rs @@ -0,0 +1,45 @@ +use std::{env, net::Ipv4Addr, ops::Mul as _, sync::LazyLock, time::Duration}; + +use nomos_core::sdp::ProviderId; +use nomos_libp2p::{Multiaddr, PeerId, multiaddr}; + +pub mod common; +pub mod nodes; +pub mod topology; 
+ +static IS_SLOW_TEST_ENV: LazyLock = + LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true")); + +pub static IS_DEBUG_TRACING: LazyLock = LazyLock::new(|| { + env::var("NOMOS_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true")) +}); + +/// In slow test environments like Codecov, use 2x timeout. +#[must_use] +pub fn adjust_timeout(d: Duration) -> Duration { + if *IS_SLOW_TEST_ENV { d.mul(2) } else { d } +} + +#[must_use] +pub fn node_address_from_port(port: u16) -> Multiaddr { + multiaddr(Ipv4Addr::LOCALHOST, port) +} + +#[must_use] +pub fn secret_key_to_peer_id(node_key: nomos_libp2p::ed25519::SecretKey) -> PeerId { + PeerId::from_public_key( + &nomos_libp2p::ed25519::Keypair::from(node_key) + .public() + .into(), + ) +} + +#[must_use] +pub fn secret_key_to_provider_id(node_key: nomos_libp2p::ed25519::SecretKey) -> ProviderId { + ProviderId::try_from( + nomos_libp2p::ed25519::Keypair::from(node_key) + .public() + .to_bytes(), + ) + .unwrap() +} diff --git a/testing-framework/configs/src/nodes/executor.rs b/testing-framework/configs/src/nodes/executor.rs new file mode 100644 index 0000000..334cf29 --- /dev/null +++ b/testing-framework/configs/src/nodes/executor.rs @@ -0,0 +1,330 @@ +use std::{ + collections::HashSet, + num::{NonZeroU64, NonZeroUsize}, + path::PathBuf, + time::Duration, +}; + +use chain_leader::LeaderSettings; +use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig}; +use chain_service::{CryptarchiaSettings, StartingState}; +use cryptarchia_engine::time::SlotConfig; +use key_management_system::keys::{Key, ZkKey}; +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings}, + settings::TimingSettings, +}; +use nomos_da_dispersal::{ + DispersalServiceSettings, + backend::kzgrs::{DispersalKZGRSBackendSettings, EncoderSettings}, +}; +use nomos_da_network_core::protocols::sampling::SubnetsConfig; +use nomos_da_network_service::{ + NetworkConfig as 
DaNetworkConfig, + api::http::ApiAdapterSettings, + backends::libp2p::{ + common::DaNetworkBackendSettings, executor::DaNetworkExecutorBackendSettings, + }, +}; +use nomos_da_sampling::{ + DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings, + verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings, +}; +use nomos_da_verifier::{ + DaVerifierServiceSettings, + backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig}, + storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings, +}; +use nomos_executor::config::Config as ExecutorConfig; +use nomos_node::{ + RocksBackendSettings, + api::backend::AxumBackendSettings as NodeAxumBackendSettings, + config::{ + blend::{ + deployment::{self as blend_deployment}, + serde as blend_serde, + }, + deployment::{CustomDeployment, Settings as NodeDeploymentSettings}, + mempool::MempoolConfig, + network::deployment::Settings as NetworkDeploymentSettings, + }, +}; +use nomos_sdp::SdpSettings; +use nomos_time::{ + TimeServiceSettings, + backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings}, +}; +use nomos_utils::math::NonNegativeF64; +use nomos_wallet::WalletServiceSettings; + +use crate::{ + adjust_timeout, + common::kms::key_id_for_preload_backend, + topology::configs::{ + GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount, + }, +}; + +#[must_use] +#[expect(clippy::too_many_lines, reason = "TODO: Address this at some point.")] +pub fn create_executor_config(config: GeneralConfig) -> ExecutorConfig { + let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config); + ExecutorConfig { + network: config.network_config, + blend: blend_user_config, + deployment: deployment_settings, + cryptarchia: CryptarchiaSettings { + config: config.consensus_config.ledger_config.clone(), + starting_state: StartingState::Genesis { + genesis_tx: config.consensus_config.genesis_tx, + }, + 
// Disable on-disk recovery in compose tests to avoid serde errors on + // non-string keys and keep services alive. + recovery_file: PathBuf::new(), + bootstrap: chain_service::BootstrapConfig { + prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period, + force_bootstrap: false, + offline_grace_period: chain_service::OfflineGracePeriodConfig { + grace_period: Duration::from_secs(20 * 60), + state_recording_interval: Duration::from_secs(60), + }, + }, + }, + chain_network: ChainNetworkSettings { + config: config.consensus_config.ledger_config.clone(), + network_adapter_settings: + chain_network::network::adapters::libp2p::LibP2pAdapterSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + bootstrap: chain_network::BootstrapConfig { + ibd: chain_network::IbdConfig { + peers: HashSet::new(), + delay_before_new_download: Duration::from_secs(10), + }, + }, + sync: SyncConfig { + orphan: OrphanConfig { + max_orphan_cache_size: NonZeroUsize::new(5) + .expect("Max orphan cache size must be non-zero"), + }, + }, + }, + cryptarchia_leader: LeaderSettings { + transaction_selector_settings: (), + config: config.consensus_config.ledger_config.clone(), + leader_config: config.consensus_config.leader_config.clone(), + blend_broadcast_settings: + nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + }, + da_network: DaNetworkConfig { + backend: DaNetworkExecutorBackendSettings { + validator_settings: DaNetworkBackendSettings { + node_key: config.da_config.node_key, + listening_address: config.da_config.listening_address, + policy_settings: config.da_config.policy_settings, + monitor_settings: config.da_config.monitor_settings, + balancer_interval: config.da_config.balancer_interval, + redial_cooldown: config.da_config.redial_cooldown, + replication_settings: config.da_config.replication_settings, + subnets_settings: SubnetsConfig { + num_of_subnets: 
config.da_config.num_samples as usize, + shares_retry_limit: config.da_config.retry_shares_limit, + commitments_retry_limit: config.da_config.retry_commitments_limit, + }, + }, + num_subnets: config.da_config.num_subnets, + }, + membership: config.da_config.membership.clone(), + api_adapter_settings: ApiAdapterSettings { + api_port: config.api_config.address.port(), + is_secure: false, + }, + subnet_refresh_interval: config.da_config.subnets_refresh_interval, + subnet_threshold: config.da_config.num_samples as usize, + min_session_members: config.da_config.num_samples as usize, + }, + da_verifier: DaVerifierServiceSettings { + share_verifier_settings: KzgrsDaVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: config.da_config.num_subnets as usize, + }, + tx_verifier_settings: (), + network_adapter_settings: (), + storage_adapter_settings: VerifierStorageAdapterSettings { + blob_storage_directory: "./".into(), + }, + mempool_trigger_settings: MempoolPublishTriggerConfig { + publish_threshold: NonNegativeF64::try_from(0.8).unwrap(), + share_duration: Duration::from_secs(5), + prune_duration: Duration::from_secs(30), + prune_interval: Duration::from_secs(5), + }, + }, + tracing: config.tracing_config.tracing_settings, + http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + da_sampling: DaSamplingServiceSettings { + sampling_settings: KzgrsSamplingBackendSettings { + num_samples: config.da_config.num_samples, + num_subnets: config.da_config.num_subnets, + old_blobs_check_interval: config.da_config.old_blobs_check_interval, + blobs_validity_duration: config.da_config.blobs_validity_duration, + }, + share_verifier_settings: SamplingVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: 
config.da_config.num_subnets as usize, + }, + commitments_wait_duration: Duration::from_secs(1), + sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)), + }, + storage: RocksBackendSettings { + db_path: "./db".into(), + read_only: false, + column_family: Some("blocks".into()), + }, + da_dispersal: DispersalServiceSettings { + backend: DispersalKZGRSBackendSettings { + encoder_settings: EncoderSettings { + num_columns: config.da_config.num_subnets as usize, + with_cache: false, + global_params_path: config.da_config.global_params_path, + }, + dispersal_timeout: Duration::from_secs(20), + retry_cooldown: Duration::from_secs(3), + retry_limit: 2, + }, + }, + time: TimeServiceSettings { + backend_settings: NtpTimeBackendSettings { + ntp_server: config.time_config.ntp_server, + ntp_client_settings: NTPClientSettings { + timeout: config.time_config.timeout, + listening_interface: config.time_config.interface, + }, + update_interval: config.time_config.update_interval, + slot_config: SlotConfig { + slot_duration: config.time_config.slot_duration, + chain_start_time: config.time_config.chain_start_time, + }, + epoch_config: config.consensus_config.ledger_config.epoch_config, + base_period_length: config.consensus_config.ledger_config.base_period_length(), + }, + }, + mempool: MempoolConfig { + pool_recovery_path: "./recovery/mempool.json".into(), + }, + sdp: SdpSettings { declaration: None }, + wallet: WalletServiceSettings { + known_keys: { + let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]); + keys.extend( + config + .consensus_config + .wallet_accounts + .iter() + .map(WalletAccount::public_key), + ); + keys + }, + }, + key_management: config.kms_config, + + testing_http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.testing_http_address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + 
} +} + +fn build_blend_service_config( + config: &TopologyBlendConfig, +) -> (blend_serde::Config, NodeDeploymentSettings) { + let zk_key_id = + key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone()))); + + let backend_core = &config.backend_core; + let backend_edge = &config.backend_edge; + + let user = blend_serde::Config { + common: blend_serde::common::Config { + non_ephemeral_signing_key: config.private_key.clone(), + recovery_path_prefix: PathBuf::from("./recovery/blend"), + }, + core: blend_serde::core::Config { + backend: blend_serde::core::BackendConfig { + listening_address: backend_core.listening_address.clone(), + core_peering_degree: backend_core.core_peering_degree.clone(), + edge_node_connection_timeout: backend_core.edge_node_connection_timeout, + max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections, + max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer, + }, + zk: ZkSettings { + secret_key_kms_id: zk_key_id, + }, + }, + edge: blend_serde::edge::Config { + backend: blend_serde::edge::BackendConfig { + max_dial_attempts_per_peer_per_message: backend_edge + .max_dial_attempts_per_peer_per_message, + replication_factor: backend_edge.replication_factor, + }, + }, + }; + + let deployment_settings = blend_deployment::Settings { + common: blend_deployment::CommonSettings { + num_blend_layers: NonZeroU64::try_from(1).unwrap(), + minimum_network_size: NonZeroU64::try_from(1).unwrap(), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(), + rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(), + }, + protocol_name: backend_core.protocol_name.clone(), + }, + core: 
blend_deployment::CoreSettings { + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(), + }, + }, + minimum_messages_coefficient: backend_core.minimum_messages_coefficient, + normalization_constant: backend_core.normalization_constant, + }, + }; + + let deployment = NodeDeploymentSettings::Custom(CustomDeployment { + blend: deployment_settings, + network: NetworkDeploymentSettings { + identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/identify/1.0.0", + ), + kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/kad/1.0.0", + ), + }, + }); + + (user, deployment) +} diff --git a/testing-framework/configs/src/nodes/mod.rs b/testing-framework/configs/src/nodes/mod.rs new file mode 100644 index 0000000..aa3e506 --- /dev/null +++ b/testing-framework/configs/src/nodes/mod.rs @@ -0,0 +1,2 @@ +pub mod executor; +pub mod validator; diff --git a/testing-framework/configs/src/nodes/validator.rs b/testing-framework/configs/src/nodes/validator.rs new file mode 100644 index 0000000..d9440ac --- /dev/null +++ b/testing-framework/configs/src/nodes/validator.rs @@ -0,0 +1,319 @@ +use std::{ + collections::HashSet, + num::{NonZeroU64, NonZeroUsize}, + path::PathBuf, + time::Duration, +}; + +use chain_leader::LeaderSettings; +use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig}; +use chain_service::{CryptarchiaSettings, StartingState}; +use cryptarchia_engine::time::SlotConfig; +use key_management_system::keys::{Key, ZkKey}; +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings}, + settings::TimingSettings, +}; +use nomos_da_network_core::{ + protocols::sampling::SubnetsConfig, 
swarm::DAConnectionPolicySettings, +}; +use nomos_da_network_service::{ + NetworkConfig as DaNetworkConfig, api::http::ApiAdapterSettings, + backends::libp2p::common::DaNetworkBackendSettings, +}; +use nomos_da_sampling::{ + DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings, + verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings, +}; +use nomos_da_verifier::{ + DaVerifierServiceSettings, + backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig}, + storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings, +}; +use nomos_node::{ + Config as ValidatorConfig, RocksBackendSettings, + api::backend::AxumBackendSettings as NodeAxumBackendSettings, + config::{ + blend::{ + deployment::{self as blend_deployment}, + serde as blend_serde, + }, + deployment::{CustomDeployment, Settings as NodeDeploymentSettings}, + mempool::MempoolConfig, + network::deployment::Settings as NetworkDeploymentSettings, + }, +}; +use nomos_sdp::SdpSettings; +use nomos_time::{ + TimeServiceSettings, + backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings}, +}; +use nomos_utils::math::NonNegativeF64; +use nomos_wallet::WalletServiceSettings; + +use crate::{ + adjust_timeout, + common::kms::key_id_for_preload_backend, + topology::configs::{ + GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount, + }, +}; + +#[must_use] +#[expect( + clippy::too_many_lines, + reason = "Validator config wiring aggregates many service settings" +)] +pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig { + let da_policy_settings = config.da_config.policy_settings; + let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config); + ValidatorConfig { + network: config.network_config, + blend: blend_user_config, + deployment: deployment_settings, + cryptarchia: CryptarchiaSettings { + config: 
config.consensus_config.ledger_config.clone(), + starting_state: StartingState::Genesis { + genesis_tx: config.consensus_config.genesis_tx, + }, + // Disable on-disk recovery in compose tests to avoid serde errors on + // non-string keys and keep services alive. + recovery_file: PathBuf::new(), + bootstrap: chain_service::BootstrapConfig { + prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period, + force_bootstrap: false, + offline_grace_period: chain_service::OfflineGracePeriodConfig { + grace_period: Duration::from_secs(20 * 60), + state_recording_interval: Duration::from_secs(60), + }, + }, + }, + chain_network: ChainNetworkSettings { + config: config.consensus_config.ledger_config.clone(), + network_adapter_settings: + chain_network::network::adapters::libp2p::LibP2pAdapterSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + bootstrap: chain_network::BootstrapConfig { + ibd: chain_network::IbdConfig { + peers: HashSet::new(), + delay_before_new_download: Duration::from_secs(10), + }, + }, + sync: SyncConfig { + orphan: OrphanConfig { + max_orphan_cache_size: NonZeroUsize::new(5) + .expect("Max orphan cache size must be non-zero"), + }, + }, + }, + cryptarchia_leader: LeaderSettings { + transaction_selector_settings: (), + config: config.consensus_config.ledger_config.clone(), + leader_config: config.consensus_config.leader_config.clone(), + blend_broadcast_settings: + nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + }, + da_network: DaNetworkConfig { + backend: DaNetworkBackendSettings { + node_key: config.da_config.node_key, + listening_address: config.da_config.listening_address, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 0, + min_replication_peers: da_policy_settings.min_replication_peers, + max_dispersal_failures: da_policy_settings.max_dispersal_failures, + max_sampling_failures: 
da_policy_settings.max_sampling_failures, + max_replication_failures: da_policy_settings.max_replication_failures, + malicious_threshold: da_policy_settings.malicious_threshold, + }, + monitor_settings: config.da_config.monitor_settings, + balancer_interval: config.da_config.balancer_interval, + redial_cooldown: config.da_config.redial_cooldown, + replication_settings: config.da_config.replication_settings, + subnets_settings: SubnetsConfig { + num_of_subnets: config.da_config.num_samples as usize, + shares_retry_limit: config.da_config.retry_shares_limit, + commitments_retry_limit: config.da_config.retry_commitments_limit, + }, + }, + membership: config.da_config.membership.clone(), + api_adapter_settings: ApiAdapterSettings { + api_port: config.api_config.address.port(), + is_secure: false, + }, + subnet_refresh_interval: config.da_config.subnets_refresh_interval, + subnet_threshold: config.da_config.num_samples as usize, + min_session_members: config.da_config.num_samples as usize, + }, + da_verifier: DaVerifierServiceSettings { + share_verifier_settings: KzgrsDaVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: config.da_config.num_subnets as usize, + }, + tx_verifier_settings: (), + network_adapter_settings: (), + storage_adapter_settings: VerifierStorageAdapterSettings { + blob_storage_directory: "./".into(), + }, + mempool_trigger_settings: MempoolPublishTriggerConfig { + publish_threshold: NonNegativeF64::try_from(0.8).unwrap(), + share_duration: Duration::from_secs(5), + prune_duration: Duration::from_secs(30), + prune_interval: Duration::from_secs(5), + }, + }, + tracing: config.tracing_config.tracing_settings, + http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + da_sampling: DaSamplingServiceSettings { + 
sampling_settings: KzgrsSamplingBackendSettings { + num_samples: config.da_config.num_samples, + num_subnets: config.da_config.num_subnets, + old_blobs_check_interval: config.da_config.old_blobs_check_interval, + blobs_validity_duration: config.da_config.blobs_validity_duration, + }, + share_verifier_settings: SamplingVerifierSettings { + global_params_path: config.da_config.global_params_path, + domain_size: config.da_config.num_subnets as usize, + }, + commitments_wait_duration: Duration::from_secs(1), + sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)), + }, + storage: RocksBackendSettings { + db_path: "./db".into(), + read_only: false, + column_family: Some("blocks".into()), + }, + time: TimeServiceSettings { + backend_settings: NtpTimeBackendSettings { + ntp_server: config.time_config.ntp_server, + ntp_client_settings: NTPClientSettings { + timeout: config.time_config.timeout, + listening_interface: config.time_config.interface, + }, + update_interval: config.time_config.update_interval, + slot_config: SlotConfig { + slot_duration: config.time_config.slot_duration, + chain_start_time: config.time_config.chain_start_time, + }, + epoch_config: config.consensus_config.ledger_config.epoch_config, + base_period_length: config.consensus_config.ledger_config.base_period_length(), + }, + }, + mempool: MempoolConfig { + pool_recovery_path: "./recovery/mempool.json".into(), + }, + sdp: SdpSettings { declaration: None }, + wallet: WalletServiceSettings { + known_keys: { + let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]); + keys.extend( + config + .consensus_config + .wallet_accounts + .iter() + .map(WalletAccount::public_key), + ); + keys + }, + }, + key_management: config.kms_config, + testing_http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.testing_http_address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + 
..Default::default() + }, + }, + } +} + +fn build_blend_service_config( + config: &TopologyBlendConfig, +) -> (blend_serde::Config, NodeDeploymentSettings) { + let zk_key_id = + key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone()))); + + let backend_core = &config.backend_core; + let backend_edge = &config.backend_edge; + + let user = blend_serde::Config { + common: blend_serde::common::Config { + non_ephemeral_signing_key: config.private_key.clone(), + recovery_path_prefix: PathBuf::from("./recovery/blend"), + }, + core: blend_serde::core::Config { + backend: blend_serde::core::BackendConfig { + listening_address: backend_core.listening_address.clone(), + core_peering_degree: backend_core.core_peering_degree.clone(), + edge_node_connection_timeout: backend_core.edge_node_connection_timeout, + max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections, + max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer, + }, + zk: ZkSettings { + secret_key_kms_id: zk_key_id, + }, + }, + edge: blend_serde::edge::Config { + backend: blend_serde::edge::BackendConfig { + max_dial_attempts_per_peer_per_message: backend_edge + .max_dial_attempts_per_peer_per_message, + replication_factor: backend_edge.replication_factor, + }, + }, + }; + + let deployment_settings = blend_deployment::Settings { + common: blend_deployment::CommonSettings { + num_blend_layers: NonZeroU64::try_from(1).unwrap(), + minimum_network_size: NonZeroU64::try_from(1).unwrap(), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(), + rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(), + }, + protocol_name: backend_core.protocol_name.clone(), + }, 
+ core: blend_deployment::CoreSettings { + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(), + }, + }, + minimum_messages_coefficient: backend_core.minimum_messages_coefficient, + normalization_constant: backend_core.normalization_constant, + }, + }; + + let deployment = NodeDeploymentSettings::Custom(CustomDeployment { + blend: deployment_settings, + network: NetworkDeploymentSettings { + identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/identify/1.0.0", + ), + kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/kad/1.0.0", + ), + }, + }); + + (user, deployment) +} diff --git a/testing-framework/configs/src/topology/configs/api.rs b/testing-framework/configs/src/topology/configs/api.rs new file mode 100644 index 0000000..bd759c7 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/api.rs @@ -0,0 +1,23 @@ +use std::net::SocketAddr; + +use nomos_utils::net::get_available_tcp_port; + +#[derive(Clone)] +pub struct GeneralApiConfig { + pub address: SocketAddr, + pub testing_http_address: SocketAddr, +} + +#[must_use] +pub fn create_api_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .map(|_| GeneralApiConfig { + address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap()) + .parse() + .unwrap(), + testing_http_address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap()) + .parse() + .unwrap(), + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/blend.rs b/testing-framework/configs/src/topology/configs/blend.rs new file mode 100644 index 0000000..2d1e0ea --- /dev/null +++ b/testing-framework/configs/src/topology/configs/blend.rs @@ -0,0 +1,72 @@ +use core::time::Duration; +use 
std::{num::NonZeroU64, str::FromStr as _}; + +use ed25519_dalek::SigningKey; +use nomos_blend_message::crypto::keys::Ed25519PrivateKey; +use nomos_blend_service::{ + core::backends::libp2p::Libp2pBlendBackendSettings as Libp2pCoreBlendBackendSettings, + edge::backends::libp2p::Libp2pBlendBackendSettings as Libp2pEdgeBlendBackendSettings, +}; +use nomos_libp2p::{Multiaddr, protocol_name::StreamProtocol}; +use num_bigint::BigUint; +use zksign::SecretKey; + +#[derive(Clone)] +pub struct GeneralBlendConfig { + pub backend_core: Libp2pCoreBlendBackendSettings, + pub backend_edge: Libp2pEdgeBlendBackendSettings, + pub private_key: Ed25519PrivateKey, + pub secret_zk_key: SecretKey, + pub signer: SigningKey, +} + +/// Builds blend configs for each node. +/// +/// # Panics +/// +/// Panics if the provided port strings cannot be parsed into valid `Multiaddr`s +/// or if any of the numeric blend parameters are zero, which would make the +/// libp2p configuration invalid. +#[must_use] +pub fn create_blend_configs(ids: &[[u8; 32]], ports: &[u16]) -> Vec { + ids.iter() + .zip(ports) + .map(|(id, port)| { + let signer = SigningKey::from_bytes(id); + + let private_key = Ed25519PrivateKey::from(*id); + // We need unique ZK secret keys, so we just derive them deterministically from + // the generated Ed25519 public keys, which are guaranteed to be unique because + // they are in turned derived from node ID. 
+ let secret_zk_key = + SecretKey::from(BigUint::from_bytes_le(private_key.public_key().as_bytes())); + GeneralBlendConfig { + backend_core: Libp2pCoreBlendBackendSettings { + listening_address: Multiaddr::from_str(&format!( + "/ip4/127.0.0.1/udp/{port}/quic-v1", + )) + .unwrap(), + core_peering_degree: 1..=3, + minimum_messages_coefficient: NonZeroU64::try_from(1) + .expect("Minimum messages coefficient cannot be zero."), + normalization_constant: 1.03f64 + .try_into() + .expect("Normalization constant cannot be negative."), + edge_node_connection_timeout: Duration::from_secs(1), + max_edge_node_incoming_connections: 300, + max_dial_attempts_per_peer: NonZeroU64::try_from(3) + .expect("Max dial attempts per peer cannot be zero."), + protocol_name: StreamProtocol::new("/blend/integration-tests"), + }, + backend_edge: Libp2pEdgeBlendBackendSettings { + max_dial_attempts_per_peer_per_message: 1.try_into().unwrap(), + protocol_name: StreamProtocol::new("/blend/integration-tests"), + replication_factor: 1.try_into().unwrap(), + }, + private_key, + secret_zk_key, + signer, + } + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/bootstrap.rs b/testing-framework/configs/src/topology/configs/bootstrap.rs new file mode 100644 index 0000000..14e51a5 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/bootstrap.rs @@ -0,0 +1,20 @@ +use std::time::Duration; + +#[derive(Clone)] +pub struct GeneralBootstrapConfig { + pub prolonged_bootstrap_period: Duration, +} + +pub const SHORT_PROLONGED_BOOTSTRAP_PERIOD: Duration = Duration::from_secs(1); + +#[must_use] +pub fn create_bootstrap_configs( + ids: &[[u8; 32]], + prolonged_bootstrap_period: Duration, +) -> Vec { + ids.iter() + .map(|_| GeneralBootstrapConfig { + prolonged_bootstrap_period, + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/consensus.rs b/testing-framework/configs/src/topology/configs/consensus.rs new file mode 100644 index 0000000..49c8292 
--- /dev/null +++ b/testing-framework/configs/src/topology/configs/consensus.rs @@ -0,0 +1,343 @@ +use std::{num::NonZero, sync::Arc}; + +use chain_leader::LeaderConfig; +use cryptarchia_engine::EpochConfig; +use ed25519_dalek::ed25519::signature::SignerMut as _; +use groth16::CompressedGroth16Proof; +use nomos_core::{ + mantle::{ + MantleTx, Note, OpProof, Utxo, + genesis_tx::GenesisTx, + ledger::Tx as LedgerTx, + ops::{ + Op, + channel::{ChannelId, Ed25519PublicKey, MsgId, inscribe::InscriptionOp}, + }, + }, + sdp::{DeclarationMessage, Locator, ProviderId, ServiceParameters, ServiceType}, +}; +use nomos_node::{SignedMantleTx, Transaction as _}; +use num_bigint::BigUint; +use zksign::{PublicKey, SecretKey}; + +use super::wallet::{WalletAccount, WalletConfig}; + +#[derive(Clone)] +pub struct ConsensusParams { + pub n_participants: usize, + pub security_param: NonZero, + pub active_slot_coeff: f64, +} + +impl ConsensusParams { + #[must_use] + pub const fn default_for_participants(n_participants: usize) -> Self { + Self { + n_participants, + // by setting the slot coeff to 1, we also increase the probability of multiple blocks + // (forks) being produced in the same slot (epoch). Setting the security + // parameter to some value > 1 ensures nodes have some time to sync before + // deciding on the longest chain. 
+ security_param: NonZero::new(10).unwrap(), + // a block should be produced (on average) every slot + active_slot_coeff: 0.9, + } + } +} + +#[derive(Clone)] +pub struct ProviderInfo { + pub service_type: ServiceType, + pub provider_sk: ed25519_dalek::SigningKey, + pub zk_sk: SecretKey, + pub locator: Locator, + pub note: ServiceNote, +} + +impl ProviderInfo { + #[must_use] + pub fn provider_id(&self) -> ProviderId { + ProviderId(self.provider_sk.verifying_key()) + } + + #[must_use] + pub fn zk_id(&self) -> PublicKey { + self.zk_sk.to_public_key() + } +} + +/// General consensus configuration for a chosen participant, that later could +/// be converted into a specific service or services configuration. +#[derive(Clone)] +pub struct GeneralConsensusConfig { + pub leader_config: LeaderConfig, + pub ledger_config: nomos_ledger::Config, + pub genesis_tx: GenesisTx, + pub utxos: Vec, + pub blend_notes: Vec, + pub da_notes: Vec, + pub wallet_accounts: Vec, +} + +#[derive(Clone)] +pub struct ServiceNote { + pub pk: PublicKey, + pub sk: SecretKey, + pub note: Note, + pub output_index: usize, +} + +fn create_genesis_tx(utxos: &[Utxo]) -> GenesisTx { + // Create a genesis inscription op (similar to config.yaml) + let inscription = InscriptionOp { + channel_id: ChannelId::from([0; 32]), + inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes + parent: MsgId::root(), + signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(), + }; + + // Create ledger transaction with the utxos as outputs + let outputs: Vec = utxos.iter().map(|u| u.note).collect(); + let ledger_tx = LedgerTx::new(vec![], outputs); + + // Create the mantle transaction + let mantle_tx = MantleTx { + ops: vec![Op::ChannelInscribe(inscription)], + ledger_tx, + execution_gas_price: 0, + storage_gas_price: 0, + }; + let signed_mantle_tx = SignedMantleTx { + mantle_tx, + ops_proofs: vec![OpProof::NoProof], + ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])), 
+ }; + + // Wrap in GenesisTx + GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction") +} + +#[must_use] +pub fn create_consensus_configs( + ids: &[[u8; 32]], + consensus_params: &ConsensusParams, + wallet: &WalletConfig, +) -> Vec { + let mut leader_keys = Vec::new(); + let mut blend_notes = Vec::new(); + let mut da_notes = Vec::new(); + + let utxos = create_utxos_for_leader_and_services( + ids, + &mut leader_keys, + &mut blend_notes, + &mut da_notes, + ); + let utxos = append_wallet_utxos(utxos, wallet); + let genesis_tx = create_genesis_tx(&utxos); + let ledger_config = nomos_ledger::Config { + epoch_config: EpochConfig { + epoch_stake_distribution_stabilization: NonZero::new(3).unwrap(), + epoch_period_nonce_buffer: NonZero::new(3).unwrap(), + epoch_period_nonce_stabilization: NonZero::new(4).unwrap(), + }, + consensus_config: cryptarchia_engine::Config { + security_param: consensus_params.security_param, + active_slot_coeff: consensus_params.active_slot_coeff, + }, + sdp_config: nomos_ledger::mantle::sdp::Config { + service_params: Arc::new( + [ + ( + ServiceType::BlendNetwork, + ServiceParameters { + lock_period: 10, + inactivity_period: 20, + retention_period: 100, + timestamp: 0, + session_duration: 1000, + }, + ), + ( + ServiceType::DataAvailability, + ServiceParameters { + lock_period: 10, + inactivity_period: 20, + retention_period: 100, + timestamp: 0, + session_duration: 1000, + }, + ), + ] + .into(), + ), + min_stake: nomos_core::sdp::MinStake { + threshold: 1, + timestamp: 0, + }, + }, + }; + + leader_keys + .into_iter() + .map(|(pk, sk)| GeneralConsensusConfig { + leader_config: LeaderConfig { pk, sk }, + ledger_config: ledger_config.clone(), + genesis_tx: genesis_tx.clone(), + utxos: utxos.clone(), + da_notes: da_notes.clone(), + blend_notes: blend_notes.clone(), + wallet_accounts: wallet.accounts.clone(), + }) + .collect() +} + +fn create_utxos_for_leader_and_services( + ids: &[[u8; 32]], + leader_keys: &mut Vec<(PublicKey, 
SecretKey)>, + blend_notes: &mut Vec, + da_notes: &mut Vec, +) -> Vec { + let derive_key_material = |prefix: &[u8], id_bytes: &[u8]| -> [u8; 16] { + let mut sk_data = [0; 16]; + let prefix_len = prefix.len(); + + sk_data[..prefix_len].copy_from_slice(prefix); + let remaining_len = 16 - prefix_len; + sk_data[prefix_len..].copy_from_slice(&id_bytes[..remaining_len]); + + sk_data + }; + + let mut utxos = Vec::new(); + + // Assume output index which will be set by the ledger tx. + let mut output_index = 0; + + // Create notes for leader, Blend and DA declarations. + for &id in ids { + let sk_leader_data = derive_key_material(b"ld", &id); + let sk_leader = SecretKey::from(BigUint::from_bytes_le(&sk_leader_data)); + let pk_leader = sk_leader.to_public_key(); + leader_keys.push((pk_leader, sk_leader)); + utxos.push(Utxo { + note: Note::new(1_000, pk_leader), + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + + let sk_da_data = derive_key_material(b"da", &id); + let sk_da = SecretKey::from(BigUint::from_bytes_le(&sk_da_data)); + let pk_da = sk_da.to_public_key(); + let note_da = Note::new(1, pk_da); + da_notes.push(ServiceNote { + pk: pk_da, + sk: sk_da, + note: note_da, + output_index, + }); + utxos.push(Utxo { + note: note_da, + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + + let sk_blend_data = derive_key_material(b"bn", &id); + let sk_blend = SecretKey::from(BigUint::from_bytes_le(&sk_blend_data)); + let pk_blend = sk_blend.to_public_key(); + let note_blend = Note::new(1, pk_blend); + blend_notes.push(ServiceNote { + pk: pk_blend, + sk: sk_blend, + note: note_blend, + output_index, + }); + utxos.push(Utxo { + note: note_blend, + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + } + + utxos +} + +fn append_wallet_utxos(mut utxos: Vec, wallet: &WalletConfig) -> Vec { + for account in &wallet.accounts { + utxos.push(Utxo { + note: Note::new(account.value, 
account.public_key()), + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + } + + utxos +} + +#[must_use] +pub fn create_genesis_tx_with_declarations( + ledger_tx: LedgerTx, + providers: Vec, +) -> GenesisTx { + let inscription = InscriptionOp { + channel_id: ChannelId::from([0; 32]), + inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes + parent: MsgId::root(), + signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(), + }; + + let ledger_tx_hash = ledger_tx.hash(); + + let mut ops = vec![Op::ChannelInscribe(inscription)]; + + for provider in &providers { + let utxo = Utxo { + tx_hash: ledger_tx_hash, + output_index: provider.note.output_index, + note: provider.note.note, + }; + let declaration = DeclarationMessage { + service_type: provider.service_type, + locators: vec![provider.locator.clone()], + provider_id: provider.provider_id(), + zk_id: provider.zk_id(), + locked_note_id: utxo.id(), + }; + ops.push(Op::SDPDeclare(declaration)); + } + + let mantle_tx = MantleTx { + ops, + ledger_tx, + execution_gas_price: 0, + storage_gas_price: 0, + }; + + let mantle_tx_hash = mantle_tx.hash(); + let mut ops_proofs = vec![OpProof::NoProof]; + + for mut provider in providers { + let zk_sig = + SecretKey::multi_sign(&[provider.note.sk, provider.zk_sk], mantle_tx_hash.as_ref()) + .unwrap(); + let ed25519_sig = provider + .provider_sk + .sign(mantle_tx_hash.as_signing_bytes().as_ref()); + + ops_proofs.push(OpProof::ZkAndEd25519Sigs { + zk_sig, + ed25519_sig, + }); + } + + let signed_mantle_tx = SignedMantleTx { + mantle_tx, + ops_proofs, + ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])), + }; + + GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction") +} diff --git a/testing-framework/configs/src/topology/configs/da.rs b/testing-framework/configs/src/topology/configs/da.rs new file mode 100644 index 0000000..d22d81e --- /dev/null +++ 
b/testing-framework/configs/src/topology/configs/da.rs @@ -0,0 +1,242 @@ +use std::{ + collections::{HashMap, HashSet}, + env, + path::{Path, PathBuf}, + process, + str::FromStr as _, + sync::LazyLock, + time::Duration, +}; + +use ed25519_dalek::SigningKey; +use nomos_core::sdp::SessionNumber; +use nomos_da_network_core::swarm::{ + DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig, +}; +use nomos_libp2p::{Multiaddr, PeerId, ed25519}; +use nomos_node::NomosDaMembership; +use num_bigint::BigUint; +use rand::random; +use subnetworks_assignations::{MembershipCreator as _, MembershipHandler as _}; +use tracing::warn; +use zksign::SecretKey; + +use crate::secret_key_to_peer_id; + +pub static GLOBAL_PARAMS_PATH: LazyLock = LazyLock::new(resolve_global_params_path); + +fn resolve_global_params_path() -> String { + if let Ok(path) = env::var("NOMOS_KZGRS_PARAMS_PATH") { + return path; + } + + let workspace_root = env::var("CARGO_WORKSPACE_DIR") + .map(PathBuf::from) + .ok() + .or_else(|| { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .map(Path::to_path_buf) + }) + .unwrap_or_else(|| PathBuf::from(env!("CARGO_MANIFEST_DIR"))); + + let params_path = workspace_root.join("testing-framework/assets/stack/kzgrs_test_params"); + match params_path.canonicalize() { + Ok(path) => path.to_string_lossy().to_string(), + Err(err) => { + warn!( + ?err, + path = %params_path.display(), + "falling back to non-canonical KZG params path; set NOMOS_KZGRS_PARAMS_PATH to override" + ); + params_path.to_string_lossy().to_string() + } + } +} + +#[derive(Clone)] +pub struct DaParams { + pub subnetwork_size: usize, + pub dispersal_factor: usize, + pub num_samples: u16, + pub num_subnets: u16, + pub old_blobs_check_interval: Duration, + pub blobs_validity_duration: Duration, + pub global_params_path: String, + pub policy_settings: DAConnectionPolicySettings, + pub monitor_settings: DAConnectionMonitorSettings, + pub balancer_interval: 
Duration, + pub redial_cooldown: Duration, + pub replication_settings: ReplicationConfig, + pub subnets_refresh_interval: Duration, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, +} + +impl Default for DaParams { + fn default() -> Self { + Self { + subnetwork_size: 2, + dispersal_factor: 1, + num_samples: 1, + num_subnets: 2, + old_blobs_check_interval: Duration::from_secs(5), + blobs_validity_duration: Duration::from_secs(60), + global_params_path: GLOBAL_PARAMS_PATH.to_string(), + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 1, + min_replication_peers: 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + monitor_settings: DAConnectionMonitorSettings { + failure_time_window: Duration::from_secs(5), + ..Default::default() + }, + balancer_interval: Duration::from_secs(1), + redial_cooldown: Duration::ZERO, + replication_settings: ReplicationConfig { + seen_message_cache_size: 1000, + seen_message_ttl: Duration::from_secs(3600), + }, + subnets_refresh_interval: Duration::from_secs(30), + retry_shares_limit: 1, + retry_commitments_limit: 1, + } + } +} + +#[derive(Debug, Clone)] +pub struct GeneralDaConfig { + pub node_key: ed25519::SecretKey, + pub signer: SigningKey, + pub peer_id: PeerId, + pub membership: NomosDaMembership, + pub listening_address: Multiaddr, + pub blob_storage_directory: PathBuf, + pub global_params_path: String, + pub verifier_sk: String, + pub verifier_index: HashSet, + pub num_samples: u16, + pub num_subnets: u16, + pub old_blobs_check_interval: Duration, + pub blobs_validity_duration: Duration, + pub policy_settings: DAConnectionPolicySettings, + pub monitor_settings: DAConnectionMonitorSettings, + pub balancer_interval: Duration, + pub redial_cooldown: Duration, + pub replication_settings: ReplicationConfig, + pub subnets_refresh_interval: Duration, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, + pub 
secret_zk_key: SecretKey, +} + +#[must_use] +pub fn create_da_configs( + ids: &[[u8; 32]], + da_params: &DaParams, + ports: &[u16], +) -> Vec { + let mut node_keys = vec![]; + let mut peer_ids = vec![]; + let mut listening_addresses = vec![]; + + for (i, id) in ids.iter().enumerate() { + let mut node_key_bytes = *id; + let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes) + .expect("Failed to generate secret key from bytes"); + node_keys.push(node_key.clone()); + + let peer_id = secret_key_to_peer_id(node_key); + peer_ids.push(peer_id); + + let listening_address = + Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}/quic-v1", ports[i],)) + .expect("Failed to create multiaddr"); + listening_addresses.push(listening_address); + } + + let membership = { + let template = NomosDaMembership::new( + SessionNumber::default(), + da_params.subnetwork_size, + da_params.dispersal_factor, + ); + let mut assignations: HashMap> = HashMap::new(); + if peer_ids.is_empty() { + for id in 0..da_params.subnetwork_size { + assignations.insert(u16::try_from(id).unwrap_or_default(), HashSet::new()); + } + } else { + let mut sorted_peers = peer_ids.clone(); + sorted_peers.sort_unstable(); + let dispersal = da_params.dispersal_factor.max(1); + let mut peer_cycle = sorted_peers.iter().cycle(); + for id in 0..da_params.subnetwork_size { + let mut members = HashSet::new(); + for _ in 0..dispersal { + // cycle() only yields None when the iterator is empty, which we guard against. 
+ if let Some(peer) = peer_cycle.next() { + members.insert(*peer); + } + } + assignations.insert(u16::try_from(id).unwrap_or_default(), members); + } + } + + template.init(SessionNumber::default(), assignations) + }; + + ids.iter() + .zip(node_keys) + .enumerate() + .map(|(i, (id, node_key))| { + let blob_storage_directory = env::temp_dir().join(format!( + "nomos-da-blob-{}-{i}-{}", + process::id(), + random::() + )); + let _ = std::fs::create_dir_all(&blob_storage_directory); + let verifier_sk = blst::min_sig::SecretKey::key_gen(id, &[]).unwrap(); + let verifier_sk_bytes = verifier_sk.to_bytes(); + let peer_id = peer_ids[i]; + let signer = SigningKey::from_bytes(id); + let subnetwork_ids = membership.membership(&peer_id); + + // We need unique ZK secret keys, so we just derive them deterministically from + // the generated Ed25519 public keys, which are guaranteed to be unique because + // they are in turned derived from node ID. + let secret_zk_key = + SecretKey::from(BigUint::from_bytes_le(signer.verifying_key().as_bytes())); + + GeneralDaConfig { + node_key, + signer, + peer_id, + secret_zk_key, + membership: membership.clone(), + listening_address: listening_addresses[i].clone(), + blob_storage_directory, + global_params_path: da_params.global_params_path.clone(), + verifier_sk: hex::encode(verifier_sk_bytes), + verifier_index: subnetwork_ids, + num_samples: da_params.num_samples, + num_subnets: da_params.num_subnets, + old_blobs_check_interval: da_params.old_blobs_check_interval, + blobs_validity_duration: da_params.blobs_validity_duration, + policy_settings: da_params.policy_settings.clone(), + monitor_settings: da_params.monitor_settings.clone(), + balancer_interval: da_params.balancer_interval, + redial_cooldown: da_params.redial_cooldown, + replication_settings: da_params.replication_settings, + subnets_refresh_interval: da_params.subnets_refresh_interval, + retry_shares_limit: da_params.retry_shares_limit, + retry_commitments_limit: 
da_params.retry_commitments_limit, + } + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/deployment.rs b/testing-framework/configs/src/topology/configs/deployment.rs new file mode 100644 index 0000000..6d6d3fe --- /dev/null +++ b/testing-framework/configs/src/topology/configs/deployment.rs @@ -0,0 +1,67 @@ +use core::{num::NonZeroU64, time::Duration}; + +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings}, + settings::TimingSettings, +}; +use nomos_libp2p::protocol_name::StreamProtocol; +use nomos_node::config::{ + blend::deployment::{ + CommonSettings as BlendCommonSettings, CoreSettings as BlendCoreSettings, + Settings as BlendDeploymentSettings, + }, + deployment::{CustomDeployment, Settings as DeploymentSettings}, + network::deployment::Settings as NetworkDeploymentSettings, +}; +use nomos_utils::math::NonNegativeF64; + +#[must_use] +pub fn default_e2e_deployment_settings() -> DeploymentSettings { + DeploymentSettings::Custom(CustomDeployment { + blend: BlendDeploymentSettings { + common: BlendCommonSettings { + minimum_network_size: NonZeroU64::try_from(30u64) + .expect("Minimum network size cannot be zero."), + num_blend_layers: NonZeroU64::try_from(3) + .expect("Number of blend layers cannot be zero."), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64) + .expect("Rounds per interval cannot be zero."), + // (21,600 blocks * 30s per block) / 1s per round = 648,000 rounds + rounds_per_session: NonZeroU64::try_from(648_000u64) + .expect("Rounds per session cannot be zero."), + rounds_per_observation_window: NonZeroU64::try_from(30u64) + .expect("Rounds per observation window cannot be zero."), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64) + .expect("Rounds per session transition period cannot be zero."), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600) + .expect("Epoch 
transition period in slots cannot be zero."), + }, + protocol_name: StreamProtocol::new("/blend/integration-tests"), + }, + core: BlendCoreSettings { + minimum_messages_coefficient: NonZeroU64::try_from(1) + .expect("Minimum messages coefficient cannot be zero."), + normalization_constant: 1.03f64 + .try_into() + .expect("Normalization constant cannot be negative."), + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: NonNegativeF64::try_from(1f64) + .expect("Message frequency per round cannot be negative."), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64) + .expect("Maximum release delay between rounds cannot be zero."), + }, + }, + }, + }, + network: NetworkDeploymentSettings { + identify_protocol_name: StreamProtocol::new("/integration/nomos/identify/1.0.0"), + kademlia_protocol_name: StreamProtocol::new("/integration/nomos/kad/1.0.0"), + }, + }) +} diff --git a/testing-framework/configs/src/topology/configs/mod.rs b/testing-framework/configs/src/topology/configs/mod.rs new file mode 100644 index 0000000..2a73e58 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/mod.rs @@ -0,0 +1,164 @@ +pub mod api; +pub mod blend; +pub mod bootstrap; +pub mod consensus; +pub mod da; +pub mod network; +pub mod time; +pub mod tracing; +pub mod wallet; + +use blend::GeneralBlendConfig; +use consensus::{GeneralConsensusConfig, ProviderInfo, create_genesis_tx_with_declarations}; +use da::GeneralDaConfig; +use key_management_system::{ + backend::preload::PreloadKMSBackendSettings, + keys::{Ed25519Key, Key, ZkKey}, +}; +use network::GeneralNetworkConfig; +use nomos_core::{ + mantle::GenesisTx as _, + sdp::{Locator, ServiceType}, +}; +use nomos_utils::net::get_available_udp_port; +use rand::{Rng as _, thread_rng}; +use tracing::GeneralTracingConfig; +use wallet::WalletConfig; + +use crate::{ + common::kms::key_id_for_preload_backend, + 
topology::configs::{ + api::GeneralApiConfig, + bootstrap::{GeneralBootstrapConfig, SHORT_PROLONGED_BOOTSTRAP_PERIOD}, + consensus::ConsensusParams, + da::DaParams, + network::NetworkParams, + time::GeneralTimeConfig, + }, +}; + +#[derive(Clone)] +pub struct GeneralConfig { + pub api_config: GeneralApiConfig, + pub consensus_config: GeneralConsensusConfig, + pub bootstrapping_config: GeneralBootstrapConfig, + pub da_config: GeneralDaConfig, + pub network_config: GeneralNetworkConfig, + pub blend_config: GeneralBlendConfig, + pub tracing_config: GeneralTracingConfig, + pub time_config: GeneralTimeConfig, + pub kms_config: PreloadKMSBackendSettings, +} + +#[must_use] +pub fn create_general_configs(n_nodes: usize) -> Vec { + create_general_configs_with_network(n_nodes, &NetworkParams::default()) +} + +#[must_use] +pub fn create_general_configs_with_network( + n_nodes: usize, + network_params: &NetworkParams, +) -> Vec { + create_general_configs_with_blend_core_subset(n_nodes, n_nodes, network_params) +} + +#[must_use] +pub fn create_general_configs_with_blend_core_subset( + n_nodes: usize, + // TODO: Instead of this, define a config struct for each node. + // That would be also useful for non-even token distributions: https://github.com/logos-co/nomos/issues/1888 + n_blend_core_nodes: usize, + network_params: &NetworkParams, +) -> Vec { + assert!( + n_blend_core_nodes <= n_nodes, + "n_blend_core_nodes({n_blend_core_nodes}) must be less than or equal to n_nodes({n_nodes})", + ); + + // Blend relies on each node declaring a different ZK public key, so we need + // different IDs to generate different keys. 
+ let mut ids: Vec<_> = (0..n_nodes).map(|i| [i as u8; 32]).collect(); + let mut da_ports = vec![]; + let mut blend_ports = vec![]; + + for id in &mut ids { + thread_rng().fill(id); + da_ports.push(get_available_udp_port().unwrap()); + blend_ports.push(get_available_udp_port().unwrap()); + } + + let consensus_params = ConsensusParams::default_for_participants(n_nodes); + let mut consensus_configs = + consensus::create_consensus_configs(&ids, &consensus_params, &WalletConfig::default()); + let bootstrap_config = + bootstrap::create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let network_configs = network::create_network_configs(&ids, network_params); + let da_configs = da::create_da_configs(&ids, &DaParams::default(), &da_ports); + let api_configs = api::create_api_configs(&ids); + let blend_configs = blend::create_blend_configs(&ids, &blend_ports); + let tracing_configs = tracing::create_tracing_configs(&ids); + let time_config = time::default_time_config(); + + let providers: Vec<_> = blend_configs + .iter() + .enumerate() + .take(n_blend_core_nodes) + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }) + .collect(); + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + // Set Blend and DA keys in KMS of each node config. 
+ let kms_configs: Vec<_> = blend_configs + .iter() + .map(|blend_conf| { + let ed_key = Ed25519Key::new(blend_conf.signer.clone()); + let zk_key = ZkKey::new(blend_conf.secret_zk_key.clone()); + PreloadKMSBackendSettings { + keys: [ + ( + key_id_for_preload_backend(&Key::from(ed_key.clone())), + Key::from(ed_key), + ), + ( + key_id_for_preload_backend(&Key::from(zk_key.clone())), + Key::from(zk_key), + ), + ] + .into(), + } + }) + .collect(); + + let mut general_configs = vec![]; + + for i in 0..n_nodes { + general_configs.push(GeneralConfig { + api_config: api_configs[i].clone(), + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrap_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }); + } + + general_configs +} diff --git a/testing-framework/configs/src/topology/configs/network.rs b/testing-framework/configs/src/topology/configs/network.rs new file mode 100644 index 0000000..2e8fe2a --- /dev/null +++ b/testing-framework/configs/src/topology/configs/network.rs @@ -0,0 +1,116 @@ +use std::time::Duration; + +use nomos_libp2p::{ + IdentifySettings, KademliaSettings, Multiaddr, NatSettings, ed25519, gossipsub, +}; +use nomos_node::config::network::serde::{BackendSettings, Config, SwarmConfig}; +use nomos_utils::net::get_available_udp_port; + +use crate::node_address_from_port; + +#[derive(Default, Clone)] +pub enum Libp2pNetworkLayout { + #[default] + Star, + Chain, + Full, +} + +#[derive(Default, Clone)] +pub struct NetworkParams { + pub libp2p_network_layout: Libp2pNetworkLayout, +} + +pub type GeneralNetworkConfig = Config; + +fn default_swarm_config() -> SwarmConfig { + SwarmConfig { + host: std::net::Ipv4Addr::UNSPECIFIED, + port: 60000, + node_key: ed25519::SecretKey::generate(), + gossipsub_config: 
gossipsub::Config::default(), + kademlia_config: KademliaSettings::default(), + identify_config: IdentifySettings::default(), + chain_sync_config: cryptarchia_sync::Config::default(), + nat_config: NatSettings::default(), + } +} + +#[must_use] +pub fn create_network_configs( + ids: &[[u8; 32]], + network_params: &NetworkParams, +) -> Vec { + let swarm_configs: Vec = ids + .iter() + .map(|id| { + let mut node_key_bytes = *id; + let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes) + .expect("Failed to generate secret key from bytes"); + + SwarmConfig { + node_key, + port: get_available_udp_port().unwrap(), + chain_sync_config: cryptarchia_sync::Config { + peer_response_timeout: Duration::from_secs(60), + }, + ..default_swarm_config() + } + }) + .collect(); + + let all_initial_peers = initial_peers_by_network_layout(&swarm_configs, network_params); + + swarm_configs + .iter() + .zip(all_initial_peers) + .map(|(swarm_config, initial_peers)| GeneralNetworkConfig { + backend: BackendSettings { + initial_peers, + inner: swarm_config.to_owned(), + }, + }) + .collect() +} + +fn initial_peers_by_network_layout( + swarm_configs: &[SwarmConfig], + network_params: &NetworkParams, +) -> Vec> { + let mut all_initial_peers = vec![]; + + match network_params.libp2p_network_layout { + Libp2pNetworkLayout::Star => { + // First node is the hub - has no initial peers + all_initial_peers.push(vec![]); + let first_addr = node_address_from_port(swarm_configs[0].port); + + // All other nodes connect to the first node + for _ in 1..swarm_configs.len() { + all_initial_peers.push(vec![first_addr.clone()]); + } + } + Libp2pNetworkLayout::Chain => { + // First node has no initial peers + all_initial_peers.push(vec![]); + + // Each subsequent node connects to the previous one + for i in 1..swarm_configs.len() { + let prev_addr = node_address_from_port(swarm_configs[i - 1].port); + all_initial_peers.push(vec![prev_addr]); + } + } + Libp2pNetworkLayout::Full => { + // Each node 
connects to all previous nodes, unidirectional connections + for i in 0..swarm_configs.len() { + let mut peers = vec![]; + for swarm_config in swarm_configs.iter().take(i) { + peers.push(node_address_from_port(swarm_config.port)); + } + all_initial_peers.push(peers); + } + } + } + + all_initial_peers +} diff --git a/testing-framework/configs/src/topology/configs/time.rs b/testing-framework/configs/src/topology/configs/time.rs new file mode 100644 index 0000000..e6f65c7 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/time.rs @@ -0,0 +1,35 @@ +use std::{ + net::{IpAddr, Ipv4Addr}, + str::FromStr as _, + time::Duration, +}; + +use time::OffsetDateTime; + +const DEFAULT_SLOT_TIME: u64 = 2; +const CONSENSUS_SLOT_TIME_VAR: &str = "CONSENSUS_SLOT_TIME"; + +#[derive(Clone, Debug)] +pub struct GeneralTimeConfig { + pub slot_duration: Duration, + pub chain_start_time: OffsetDateTime, + pub ntp_server: String, + pub timeout: Duration, + pub interface: IpAddr, + pub update_interval: Duration, +} + +#[must_use] +pub fn default_time_config() -> GeneralTimeConfig { + let slot_duration = std::env::var(CONSENSUS_SLOT_TIME_VAR) + .map(|s| ::from_str(&s).unwrap()) + .unwrap_or(DEFAULT_SLOT_TIME); + GeneralTimeConfig { + slot_duration: Duration::from_secs(slot_duration), + chain_start_time: OffsetDateTime::now_utc(), + ntp_server: String::from("pool.ntp.org"), + timeout: Duration::from_secs(5), + interface: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + update_interval: Duration::from_secs(16), + } +} diff --git a/testing-framework/configs/src/topology/configs/tracing.rs b/testing-framework/configs/src/topology/configs/tracing.rs new file mode 100644 index 0000000..efdc3f5 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/tracing.rs @@ -0,0 +1,148 @@ +use std::{env, path::PathBuf}; + +use nomos_tracing::{ + logging::{local::FileConfig, loki::LokiConfig}, + metrics::otlp::OtlpMetricsConfig, + tracing::otlp::OtlpTracingConfig, +}; +use nomos_tracing_service::{ 
+ ConsoleLayer, FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings, +}; +use tracing::Level; + +use crate::IS_DEBUG_TRACING; + +#[derive(Clone, Default)] +pub struct GeneralTracingConfig { + pub tracing_settings: TracingSettings, +} + +impl GeneralTracingConfig { + fn local_debug_tracing(id: usize) -> Self { + let host_identifier = format!("node-{id}"); + let otlp_tracing = otlp_tracing_endpoint() + .and_then(|endpoint| endpoint.parse().ok()) + .map(|endpoint| { + TracingLayer::Otlp(OtlpTracingConfig { + endpoint, + sample_ratio: 0.5, + service_name: host_identifier.clone(), + }) + }) + .unwrap_or(TracingLayer::None); + let otlp_metrics = otlp_metrics_endpoint() + .and_then(|endpoint| endpoint.parse().ok()) + .map(|endpoint| { + MetricsLayer::Otlp(OtlpMetricsConfig { + endpoint, + host_identifier: host_identifier.clone(), + }) + }) + .unwrap_or(MetricsLayer::None); + + let filter = file_filter_override().unwrap_or_else(|| { + nomos_tracing::filter::envfilter::EnvFilterConfig { + filters: std::iter::once(&("nomos", "debug")) + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())) + .collect(), + } + }); + + Self { + tracing_settings: TracingSettings { + logger: LoggerLayer::Loki(LokiConfig { + endpoint: "http://localhost:3100".try_into().unwrap(), + host_identifier: host_identifier.clone(), + }), + tracing: otlp_tracing, + filter: FilterLayer::EnvFilter(filter), + metrics: otlp_metrics, + console: ConsoleLayer::None, + level: Level::DEBUG, + }, + } + } +} + +fn otlp_tracing_endpoint() -> Option { + env::var("NOMOS_OTLP_ENDPOINT").ok() +} + +fn otlp_metrics_endpoint() -> Option { + env::var("NOMOS_OTLP_METRICS_ENDPOINT").ok() +} + +#[must_use] +pub fn create_tracing_configs(ids: &[[u8; 32]]) -> Vec { + if *IS_DEBUG_TRACING { + create_debug_configs(ids) + } else { + create_default_configs(ids) + } +} + +fn create_debug_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .enumerate() + .map(|(i, _)| (i, GeneralTracingConfig::local_debug_tracing(i))) + 
.map(|(i, cfg)| apply_file_logger_override(cfg, i)) + .map(maybe_disable_otlp_layers) + .collect() +} + +fn create_default_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .enumerate() + .map(|(i, _)| (i, GeneralTracingConfig::default())) + .map(|(i, cfg)| apply_file_logger_override(cfg, i)) + .map(maybe_disable_otlp_layers) + .collect() +} + +fn apply_file_logger_override( + mut cfg: GeneralTracingConfig, + node_index: usize, +) -> GeneralTracingConfig { + if let Ok(dir) = std::env::var("NOMOS_LOG_DIR") { + let directory = PathBuf::from(dir); + cfg.tracing_settings.logger = LoggerLayer::File(FileConfig { + directory, + prefix: Some(format!("nomos-node-{node_index}").into()), + }); + cfg.tracing_settings.level = file_log_level(); + } + cfg +} + +fn file_log_level() -> Level { + env::var("NOMOS_LOG_LEVEL") + .ok() + .and_then(|raw| raw.parse::().ok()) + .unwrap_or(Level::INFO) +} + +fn file_filter_override() -> Option { + env::var("NOMOS_LOG_FILTER") + .ok() + .map(|raw| nomos_tracing::filter::envfilter::EnvFilterConfig { + filters: raw + .split(',') + .filter_map(|pair| { + let mut parts = pair.splitn(2, '='); + let target = parts.next()?.trim().to_string(); + let level = parts.next()?.trim().to_string(); + (!target.is_empty() && !level.is_empty()).then_some((target, level)) + }) + .collect(), + }) +} + +fn maybe_disable_otlp_layers(mut cfg: GeneralTracingConfig) -> GeneralTracingConfig { + if otlp_tracing_endpoint().is_none() { + cfg.tracing_settings.tracing = TracingLayer::None; + } + if otlp_metrics_endpoint().is_none() { + cfg.tracing_settings.metrics = MetricsLayer::None; + } + cfg +} diff --git a/testing-framework/configs/src/topology/configs/wallet.rs b/testing-framework/configs/src/topology/configs/wallet.rs new file mode 100644 index 0000000..74648b5 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/wallet.rs @@ -0,0 +1,79 @@ +use std::num::NonZeroUsize; + +use num_bigint::BigUint; +use zksign::{PublicKey, SecretKey}; + +/// Collection of 
wallet accounts that should be funded at genesis. +#[derive(Clone, Default, Debug, serde::Serialize, serde::Deserialize)] +pub struct WalletConfig { + pub accounts: Vec, +} + +impl WalletConfig { + #[must_use] + pub const fn new(accounts: Vec) -> Self { + Self { accounts } + } + + #[must_use] + pub fn uniform(total_funds: u64, users: NonZeroUsize) -> Self { + let user_count = users.get() as u64; + assert!(user_count > 0, "wallet user count must be non-zero"); + assert!( + total_funds >= user_count, + "wallet funds must allocate at least 1 token per user" + ); + + let base_allocation = total_funds / user_count; + let mut remainder = total_funds % user_count; + + let accounts = (0..users.get()) + .map(|idx| { + let mut amount = base_allocation; + if remainder > 0 { + amount += 1; + remainder -= 1; + } + + WalletAccount::deterministic(idx as u64, amount) + }) + .collect(); + + Self { accounts } + } +} + +/// Wallet account that holds funds in the genesis state. +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct WalletAccount { + pub label: String, + pub secret_key: SecretKey, + pub value: u64, +} + +impl WalletAccount { + #[must_use] + pub fn new(label: impl Into, secret_key: SecretKey, value: u64) -> Self { + assert!(value > 0, "wallet account value must be positive"); + Self { + label: label.into(), + secret_key, + value, + } + } + + #[must_use] + pub fn deterministic(index: u64, value: u64) -> Self { + let mut seed = [0u8; 32]; + seed[..2].copy_from_slice(b"wl"); + seed[2..10].copy_from_slice(&index.to_le_bytes()); + + let secret_key = SecretKey::from(BigUint::from_bytes_le(&seed)); + Self::new(format!("wallet-user-{index}"), secret_key, value) + } + + #[must_use] + pub fn public_key(&self) -> PublicKey { + self.secret_key.to_public_key() + } +} diff --git a/testing-framework/configs/src/topology/mod.rs b/testing-framework/configs/src/topology/mod.rs new file mode 100644 index 0000000..3810d5b --- /dev/null +++ 
b/testing-framework/configs/src/topology/mod.rs @@ -0,0 +1 @@ +pub mod configs; diff --git a/testing-framework/core/Cargo.toml b/testing-framework/core/Cargo.toml new file mode 100644 index 0000000..759db31 --- /dev/null +++ b/testing-framework/core/Cargo.toml @@ -0,0 +1,52 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-core" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[features] +default = [] + +[dependencies] +anyhow = "1" +async-trait = "0.1" +broadcast-service = { workspace = true } +chain-service = { workspace = true } +common-http-client = { workspace = true } +futures = { default-features = false, version = "0.3" } +groth16 = { workspace = true } +hex = { version = "0.4.3", default-features = false } +key-management-system = { workspace = true } +kzgrs-backend = { workspace = true } +nomos-core = { workspace = true } +nomos-da-network-core = { workspace = true } +nomos-da-network-service = { workspace = true } +nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] } +nomos-http-api-common = { workspace = true } +nomos-libp2p = { workspace = true } +nomos-network = { workspace = true, features = ["libp2p"] } +nomos-node = { workspace = true, default-features = false, features = ["testing"] } +nomos-tracing = { workspace = true } +nomos-tracing-service = { workspace = true } +nomos-utils = { workspace = true } +prometheus-http-query = "0.8" +rand = { workspace = true } +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true } +serde_json = { workspace = true } +serde_with = { workspace = true } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +testing-framework-config = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "process", 
"rt-multi-thread", "time"] }
tracing = { workspace = true }
tx-service = { workspace = true, features = ["libp2p", "mock"] }
diff --git a/testing-framework/core/src/lib.rs new file mode 100644 index 0000000..d4d4c86 --- /dev/null +++ b/testing-framework/core/src/lib.rs @@ -0,0 +1,19 @@
pub mod nodes;
pub mod scenario;
pub mod topology;

use std::{env, sync::LazyLock, time::Duration};

pub use testing_framework_config::{
    IS_DEBUG_TRACING, node_address_from_port, secret_key_to_peer_id, secret_key_to_provider_id,
    topology::configs::da::GLOBAL_PARAMS_PATH,
};

/// True when the `SLOW_TEST_ENV=true` environment variable marks this as a
/// known-slow test runner (e.g. coverage CI).
static IS_SLOW_TEST_ENV: LazyLock<bool> =
    LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true"));

/// In slow test environments like Codecov, use 2x timeout.
#[must_use]
pub fn adjust_timeout(d: Duration) -> Duration {
    // `Duration * u32` is built in; no need to import `ops::Mul` and use
    // method-call syntax.
    if *IS_SLOW_TEST_ENV { d * 2 } else { d }
}
diff --git a/testing-framework/core/src/nodes/api_client.rs new file mode 100644 index 0000000..f9a69d8 --- /dev/null +++ b/testing-framework/core/src/nodes/api_client.rs @@ -0,0 +1,282 @@
use std::net::SocketAddr;

use chain_service::CryptarchiaInfo;
use common_http_client::CommonHttpClient;
use nomos_core::{block::Block, da::BlobId, mantle::SignedMantleTx, sdp::SessionNumber};
use nomos_da_network_core::swarm::{BalancerStats, MonitorStats};
use nomos_da_network_service::MembershipResponse;
use nomos_http_api_common::paths::{
    CRYPTARCHIA_INFO, DA_BALANCER_STATS, DA_BLACKLISTED_PEERS, DA_BLOCK_PEER, DA_GET_MEMBERSHIP,
    DA_HISTORIC_SAMPLING, DA_MONITOR_STATS, DA_UNBLOCK_PEER, MEMPOOL_ADD_TX, NETWORK_INFO,
    STORAGE_BLOCK,
};
use nomos_network::backends::libp2p::Libp2pInfo;
use nomos_node::{HeaderId, api::testing::handlers::HistoricSamplingRequest};
use reqwest::{Client, RequestBuilder, Response, Url};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::Value;

pub const
DA_GET_TESTING_ENDPOINT_ERROR: &str = "Failed to connect to testing endpoint. The binary was likely built without the 'testing' \ + feature. Try: cargo build --workspace --all-features"; + +/// Thin async client for node HTTP/testing endpoints. +#[derive(Clone)] +pub struct ApiClient { + pub(crate) base_url: Url, + pub(crate) testing_url: Option, + client: Client, + pub(crate) http_client: CommonHttpClient, +} + +impl ApiClient { + #[must_use] + /// Construct from socket addresses. + pub fn new(base_addr: SocketAddr, testing_addr: Option) -> Self { + let base_url = + Url::parse(&format!("http://{base_addr}")).expect("Valid base address for node"); + let testing_url = testing_addr + .map(|addr| Url::parse(&format!("http://{addr}")).expect("Valid testing address")); + Self::from_urls(base_url, testing_url) + } + + #[must_use] + /// Construct from prebuilt URLs. + pub fn from_urls(base_url: Url, testing_url: Option) -> Self { + let client = Client::new(); + Self { + base_url, + testing_url, + http_client: CommonHttpClient::new_with_client(client.clone(), None), + client, + } + } + + #[must_use] + /// Testing URL, when built with testing features. + pub fn testing_url(&self) -> Option { + self.testing_url.clone() + } + + /// Build a GET request against the base API. + pub fn get_builder(&self, path: &str) -> RequestBuilder { + self.client.get(self.join_base(path)) + } + + /// Issue a GET request against the base API. + pub async fn get_response(&self, path: &str) -> reqwest::Result { + self.client.get(self.join_base(path)).send().await + } + + /// GET and decode JSON from the base API. + pub async fn get_json(&self, path: &str) -> reqwest::Result + where + T: DeserializeOwned, + { + self.get_response(path) + .await? + .error_for_status()? + .json() + .await + } + + /// POST JSON to the base API and decode a response. 
+ pub async fn post_json_decode(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + R: DeserializeOwned, + { + self.post_json_response(path, body) + .await? + .error_for_status()? + .json() + .await + } + + /// POST JSON to the base API and return the raw response. + pub async fn post_json_response(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + { + self.client + .post(self.join_base(path)) + .json(body) + .send() + .await + } + + /// POST JSON to the base API and expect a success status. + pub async fn post_json_unit(&self, path: &str, body: &T) -> reqwest::Result<()> + where + T: Serialize + Sync + ?Sized, + { + self.post_json_response(path, body) + .await? + .error_for_status()?; + Ok(()) + } + + /// GET and decode JSON from the testing API. + pub async fn get_testing_json(&self, path: &str) -> reqwest::Result + where + T: DeserializeOwned, + { + self.get_testing_response(path) + .await? + .error_for_status()? + .json() + .await + } + + /// POST JSON to the testing API and decode a response. + pub async fn post_testing_json_decode(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + R: DeserializeOwned, + { + self.post_testing_json_response(path, body) + .await? + .error_for_status()? + .json() + .await + } + + /// POST JSON to the testing API and expect a success status. + pub async fn post_testing_json_unit(&self, path: &str, body: &T) -> reqwest::Result<()> + where + T: Serialize + Sync + ?Sized, + { + self.post_testing_json_response(path, body) + .await? + .error_for_status()?; + Ok(()) + } + + /// POST JSON to the testing API and return the raw response. 
+ pub async fn post_testing_json_response( + &self, + path: &str, + body: &T, + ) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + { + let testing_url = self + .testing_url + .as_ref() + .expect(DA_GET_TESTING_ENDPOINT_ERROR); + self.client + .post(Self::join_url(testing_url, path)) + .json(body) + .send() + .await + } + + /// GET from the testing API and return the raw response. + pub async fn get_testing_response(&self, path: &str) -> reqwest::Result { + let testing_url = self + .testing_url + .as_ref() + .expect(DA_GET_TESTING_ENDPOINT_ERROR); + self.client + .get(Self::join_url(testing_url, path)) + .send() + .await + } + + /// Block a peer via the DA testing API. + pub async fn block_peer(&self, peer_id: &str) -> reqwest::Result { + self.post_json_decode(DA_BLOCK_PEER, &peer_id).await + } + + /// Unblock a peer via the DA testing API. + pub async fn unblock_peer(&self, peer_id: &str) -> reqwest::Result { + self.post_json_decode(DA_UNBLOCK_PEER, &peer_id).await + } + + /// Fetch the list of blacklisted peers. + pub async fn blacklisted_peers(&self) -> reqwest::Result> { + self.get_json(DA_BLACKLISTED_PEERS).await + } + + /// Fetch balancer stats from DA API. + pub async fn balancer_stats(&self) -> reqwest::Result { + self.get_json(DA_BALANCER_STATS).await + } + + /// Fetch monitor stats from DA API. + pub async fn monitor_stats(&self) -> reqwest::Result { + self.get_json(DA_MONITOR_STATS).await + } + + /// Fetch consensus info from the base API. + pub async fn consensus_info(&self) -> reqwest::Result { + self.get_json(CRYPTARCHIA_INFO).await + } + + /// Fetch libp2p network info. + pub async fn network_info(&self) -> reqwest::Result { + self.get_json(NETWORK_INFO).await + } + + /// Fetch a block by hash from storage. + pub async fn storage_block( + &self, + id: &HeaderId, + ) -> reqwest::Result>> { + self.post_json_decode(STORAGE_BLOCK, id).await + } + + /// Query DA membership via testing API. 
+ pub async fn da_get_membership( + &self, + session_id: &SessionNumber, + ) -> reqwest::Result { + self.post_testing_json_decode(DA_GET_MEMBERSHIP, session_id) + .await + } + + /// Query historic sampling via testing API. + pub async fn da_historic_sampling( + &self, + request: &HistoricSamplingRequest, + ) -> reqwest::Result { + self.post_testing_json_decode(DA_HISTORIC_SAMPLING, request) + .await + } + + /// Submit a mantle transaction through the base API. + pub async fn submit_transaction(&self, tx: &SignedMantleTx) -> reqwest::Result<()> { + self.post_json_unit(MEMPOOL_ADD_TX, tx).await + } + + /// Execute a custom request built by the caller. + pub async fn get_headers_raw(&self, builder: RequestBuilder) -> reqwest::Result { + builder.send().await + } + + /// Fetch raw mempool metrics from the testing endpoint. + pub async fn mempool_metrics(&self, pool: &str) -> reqwest::Result { + self.get_json(&format!("/{pool}/metrics")).await + } + + #[must_use] + /// Base API URL. + pub const fn base_url(&self) -> &Url { + &self.base_url + } + + #[must_use] + /// Underlying common HTTP client wrapper. 
+ pub const fn http_client(&self) -> &CommonHttpClient { + &self.http_client + } + + fn join_base(&self, path: &str) -> Url { + Self::join_url(&self.base_url, path) + } + + fn join_url(base: &Url, path: &str) -> Url { + let trimmed = path.trim_start_matches('/'); + base.join(trimmed).expect("valid relative path") + } +} diff --git a/testing-framework/core/src/nodes/executor.rs b/testing-framework/core/src/nodes/executor.rs new file mode 100644 index 0000000..c47da0d --- /dev/null +++ b/testing-framework/core/src/nodes/executor.rs @@ -0,0 +1,300 @@ +use std::{ + collections::HashSet, + path::PathBuf, + process::{Child, Command, Stdio}, + time::Duration, +}; + +use broadcast_service::BlockInfo; +use chain_service::CryptarchiaInfo; +use futures::Stream; +use kzgrs_backend::common::share::{DaLightShare, DaShare, DaSharesCommitments}; +use nomos_core::{ + block::Block, da::BlobId, header::HeaderId, mantle::SignedMantleTx, sdp::SessionNumber, +}; +use nomos_da_network_core::swarm::{BalancerStats, MonitorStats}; +use nomos_da_network_service::MembershipResponse; +use nomos_executor::config::Config; +use nomos_http_api_common::paths::{DA_GET_SHARES_COMMITMENTS, MANTLE_METRICS, MEMPOOL_ADD_TX}; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_node::api::testing::handlers::HistoricSamplingRequest; +use nomos_tracing::logging::local::FileConfig; +use nomos_tracing_service::LoggerLayer; +use reqwest::Url; +use serde_yaml::{Mapping, Number as YamlNumber, Value}; +pub use testing_framework_config::nodes::executor::create_executor_config; + +use super::{ApiClient, create_tempdir, persist_tempdir, should_persist_tempdir}; +use crate::{IS_DEBUG_TRACING, adjust_timeout, nodes::LOGS_PREFIX}; + +const BIN_PATH: &str = "target/debug/nomos-executor"; + +fn binary_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../") + .join(BIN_PATH) +} + +pub struct Executor { + tempdir: tempfile::TempDir, + child: Child, + config: Config, + api: ApiClient, +} + 
/// Apply IBD-related defaults to the serialized node config before it is
/// written to disk. All tweaks target the `cryptarchia` section; when that
/// section is missing, the value is left untouched.
fn inject_ibd_into_cryptarchia(yaml_value: &mut Value) {
    let Some(cryptarchia) = cryptarchia_section(yaml_value) else {
        return;
    };
    ensure_network_adapter(cryptarchia);
    ensure_sync_defaults(cryptarchia);
    ensure_ibd_bootstrap(cryptarchia);
}

/// Mutable view of the `cryptarchia` mapping inside the YAML document, if
/// the document is a mapping and contains one.
fn cryptarchia_section(yaml_value: &mut Value) -> Option<&mut Mapping> {
    let root = yaml_value.as_mapping_mut()?;
    let section = root.get_mut(&Value::String("cryptarchia".into()))?;
    section.as_mapping_mut()
}

/// Insert default `network_adapter_settings` (consensus topic) unless the
/// key is already present.
fn ensure_network_adapter(cryptarchia: &mut Mapping) {
    let key = Value::String("network_adapter_settings".into());
    if cryptarchia.contains_key(&key) {
        return;
    }
    let mut network = Mapping::new();
    network.insert(
        Value::String("topic".into()),
        Value::String(nomos_node::CONSENSUS_TOPIC.into()),
    );
    cryptarchia.insert(key, Value::Mapping(network));
}

/// Insert a default `sync` section (orphan cache capped at 5) unless the
/// key is already present.
fn ensure_sync_defaults(cryptarchia: &mut Mapping) {
    let key = Value::String("sync".into());
    if cryptarchia.contains_key(&key) {
        return;
    }
    let mut orphan = Mapping::new();
    orphan.insert(
        Value::String("max_orphan_cache_size".into()),
        Value::Number(YamlNumber::from(5)),
    );
    let mut sync = Mapping::new();
    sync.insert(Value::String("orphan".into()), Value::Mapping(orphan));
    cryptarchia.insert(key, Value::Mapping(sync));
}

/// Add an empty IBD peer list to `bootstrap` when that section exists but
/// carries no `ibd` entry yet. A missing `bootstrap` section is left as-is.
fn ensure_ibd_bootstrap(cryptarchia: &mut Mapping) {
    let Some(bootstrap) = cryptarchia
        .get_mut(&Value::String("bootstrap".into()))
        .and_then(Value::as_mapping_mut)
    else {
        return;
    };

    let ibd_key = Value::String("ibd".into());
    if bootstrap.contains_key(&ibd_key) {
        return;
    }

    let mut ibd = Mapping::new();
    ibd.insert(Value::String("peers".into()), Value::Sequence(vec![]));

    bootstrap.insert(ibd_key, Value::Mapping(ibd));
}

impl Drop for Executor {
    fn drop(&mut self) {
        if should_persist_tempdir()
            && let Err(e) = persist_tempdir(&mut self.tempdir, "nomos-executor")
        {
            println!("failed to persist tempdir: {e}");
+ } + + if let Err(e) = self.child.kill() { + println!("failed to kill the child process: {e}"); + } + } +} + +impl Executor { + pub async fn spawn(mut config: Config) -> Self { + let dir = create_tempdir().unwrap(); + let config_path = dir.path().join("executor.yaml"); + let file = std::fs::File::create(&config_path).unwrap(); + + if !*IS_DEBUG_TRACING { + // setup logging so that we can intercept it later in testing + config.tracing.logger = LoggerLayer::File(FileConfig { + directory: dir.path().to_owned(), + prefix: Some(LOGS_PREFIX.into()), + }); + } + + config.storage.db_path = dir.path().join("db"); + dir.path().clone_into( + &mut config + .da_verifier + .storage_adapter_settings + .blob_storage_directory, + ); + + let addr = config.http.backend_settings.address; + let testing_addr = config.testing_http.backend_settings.address; + + let mut yaml_value = serde_yaml::to_value(&config).unwrap(); + inject_ibd_into_cryptarchia(&mut yaml_value); + serde_yaml::to_writer(file, &yaml_value).unwrap(); + let child = Command::new(binary_path()) + .arg(&config_path) + .current_dir(dir.path()) + .stdout(Stdio::inherit()) + .spawn() + .unwrap(); + let node = Self { + child, + tempdir: dir, + config, + api: ApiClient::new(addr, Some(testing_addr)), + }; + tokio::time::timeout(adjust_timeout(Duration::from_secs(10)), async { + node.wait_online().await; + }) + .await + .unwrap(); + + node + } + + pub async fn block_peer(&self, peer_id: String) -> bool { + self.api.block_peer(&peer_id).await.unwrap() + } + + pub async fn unblock_peer(&self, peer_id: String) -> bool { + self.api.unblock_peer(&peer_id).await.unwrap() + } + + pub async fn blacklisted_peers(&self) -> Vec { + self.api.blacklisted_peers().await.unwrap() + } + + async fn wait_online(&self) { + loop { + let res = self.api.get_response(MANTLE_METRICS).await; + if res.is_ok() && res.unwrap().status().is_success() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + + #[must_use] + pub const fn 
config(&self) -> &Config { + &self.config + } + + #[must_use] + pub fn url(&self) -> Url { + self.api.base_url().clone() + } + + #[must_use] + pub fn testing_url(&self) -> Option { + self.api.testing_url() + } + + pub async fn balancer_stats(&self) -> BalancerStats { + self.api.balancer_stats().await.unwrap() + } + + pub async fn monitor_stats(&self) -> MonitorStats { + self.api.monitor_stats().await.unwrap() + } + + pub async fn network_info(&self) -> Libp2pInfo { + self.api.network_info().await.unwrap() + } + + pub async fn consensus_info(&self) -> CryptarchiaInfo { + self.api.consensus_info().await.unwrap() + } + + pub async fn get_block(&self, id: HeaderId) -> Option> { + self.api.storage_block(&id).await.unwrap() + } + + pub async fn get_shares( + &self, + blob_id: BlobId, + requested_shares: HashSet<[u8; 2]>, + filter_shares: HashSet<[u8; 2]>, + return_available: bool, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_shares::( + self.api.base_url().clone(), + blob_id, + requested_shares, + filter_shares, + return_available, + ) + .await + } + + pub async fn get_commitments(&self, blob_id: BlobId) -> Option { + self.api + .post_json_decode(DA_GET_SHARES_COMMITMENTS, &blob_id) + .await + .unwrap() + } + + pub async fn get_storage_commitments( + &self, + blob_id: BlobId, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_storage_commitments::(self.api.base_url().clone(), blob_id) + .await + } + + pub async fn da_get_membership( + &self, + session_id: SessionNumber, + ) -> Result { + self.api.da_get_membership(&session_id).await + } + + pub async fn da_historic_sampling( + &self, + block_id: HeaderId, + blob_ids: I, + ) -> Result + where + I: IntoIterator, + { + let request = HistoricSamplingRequest { + block_id, + blob_ids: blob_ids.into_iter().collect(), + }; + + self.api.da_historic_sampling(&request).await + } + + pub async fn get_lib_stream( + &self, + ) -> Result, common_http_client::Error> { + 
self.api + .http_client() + .get_lib_stream(self.api.base_url().clone()) + .await + } + + pub async fn add_tx(&self, tx: SignedMantleTx) -> Result<(), reqwest::Error> { + self.api.post_json_unit(MEMPOOL_ADD_TX, &tx).await + } +} diff --git a/testing-framework/core/src/nodes/mod.rs b/testing-framework/core/src/nodes/mod.rs new file mode 100644 index 0000000..ee1534a --- /dev/null +++ b/testing-framework/core/src/nodes/mod.rs @@ -0,0 +1,35 @@ +mod api_client; +pub mod executor; +pub mod validator; + +use std::sync::LazyLock; + +pub use api_client::ApiClient; +use tempfile::TempDir; + +pub(crate) const LOGS_PREFIX: &str = "__logs"; +static KEEP_NODE_TEMPDIRS: LazyLock = + LazyLock::new(|| std::env::var("NOMOS_TESTS_KEEP_LOGS").is_ok()); + +fn create_tempdir() -> std::io::Result { + // It's easier to use the current location instead of OS-default tempfile + // location because Github Actions can easily access files in the current + // location using wildcard to upload them as artifacts. + TempDir::new_in(std::env::current_dir()?) 
+} + +fn persist_tempdir(tempdir: &mut TempDir, label: &str) -> std::io::Result<()> { + println!( + "{}: persisting directory at {}", + label, + tempdir.path().display() + ); + // we need ownership of the dir to persist it + let dir = std::mem::replace(tempdir, tempfile::tempdir()?); + let _ = dir.keep(); + Ok(()) +} + +pub(crate) fn should_persist_tempdir() -> bool { + std::thread::panicking() || *KEEP_NODE_TEMPDIRS +} diff --git a/testing-framework/core/src/nodes/validator.rs b/testing-framework/core/src/nodes/validator.rs new file mode 100644 index 0000000..ce99e76 --- /dev/null +++ b/testing-framework/core/src/nodes/validator.rs @@ -0,0 +1,362 @@ +use std::{ + collections::HashSet, + path::PathBuf, + process::{Child, Command, Stdio}, + time::Duration, +}; + +use broadcast_service::BlockInfo; +use chain_service::CryptarchiaInfo; +use futures::Stream; +use kzgrs_backend::common::share::{DaLightShare, DaShare, DaSharesCommitments}; +use nomos_core::{block::Block, da::BlobId, mantle::SignedMantleTx, sdp::SessionNumber}; +use nomos_da_network_core::swarm::{BalancerStats, MonitorStats}; +use nomos_da_network_service::MembershipResponse; +use nomos_http_api_common::paths::{CRYPTARCHIA_HEADERS, DA_GET_SHARES_COMMITMENTS}; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_node::{Config, HeaderId, api::testing::handlers::HistoricSamplingRequest}; +use nomos_tracing::logging::local::FileConfig; +use nomos_tracing_service::LoggerLayer; +use reqwest::Url; +use serde_yaml::{Mapping, Number as YamlNumber, Value}; +pub use testing_framework_config::nodes::validator::create_validator_config; +use tokio::time::error::Elapsed; +use tx_service::MempoolMetrics; + +use super::{ApiClient, create_tempdir, persist_tempdir, should_persist_tempdir}; +use crate::{IS_DEBUG_TRACING, adjust_timeout, nodes::LOGS_PREFIX}; + +const BIN_PATH: &str = "target/debug/nomos-node"; + +fn binary_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../") + 
.join(BIN_PATH) +} + +pub enum Pool { + Da, + Mantle, +} + +pub struct Validator { + tempdir: tempfile::TempDir, + child: Child, + config: Config, + api: ApiClient, +} + +fn inject_ibd_into_cryptarchia(yaml_value: &mut Value) { + let Some(cryptarchia) = cryptarchia_section(yaml_value) else { + return; + }; + ensure_network_adapter(cryptarchia); + ensure_sync_defaults(cryptarchia); + ensure_ibd_bootstrap(cryptarchia); +} + +fn cryptarchia_section(yaml_value: &mut Value) -> Option<&mut Mapping> { + yaml_value + .as_mapping_mut() + .and_then(|root| root.get_mut(&Value::String("cryptarchia".into()))) + .and_then(Value::as_mapping_mut) +} + +fn ensure_network_adapter(cryptarchia: &mut Mapping) { + if cryptarchia.contains_key(&Value::String("network_adapter_settings".into())) { + return; + } + let mut network = Mapping::new(); + network.insert( + Value::String("topic".into()), + Value::String(nomos_node::CONSENSUS_TOPIC.into()), + ); + cryptarchia.insert( + Value::String("network_adapter_settings".into()), + Value::Mapping(network), + ); +} + +fn ensure_sync_defaults(cryptarchia: &mut Mapping) { + if cryptarchia.contains_key(&Value::String("sync".into())) { + return; + } + let mut orphan = Mapping::new(); + orphan.insert( + Value::String("max_orphan_cache_size".into()), + Value::Number(YamlNumber::from(5)), + ); + let mut sync = Mapping::new(); + sync.insert(Value::String("orphan".into()), Value::Mapping(orphan)); + cryptarchia.insert(Value::String("sync".into()), Value::Mapping(sync)); +} + +fn ensure_ibd_bootstrap(cryptarchia: &mut Mapping) { + let Some(bootstrap) = cryptarchia + .get_mut(&Value::String("bootstrap".into())) + .and_then(Value::as_mapping_mut) + else { + return; + }; + + let ibd_key = Value::String("ibd".into()); + if bootstrap.contains_key(&ibd_key) { + return; + } + + let mut ibd = Mapping::new(); + ibd.insert(Value::String("peers".into()), Value::Sequence(vec![])); + + bootstrap.insert(ibd_key, Value::Mapping(ibd)); +} + +impl Drop for Validator { + 
fn drop(&mut self) { + if should_persist_tempdir() + && let Err(e) = persist_tempdir(&mut self.tempdir, "nomos-node") + { + println!("failed to persist tempdir: {e}"); + } + + if let Err(e) = self.child.kill() { + println!("failed to kill the child process: {e}"); + } + } +} + +impl Validator { + /// Check if the validator process is still running + pub fn is_running(&mut self) -> bool { + match self.child.try_wait() { + Ok(None) => true, + Ok(Some(_)) | Err(_) => false, + } + } + + /// Wait for the validator process to exit, with a timeout + /// Returns true if the process exited within the timeout, false otherwise + pub async fn wait_for_exit(&mut self, timeout: Duration) -> bool { + tokio::time::timeout(timeout, async { + loop { + if !self.is_running() { + return; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .is_ok() + } + + pub async fn spawn(mut config: Config) -> Result { + let dir = create_tempdir().unwrap(); + let config_path = dir.path().join("validator.yaml"); + let file = std::fs::File::create(&config_path).unwrap(); + + if !*IS_DEBUG_TRACING { + // setup logging so that we can intercept it later in testing + config.tracing.logger = LoggerLayer::File(FileConfig { + directory: dir.path().to_owned(), + prefix: Some(LOGS_PREFIX.into()), + }); + } + + config.storage.db_path = dir.path().join("db"); + dir.path().clone_into( + &mut config + .da_verifier + .storage_adapter_settings + .blob_storage_directory, + ); + + let addr = config.http.backend_settings.address; + let testing_addr = config.testing_http.backend_settings.address; + + let mut yaml_value = serde_yaml::to_value(&config).unwrap(); + inject_ibd_into_cryptarchia(&mut yaml_value); + serde_yaml::to_writer(file, &yaml_value).unwrap(); + let child = Command::new(binary_path()) + .arg(&config_path) + .current_dir(dir.path()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .spawn() + .unwrap(); + let node = Self { + child, + tempdir: dir, + config, + api: 
ApiClient::new(addr, Some(testing_addr)), + }; + + tokio::time::timeout(adjust_timeout(Duration::from_secs(10)), async { + node.wait_online().await; + }) + .await?; + + Ok(node) + } + + #[must_use] + pub fn url(&self) -> Url { + self.api.base_url().clone() + } + + #[must_use] + pub fn testing_url(&self) -> Option { + self.api.testing_url() + } + + async fn wait_online(&self) { + loop { + if self.api.consensus_info().await.is_ok() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + + pub async fn get_block(&self, id: HeaderId) -> Option> { + self.api.storage_block(&id).await.unwrap() + } + + pub async fn get_commitments(&self, blob_id: BlobId) -> Option { + self.api + .post_json_decode(DA_GET_SHARES_COMMITMENTS, &blob_id) + .await + .unwrap() + } + + pub async fn get_mempoool_metrics(&self, pool: Pool) -> MempoolMetrics { + let discr = match pool { + Pool::Mantle => "mantle", + Pool::Da => "da", + }; + let res = self.api.mempool_metrics(discr).await.unwrap(); + MempoolMetrics { + pending_items: res["pending_items"].as_u64().unwrap() as usize, + last_item_timestamp: res["last_item_timestamp"].as_u64().unwrap(), + } + } + + pub async fn da_historic_sampling( + &self, + block_id: HeaderId, + blob_ids: I, + ) -> Result + where + I: IntoIterator, + { + let request = HistoricSamplingRequest { + block_id, + blob_ids: blob_ids.into_iter().collect(), + }; + + self.api.da_historic_sampling(&request).await + } + + // not async so that we can use this in `Drop` + #[must_use] + pub fn get_logs_from_file(&self) -> String { + println!( + "fetching logs from dir {}...", + self.tempdir.path().display() + ); + // std::thread::sleep(std::time::Duration::from_secs(50)); + std::fs::read_dir(self.tempdir.path()) + .unwrap() + .filter_map(|entry| { + let entry = entry.unwrap(); + let path = entry.path(); + (path.is_file() && path.to_str().unwrap().contains(LOGS_PREFIX)).then_some(path) + }) + .map(|f| std::fs::read_to_string(f).unwrap()) + .collect::() + } + + 
#[must_use] + pub const fn config(&self) -> &Config { + &self.config + } + + pub async fn get_headers(&self, from: Option, to: Option) -> Vec { + let mut req = self.api.get_builder(CRYPTARCHIA_HEADERS); + + if let Some(from) = from { + req = req.query(&[("from", from)]); + } + + if let Some(to) = to { + req = req.query(&[("to", to)]); + } + + let res = self.api.get_headers_raw(req).await; + + println!("res: {res:?}"); + + res.unwrap().json::>().await.unwrap() + } + + pub async fn consensus_info(&self) -> CryptarchiaInfo { + let info = self.api.consensus_info().await.unwrap(); + println!("{info:?}"); + info + } + + pub async fn balancer_stats(&self) -> BalancerStats { + self.api.balancer_stats().await.unwrap() + } + + pub async fn monitor_stats(&self) -> MonitorStats { + self.api.monitor_stats().await.unwrap() + } + + pub async fn da_get_membership( + &self, + session_id: SessionNumber, + ) -> Result { + self.api.da_get_membership(&session_id).await + } + + pub async fn network_info(&self) -> Libp2pInfo { + self.api.network_info().await.unwrap() + } + + pub async fn get_shares( + &self, + blob_id: BlobId, + requested_shares: HashSet<[u8; 2]>, + filter_shares: HashSet<[u8; 2]>, + return_available: bool, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_shares::( + self.api.base_url().clone(), + blob_id, + requested_shares, + filter_shares, + return_available, + ) + .await + } + + pub async fn get_storage_commitments( + &self, + blob_id: BlobId, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_storage_commitments::(self.api.base_url().clone(), blob_id) + .await + } + + pub async fn get_lib_stream( + &self, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_lib_stream(self.api.base_url().clone()) + .await + } +} diff --git a/testing-framework/core/src/scenario/capabilities.rs b/testing-framework/core/src/scenario/capabilities.rs new file mode 100644 index 0000000..83a2372 --- 
/dev/null +++ b/testing-framework/core/src/scenario/capabilities.rs @@ -0,0 +1,28 @@ +use async_trait::async_trait; + +use super::DynError; + +/// Marker type used by scenario builders to request node control support. +#[derive(Clone, Copy, Debug, Default)] +pub struct NodeControlCapability; + +/// Trait implemented by scenario capability markers to signal whether node +/// control is required. +pub trait RequiresNodeControl { + const REQUIRED: bool; +} + +impl RequiresNodeControl for () { + const REQUIRED: bool = false; +} + +impl RequiresNodeControl for NodeControlCapability { + const REQUIRED: bool = true; +} + +/// Interface exposed by runners that can restart nodes at runtime. +#[async_trait] +pub trait NodeControlHandle: Send + Sync { + async fn restart_validator(&self, index: usize) -> Result<(), DynError>; + async fn restart_executor(&self, index: usize) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/scenario/cfgsync.rs b/testing-framework/core/src/scenario/cfgsync.rs new file mode 100644 index 0000000..6fdb7f3 --- /dev/null +++ b/testing-framework/core/src/scenario/cfgsync.rs @@ -0,0 +1,172 @@ +use std::{fs::File, num::NonZero, path::Path, time::Duration}; + +use anyhow::{Context as _, Result}; +use nomos_da_network_core::swarm::ReplicationConfig; +use nomos_tracing_service::TracingSettings; +use nomos_utils::bounded_duration::{MinimalBoundedDuration, SECOND}; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::topology::{GeneratedTopology, configs::wallet::WalletConfig}; + +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CfgSyncConfig { + pub port: u16, + pub n_hosts: usize, + pub timeout: u64, + pub security_param: NonZero, + pub active_slot_coeff: f64, + #[serde(default)] + pub wallet: WalletConfig, + #[serde(default)] + pub ids: Option>, + #[serde(default)] + pub da_ports: Option>, + #[serde(default)] + pub blend_ports: Option>, + pub subnetwork_size: usize, + pub 
dispersal_factor: usize, + pub num_samples: u16, + pub num_subnets: u16, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub old_blobs_check_interval: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub blobs_validity_duration: Duration, + pub global_params_path: String, + pub min_dispersal_peers: usize, + pub min_replication_peers: usize, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub monitor_failure_time_window: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub balancer_interval: Duration, + pub replication_settings: ReplicationConfig, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, + pub tracing_settings: TracingSettings, +} + +pub fn load_cfgsync_template(path: &Path) -> Result { + let file = File::open(path) + .with_context(|| format!("opening cfgsync template at {}", path.display()))?; + serde_yaml::from_reader(file).context("parsing cfgsync template") +} + +pub fn write_cfgsync_template(path: &Path, cfg: &CfgSyncConfig) -> Result<()> { + let file = File::create(path) + .with_context(|| format!("writing cfgsync template to {}", path.display()))?; + let serializable = SerializableCfgSyncConfig::from(cfg); + serde_yaml::to_writer(file, &serializable).context("serializing cfgsync template") +} + +pub fn render_cfgsync_yaml(cfg: &CfgSyncConfig) -> Result { + let serializable = SerializableCfgSyncConfig::from(cfg); + serde_yaml::to_string(&serializable).context("rendering cfgsync yaml") +} + +pub fn apply_topology_overrides( + cfg: &mut CfgSyncConfig, + topology: &GeneratedTopology, + use_kzg_mount: bool, +) { + let hosts = topology.validators().len() + topology.executors().len(); + cfg.n_hosts = hosts; + + let consensus = &topology.config().consensus_params; + cfg.security_param = consensus.security_param; + cfg.active_slot_coeff = consensus.active_slot_coeff; + + let config = topology.config(); + cfg.wallet = config.wallet_config.clone(); + cfg.ids = 
Some(topology.nodes().map(|node| node.id).collect()); + cfg.da_ports = Some(topology.nodes().map(|node| node.da_port).collect()); + cfg.blend_ports = Some(topology.nodes().map(|node| node.blend_port).collect()); + + let da = &config.da_params; + cfg.subnetwork_size = da.subnetwork_size; + cfg.dispersal_factor = da.dispersal_factor; + cfg.num_samples = da.num_samples; + cfg.num_subnets = da.num_subnets; + cfg.old_blobs_check_interval = da.old_blobs_check_interval; + cfg.blobs_validity_duration = da.blobs_validity_duration; + cfg.global_params_path = if use_kzg_mount { + "/kzgrs_test_params".into() + } else { + da.global_params_path.clone() + }; + cfg.min_dispersal_peers = da.policy_settings.min_dispersal_peers; + cfg.min_replication_peers = da.policy_settings.min_replication_peers; + cfg.monitor_failure_time_window = da.monitor_settings.failure_time_window; + cfg.balancer_interval = da.balancer_interval; + cfg.replication_settings = da.replication_settings; + cfg.retry_shares_limit = da.retry_shares_limit; + cfg.retry_commitments_limit = da.retry_commitments_limit; + cfg.tracing_settings = TracingSettings::default(); +} + +#[serde_as] +#[derive(Serialize)] +struct SerializableCfgSyncConfig { + port: u16, + n_hosts: usize, + timeout: u64, + security_param: NonZero, + active_slot_coeff: f64, + wallet: WalletConfig, + #[serde(skip_serializing_if = "Option::is_none")] + ids: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + da_ports: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + blend_ports: Option>, + subnetwork_size: usize, + dispersal_factor: usize, + num_samples: u16, + num_subnets: u16, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + old_blobs_check_interval: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + blobs_validity_duration: Duration, + global_params_path: String, + min_dispersal_peers: usize, + min_replication_peers: usize, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + 
monitor_failure_time_window: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + balancer_interval: Duration, + replication_settings: ReplicationConfig, + retry_shares_limit: usize, + retry_commitments_limit: usize, + tracing_settings: TracingSettings, +} + +impl From<&CfgSyncConfig> for SerializableCfgSyncConfig { + fn from(cfg: &CfgSyncConfig) -> Self { + Self { + port: cfg.port, + n_hosts: cfg.n_hosts, + timeout: cfg.timeout, + security_param: cfg.security_param, + active_slot_coeff: cfg.active_slot_coeff, + wallet: cfg.wallet.clone(), + ids: cfg.ids.clone(), + da_ports: cfg.da_ports.clone(), + blend_ports: cfg.blend_ports.clone(), + subnetwork_size: cfg.subnetwork_size, + dispersal_factor: cfg.dispersal_factor, + num_samples: cfg.num_samples, + num_subnets: cfg.num_subnets, + old_blobs_check_interval: cfg.old_blobs_check_interval, + blobs_validity_duration: cfg.blobs_validity_duration, + global_params_path: cfg.global_params_path.clone(), + min_dispersal_peers: cfg.min_dispersal_peers, + min_replication_peers: cfg.min_replication_peers, + monitor_failure_time_window: cfg.monitor_failure_time_window, + balancer_interval: cfg.balancer_interval, + replication_settings: cfg.replication_settings, + retry_shares_limit: cfg.retry_shares_limit, + retry_commitments_limit: cfg.retry_commitments_limit, + tracing_settings: cfg.tracing_settings.clone(), + } + } +} diff --git a/testing-framework/core/src/scenario/definition.rs b/testing-framework/core/src/scenario/definition.rs new file mode 100644 index 0000000..afc9614 --- /dev/null +++ b/testing-framework/core/src/scenario/definition.rs @@ -0,0 +1,328 @@ +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; + +use super::{ + NodeControlCapability, expectation::Expectation, runtime::context::RunMetrics, + workload::Workload, +}; +use crate::topology::{ + GeneratedTopology, TopologyBuilder, TopologyConfig, + configs::{network::Libp2pNetworkLayout, wallet::WalletConfig}, +}; + +const 
DEFAULT_FUNDS_PER_WALLET: u64 = 100; +const MIN_EXPECTATION_BLOCKS: u32 = 2; +const MIN_EXPECTATION_FALLBACK_SECS: u64 = 10; + +/// Immutable scenario definition shared between the runner, workloads, and +/// expectations. +pub struct Scenario { + topology: GeneratedTopology, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, +} + +impl Scenario { + fn new( + topology: GeneratedTopology, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, + ) -> Self { + Self { + topology, + workloads, + expectations, + duration, + capabilities, + } + } + + #[must_use] + pub const fn topology(&self) -> &GeneratedTopology { + &self.topology + } + + #[must_use] + pub fn workloads(&self) -> &[Arc] { + &self.workloads + } + + #[must_use] + pub fn expectations(&self) -> &[Box] { + &self.expectations + } + + #[must_use] + pub fn expectations_mut(&mut self) -> &mut [Box] { + &mut self.expectations + } + + #[must_use] + pub const fn duration(&self) -> Duration { + self.duration + } + + #[must_use] + pub const fn capabilities(&self) -> &Caps { + &self.capabilities + } +} + +/// Builder used by callers to describe the desired scenario. +pub struct Builder { + topology: TopologyBuilder, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, +} + +pub type ScenarioBuilder = Builder<()>; + +/// Builder for shaping the scenario topology. +pub struct TopologyConfigurator { + builder: Builder, + validators: usize, + executors: usize, + network_star: bool, +} + +impl Builder { + #[must_use] + /// Start a builder from a topology description. 
+ pub fn new(topology: TopologyBuilder) -> Self { + Self { + topology, + workloads: Vec::new(), + expectations: Vec::new(), + duration: Duration::ZERO, + capabilities: Caps::default(), + } + } + + #[must_use] + pub fn with_node_counts(validators: usize, executors: usize) -> Self { + Self::new(TopologyBuilder::new(TopologyConfig::with_node_numbers( + validators, executors, + ))) + } + + /// Convenience constructor that immediately enters topology configuration, + /// letting callers set counts via `validators`/`executors`. + pub fn topology() -> TopologyConfigurator { + TopologyConfigurator::new(Self::new(TopologyBuilder::new(TopologyConfig::empty()))) + } +} + +impl Builder { + #[must_use] + /// Swap capabilities type carried with the scenario. + pub fn with_capabilities(self, capabilities: NewCaps) -> Builder { + let Self { + topology, + workloads, + expectations, + duration, + .. + } = self; + + Builder { + topology, + workloads, + expectations, + duration, + capabilities, + } + } + + #[must_use] + pub const fn capabilities(&self) -> &Caps { + &self.capabilities + } + + #[must_use] + pub const fn capabilities_mut(&mut self) -> &mut Caps { + &mut self.capabilities + } + + #[must_use] + pub fn with_workload(mut self, workload: W) -> Self + where + W: Workload + 'static, + { + self.expectations.extend(workload.expectations()); + self.workloads.push(Arc::new(workload)); + self + } + + #[must_use] + /// Add a standalone expectation not tied to a workload. + pub fn with_expectation(mut self, expectation: E) -> Self + where + E: Expectation + 'static, + { + self.expectations.push(Box::new(expectation)); + self + } + + #[must_use] + /// Configure the intended run duration. + pub const fn with_run_duration(mut self, duration: Duration) -> Self { + self.duration = duration; + self + } + + #[must_use] + /// Transform the topology builder. 
+ pub fn map_topology(mut self, f: impl FnOnce(TopologyBuilder) -> TopologyBuilder) -> Self { + self.topology = f(self.topology); + self + } + + #[must_use] + /// Override wallet config for the topology. + pub fn with_wallet_config(mut self, wallet: WalletConfig) -> Self { + self.topology = self.topology.with_wallet_config(wallet); + self + } + + #[must_use] + pub fn wallets(self, users: usize) -> Self { + let user_count = NonZeroUsize::new(users).expect("wallet user count must be non-zero"); + let total_funds = DEFAULT_FUNDS_PER_WALLET + .checked_mul(users as u64) + .expect("wallet count exceeds capacity"); + let wallet = WalletConfig::uniform(total_funds, user_count); + self.with_wallet_config(wallet) + } + + #[must_use] + /// Finalize the scenario, computing run metrics and initializing + /// components. + pub fn build(self) -> Scenario { + let Self { + topology, + mut workloads, + mut expectations, + duration, + capabilities, + .. + } = self; + + let generated = topology.build(); + let duration = enforce_min_duration(&generated, duration); + let run_metrics = RunMetrics::from_topology(&generated, duration); + initialize_components(&generated, &run_metrics, &mut workloads, &mut expectations); + + Scenario::new(generated, workloads, expectations, duration, capabilities) + } +} + +impl TopologyConfigurator { + const fn new(builder: Builder) -> Self { + Self { + builder, + validators: 0, + executors: 0, + network_star: false, + } + } + + /// Set the number of validator nodes. + #[must_use] + pub fn validators(mut self, count: usize) -> Self { + self.validators = count; + self + } + + /// Set the number of executor nodes. + #[must_use] + pub fn executors(mut self, count: usize) -> Self { + self.executors = count; + self + } + + /// Use a star libp2p network layout. + #[must_use] + pub fn network_star(mut self) -> Self { + self.network_star = true; + self + } + + /// Finalize and return the underlying scenario builder. 
+ #[must_use] + pub fn apply(self) -> Builder { + let participants = self.validators + self.executors; + assert!( + participants > 0, + "topology must include at least one node; call validators()/executors() before apply()" + ); + + let mut config = TopologyConfig::with_node_numbers(self.validators, self.executors); + if self.network_star { + config.network_params.libp2p_network_layout = Libp2pNetworkLayout::Star; + } + + let mut builder = self.builder; + builder.topology = TopologyBuilder::new(config); + builder + } +} + +impl Builder<()> { + #[must_use] + pub fn enable_node_control(self) -> Builder { + self.with_capabilities(NodeControlCapability) + } +} + +fn initialize_components( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + workloads: &mut [Arc], + expectations: &mut [Box], +) { + initialize_workloads(descriptors, run_metrics, workloads); + initialize_expectations(descriptors, run_metrics, expectations); +} + +fn initialize_workloads( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + workloads: &mut [Arc], +) { + for workload in workloads { + let inner = + Arc::get_mut(workload).expect("workload unexpectedly cloned before initialization"); + if let Err(err) = inner.init(descriptors, run_metrics) { + panic!("workload '{}' failed to initialize: {err}", inner.name()); + } + } +} + +fn initialize_expectations( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + expectations: &mut [Box], +) { + for expectation in expectations { + if let Err(err) = expectation.init(descriptors, run_metrics) { + panic!( + "expectation '{}' failed to initialize: {err}", + expectation.name() + ); + } + } +} + +fn enforce_min_duration(descriptors: &GeneratedTopology, requested: Duration) -> Duration { + let min_duration = descriptors.slot_duration().map_or_else( + || Duration::from_secs(MIN_EXPECTATION_FALLBACK_SECS), + |slot| slot * MIN_EXPECTATION_BLOCKS, + ); + + requested.max(min_duration) +} diff --git 
a/testing-framework/core/src/scenario/expectation.rs b/testing-framework/core/src/scenario/expectation.rs new file mode 100644 index 0000000..2fc0bc0 --- /dev/null +++ b/testing-framework/core/src/scenario/expectation.rs @@ -0,0 +1,24 @@ +use async_trait::async_trait; + +use super::{DynError, RunContext, runtime::context::RunMetrics}; +use crate::topology::GeneratedTopology; + +#[async_trait] +/// Defines a check evaluated during or after a scenario run. +pub trait Expectation: Send + Sync { + fn name(&self) -> &str; + + fn init( + &mut self, + _descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + Ok(()) + } + + async fn start_capture(&mut self, _ctx: &RunContext) -> Result<(), DynError> { + Ok(()) + } + + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/scenario/http_probe.rs b/testing-framework/core/src/scenario/http_probe.rs new file mode 100644 index 0000000..4e26186 --- /dev/null +++ b/testing-framework/core/src/scenario/http_probe.rs @@ -0,0 +1,127 @@ +use std::time::Duration; + +use futures::future::try_join_all; +use nomos_http_api_common::paths; +use reqwest::Client as ReqwestClient; +use thiserror::Error; +use tokio::time::{sleep, timeout}; + +/// Role used for labelling readiness probes. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NodeRole { + Validator, + Executor, +} + +impl NodeRole { + #[must_use] + pub const fn label(self) -> &'static str { + match self { + Self::Validator => "validator", + Self::Executor => "executor", + } + } +} + +/// Error raised when HTTP readiness checks time out. 
#[derive(Clone, Copy, Debug, Error)]
#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", role = role.label())]
pub struct HttpReadinessError {
    // Role of the node whose endpoint never became ready.
    role: NodeRole,
    // HTTP port that was being probed.
    port: u16,
    // Total time budget that elapsed before giving up.
    timeout: Duration,
}

impl HttpReadinessError {
    /// Builds an error describing a readiness timeout for `role` on `port`
    /// after waiting for `timeout`.
    #[must_use]
    pub const fn new(role: NodeRole, port: u16, timeout: Duration) -> Self {
        Self {
            role,
            port,
            timeout,
        }
    }

    /// Node role the failed probe targeted.
    #[must_use]
    pub const fn role(&self) -> NodeRole {
        self.role
    }

    /// HTTP port the failed probe targeted.
    #[must_use]
    pub const fn port(&self) -> u16 {
        self.port
    }

    /// Time budget that was exhausted before the endpoint became ready.
    #[must_use]
    pub const fn timeout(&self) -> Duration {
        self.timeout
    }
}

/// Wait for HTTP readiness on the provided ports against localhost.
///
/// Convenience wrapper that delegates to `wait_for_http_ports_with_host`
/// with the host fixed to `127.0.0.1`.
pub async fn wait_for_http_ports(
    ports: &[u16],
    role: NodeRole,
    timeout_duration: Duration,
    poll_interval: Duration,
) -> Result<(), HttpReadinessError> {
    wait_for_http_ports_with_host(ports, role, "127.0.0.1", timeout_duration, poll_interval).await
}

/// Wait for HTTP readiness on the provided ports against a specific host.
+pub async fn wait_for_http_ports_with_host( + ports: &[u16], + role: NodeRole, + host: &str, + timeout_duration: Duration, + poll_interval: Duration, +) -> Result<(), HttpReadinessError> { + if ports.is_empty() { + return Ok(()); + } + + let client = ReqwestClient::new(); + let probes = ports.iter().copied().map(|port| { + wait_for_single_port( + client.clone(), + port, + role, + host, + timeout_duration, + poll_interval, + ) + }); + + try_join_all(probes).await.map(|_| ()) +} + +async fn wait_for_single_port( + client: ReqwestClient, + port: u16, + role: NodeRole, + host: &str, + timeout_duration: Duration, + poll_interval: Duration, +) -> Result<(), HttpReadinessError> { + let url = format!("http://{host}:{port}{}", paths::CRYPTARCHIA_INFO); + let probe = async { + loop { + let is_ready = client + .get(&url) + .send() + .await + .map(|response| response.status().is_success()) + .unwrap_or(false); + + if is_ready { + return; + } + + sleep(poll_interval).await; + } + }; + + timeout(timeout_duration, probe) + .await + .map_err(|_| HttpReadinessError::new(role, port, timeout_duration)) +} diff --git a/testing-framework/core/src/scenario/mod.rs b/testing-framework/core/src/scenario/mod.rs new file mode 100644 index 0000000..b6ef376 --- /dev/null +++ b/testing-framework/core/src/scenario/mod.rs @@ -0,0 +1,25 @@ +//! Scenario orchestration primitives shared by integration tests and runners. 
+ +mod capabilities; +pub mod cfgsync; +mod definition; +mod expectation; +pub mod http_probe; +mod runtime; +mod workload; + +pub type DynError = Box; + +pub use capabilities::{NodeControlCapability, NodeControlHandle, RequiresNodeControl}; +pub use definition::{Builder, Scenario, ScenarioBuilder, TopologyConfigurator}; +pub use expectation::Expectation; +pub use runtime::{ + BlockFeed, BlockFeedTask, BlockRecord, BlockStats, CleanupGuard, Deployer, NodeClients, + RunContext, RunHandle, RunMetrics, Runner, ScenarioError, + metrics::{ + CONSENSUS_PROCESSED_BLOCKS, CONSENSUS_TRANSACTIONS_TOTAL, Metrics, MetricsError, + PrometheusEndpoint, PrometheusInstantSample, + }, + spawn_block_feed, +}; +pub use workload::Workload; diff --git a/testing-framework/core/src/scenario/runtime/block_feed.rs b/testing-framework/core/src/scenario/runtime/block_feed.rs new file mode 100644 index 0000000..535645e --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/block_feed.rs @@ -0,0 +1,185 @@ +use std::{ + collections::HashSet, + sync::{ + Arc, + atomic::{AtomicU64, Ordering}, + }, + time::Duration, +}; + +use anyhow::{Context as _, Result}; +use nomos_core::{block::Block, mantle::SignedMantleTx}; +use nomos_node::HeaderId; +use tokio::{sync::broadcast, task::JoinHandle, time::sleep}; +use tracing::{debug, error}; + +use super::context::CleanupGuard; +use crate::nodes::ApiClient; + +const POLL_INTERVAL: Duration = Duration::from_secs(1); + +/// Broadcasts observed blocks to subscribers while tracking simple stats. +#[derive(Clone)] +pub struct BlockFeed { + inner: Arc, +} + +struct BlockFeedInner { + sender: broadcast::Sender>, + stats: Arc, +} + +/// Block header + payload snapshot emitted by the feed. +#[derive(Clone)] +pub struct BlockRecord { + pub header: HeaderId, + pub block: Arc>, +} + +/// Join handle for the background block feed task. 
+pub struct BlockFeedTask { + handle: JoinHandle<()>, +} + +impl BlockFeed { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver> { + self.inner.sender.subscribe() + } + + #[must_use] + pub fn stats(&self) -> Arc { + Arc::clone(&self.inner.stats) + } + + fn ingest(&self, header: HeaderId, block: Block) { + self.inner.stats.record_block(&block); + let record = Arc::new(BlockRecord { + header, + block: Arc::new(block), + }); + + let _ = self.inner.sender.send(record); + } +} + +impl BlockFeedTask { + #[must_use] + /// Create a task handle wrapper for the block scanner. + pub const fn new(handle: JoinHandle<()>) -> Self { + Self { handle } + } +} + +/// Spawn a background task to poll blocks from the given client and broadcast +/// them. +pub async fn spawn_block_feed(client: ApiClient) -> Result<(BlockFeed, BlockFeedTask)> { + let (sender, _) = broadcast::channel(1024); + let feed = BlockFeed { + inner: Arc::new(BlockFeedInner { + sender, + stats: Arc::new(BlockStats::default()), + }), + }; + + let mut scanner = BlockScanner::new(client, feed.clone()); + scanner.catch_up().await?; + + let handle = tokio::spawn(async move { scanner.run().await }); + + Ok((feed, BlockFeedTask::new(handle))) +} + +struct BlockScanner { + client: ApiClient, + feed: BlockFeed, + seen: HashSet, +} + +impl BlockScanner { + fn new(client: ApiClient, feed: BlockFeed) -> Self { + Self { + client, + feed, + seen: HashSet::new(), + } + } + + async fn run(&mut self) { + loop { + if let Err(err) = self.catch_up().await { + error!(%err, "block feed catch up failed"); + } + sleep(POLL_INTERVAL).await; + } + } + + async fn catch_up(&mut self) -> Result<()> { + let info = self.client.consensus_info().await?; + let tip = info.tip; + let mut remaining_height = info.height; + let mut stack = Vec::new(); + let mut cursor = tip; + + loop { + if self.seen.contains(&cursor) { + break; + } + + if remaining_height == 0 { + self.seen.insert(cursor); + break; + } + + let block = self + .client + 
.storage_block(&cursor) + .await? + .context("missing block while catching up")?; + + let parent = block.header().parent(); + stack.push((cursor, block)); + + if self.seen.contains(&parent) || parent == cursor { + break; + } + + cursor = parent; + remaining_height = remaining_height.saturating_sub(1); + } + + let mut processed = 0usize; + while let Some((header, block)) = stack.pop() { + self.feed.ingest(header, block); + self.seen.insert(header); + processed += 1; + } + + debug!(processed, "block feed processed catch up batch"); + Ok(()) + } +} + +impl CleanupGuard for BlockFeedTask { + fn cleanup(self: Box) { + self.handle.abort(); + } +} + +/// Accumulates simple counters over observed blocks. +#[derive(Default)] +pub struct BlockStats { + total_transactions: AtomicU64, +} + +impl BlockStats { + fn record_block(&self, block: &Block) { + self.total_transactions + .fetch_add(block.transactions().len() as u64, Ordering::Relaxed); + } + + #[must_use] + pub fn total_transactions(&self) -> u64 { + self.total_transactions.load(Ordering::Relaxed) + } +} diff --git a/testing-framework/core/src/scenario/runtime/context.rs b/testing-framework/core/src/scenario/runtime/context.rs new file mode 100644 index 0000000..9262033 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/context.rs @@ -0,0 +1,220 @@ +use std::{sync::Arc, time::Duration}; + +use super::{block_feed::BlockFeed, metrics::Metrics, node_clients::ClusterClient}; +use crate::{ + nodes::ApiClient, + scenario::{NodeClients, NodeControlHandle}, + topology::{GeneratedTopology, Topology, configs::wallet::WalletAccount}, +}; + +/// Shared runtime context available to workloads and expectations. 
+pub struct RunContext { + descriptors: GeneratedTopology, + cluster: Option, + node_clients: NodeClients, + metrics: RunMetrics, + telemetry: Metrics, + block_feed: BlockFeed, + node_control: Option>, +} + +impl RunContext { + /// Builds a run context, clamping the requested duration so we always run + /// for at least a couple of slot lengths (or a fallback window if slots are + /// unknown). + #[must_use] + pub fn new( + descriptors: GeneratedTopology, + cluster: Option, + node_clients: NodeClients, + run_duration: Duration, + telemetry: Metrics, + block_feed: BlockFeed, + node_control: Option>, + ) -> Self { + let metrics = RunMetrics::new(&descriptors, run_duration); + + Self { + descriptors, + cluster, + node_clients, + metrics, + telemetry, + block_feed, + node_control, + } + } + + #[must_use] + pub const fn descriptors(&self) -> &GeneratedTopology { + &self.descriptors + } + + #[must_use] + pub const fn topology(&self) -> Option<&Topology> { + self.cluster.as_ref() + } + + #[must_use] + pub const fn node_clients(&self) -> &NodeClients { + &self.node_clients + } + + #[must_use] + pub fn random_node_client(&self) -> Option<&ApiClient> { + self.node_clients.any_client() + } + + #[must_use] + pub fn block_feed(&self) -> BlockFeed { + self.block_feed.clone() + } + + #[must_use] + pub fn wallet_accounts(&self) -> &[WalletAccount] { + self.descriptors.wallet_accounts() + } + + #[must_use] + pub const fn telemetry(&self) -> &Metrics { + &self.telemetry + } + + #[must_use] + pub const fn run_duration(&self) -> Duration { + self.metrics.run_duration() + } + + #[must_use] + pub const fn expected_blocks(&self) -> u64 { + self.metrics.expected_consensus_blocks() + } + + #[must_use] + pub const fn run_metrics(&self) -> RunMetrics { + self.metrics + } + + #[must_use] + pub fn node_control(&self) -> Option> { + self.node_control.clone() + } + + #[must_use] + pub const fn cluster_client(&self) -> ClusterClient<'_> { + self.node_clients.cluster_client() + } +} + +/// Handle 
returned by the runner to control the lifecycle of the run. +pub struct RunHandle { + run_context: Arc, + cleanup_guard: Option>, +} + +impl Drop for RunHandle { + fn drop(&mut self) { + if let Some(guard) = self.cleanup_guard.take() { + guard.cleanup(); + } + } +} + +impl RunHandle { + #[must_use] + /// Build a handle from owned context and optional cleanup guard. + pub fn new(context: RunContext, cleanup_guard: Option>) -> Self { + Self { + run_context: Arc::new(context), + cleanup_guard, + } + } + + #[must_use] + /// Build a handle from a shared context reference. + pub(crate) fn from_shared( + context: Arc, + cleanup_guard: Option>, + ) -> Self { + Self { + run_context: context, + cleanup_guard, + } + } + + #[must_use] + /// Access the shared run context. + pub fn context(&self) -> &RunContext { + &self.run_context + } +} + +/// Derived metrics about the current run timing. +#[derive(Clone, Copy)] +pub struct RunMetrics { + run_duration: Duration, + expected_blocks: u64, + block_interval_hint: Option, +} + +impl RunMetrics { + #[must_use] + pub fn new(descriptors: &GeneratedTopology, run_duration: Duration) -> Self { + Self::from_topology(descriptors, run_duration) + } + + #[must_use] + pub fn from_topology(descriptors: &GeneratedTopology, run_duration: Duration) -> Self { + let slot_duration = descriptors.slot_duration(); + + let active_slot_coeff = descriptors.config().consensus_params.active_slot_coeff; + let expected_blocks = + calculate_expected_blocks(run_duration, slot_duration, active_slot_coeff); + + let block_interval_hint = + slot_duration.map(|duration| duration.mul_f64(active_slot_coeff.clamp(0.0, 1.0))); + + Self { + run_duration, + expected_blocks, + block_interval_hint, + } + } + + #[must_use] + pub const fn run_duration(&self) -> Duration { + self.run_duration + } + + #[must_use] + pub const fn expected_consensus_blocks(&self) -> u64 { + self.expected_blocks + } + + #[must_use] + pub const fn block_interval_hint(&self) -> Option { + 
self.block_interval_hint + } +} + +pub trait CleanupGuard: Send { + fn cleanup(self: Box); +} + +/// Computes the minimum duration we’ll allow for a scenario run so that the +/// scheduler can observe a few block opportunities even if the caller +/// requested an extremely short window. +fn calculate_expected_blocks( + run_duration: Duration, + slot_duration: Option, + active_slot_coeff: f64, +) -> u64 { + let Some(slot_duration) = slot_duration else { + return 0; + }; + let slot_secs = slot_duration.as_secs_f64(); + let run_secs = run_duration.as_secs_f64(); + let expected = run_secs / slot_secs * active_slot_coeff; + + expected.ceil().clamp(0.0, u64::MAX as f64) as u64 +} diff --git a/testing-framework/core/src/scenario/runtime/deployer.rs b/testing-framework/core/src/scenario/runtime/deployer.rs new file mode 100644 index 0000000..dfed4b1 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/deployer.rs @@ -0,0 +1,23 @@ +use async_trait::async_trait; + +use super::runner::Runner; +use crate::scenario::{DynError, Scenario}; + +/// Error returned when executing workloads or expectations. +#[derive(Debug, thiserror::Error)] +pub enum ScenarioError { + #[error("workload failure: {0}")] + Workload(#[source] DynError), + #[error("expectation capture failed: {0}")] + ExpectationCapture(#[source] DynError), + #[error("expectations failed:\n{0}")] + Expectations(#[source] DynError), +} + +/// Deploys a scenario into a target environment and returns a `Runner`. 
+#[async_trait] +pub trait Deployer: Send + Sync { + type Error; + + async fn deploy(&self, scenario: &Scenario) -> Result; +} diff --git a/testing-framework/core/src/scenario/runtime/metrics.rs b/testing-framework/core/src/scenario/runtime/metrics.rs new file mode 100644 index 0000000..4903513 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/metrics.rs @@ -0,0 +1,204 @@ +use std::{collections::HashMap, sync::Arc}; + +use prometheus_http_query::{Client as PrometheusClient, response::Data as PrometheusData}; +use reqwest::Url; +use tracing::warn; + +pub const CONSENSUS_PROCESSED_BLOCKS: &str = "consensus_processed_blocks"; +pub const CONSENSUS_TRANSACTIONS_TOTAL: &str = "consensus_transactions_total"; +const CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY: &str = + r#"sum(consensus_transactions_total{job=~"validator-.*"})"#; + +/// Telemetry handles available during a run. +#[derive(Clone, Default)] +pub struct Metrics { + prometheus: Option>, +} + +impl Metrics { + #[must_use] + pub const fn empty() -> Self { + Self { prometheus: None } + } + + pub fn from_prometheus(url: Url) -> Result { + let handle = Arc::new(PrometheusEndpoint::new(url)?); + Ok(Self::empty().with_prometheus_endpoint(handle)) + } + + pub fn from_prometheus_str(raw_url: &str) -> Result { + Url::parse(raw_url) + .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}"))) + .and_then(Self::from_prometheus) + } + + #[must_use] + pub fn with_prometheus_endpoint(mut self, handle: Arc) -> Self { + self.prometheus = Some(handle); + self + } + + #[must_use] + pub fn prometheus(&self) -> Option> { + self.prometheus.as_ref().map(Arc::clone) + } + + #[must_use] + pub const fn is_configured(&self) -> bool { + self.prometheus.is_some() + } + + pub fn instant_values(&self, query: &str) -> Result, MetricsError> { + let handle = self + .prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + handle.instant_values(query) + } + + pub fn counter_value(&self, 
query: &str) -> Result { + let handle = self + .prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + handle.counter_value(query) + } + + pub fn consensus_processed_blocks(&self) -> Result { + self.counter_value(CONSENSUS_PROCESSED_BLOCKS) + } + + pub fn consensus_transactions_total(&self) -> Result { + let handle = self + .prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + + match handle.instant_samples(CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY) { + Ok(samples) if !samples.is_empty() => { + return Ok(samples.into_iter().map(|sample| sample.value).sum()); + } + Ok(_) => { + warn!( + query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, + "validator-specific consensus transaction metric returned no samples; falling back to aggregate counter" + ); + } + Err(err) => { + warn!( + query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, + error = %err, + "failed to query validator-specific consensus transaction metric; falling back to aggregate counter" + ); + } + } + + handle.counter_value(CONSENSUS_TRANSACTIONS_TOTAL) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum MetricsError { + #[error("{0}")] + Store(String), +} + +impl MetricsError { + #[must_use] + pub fn new(message: impl Into) -> Self { + Self::Store(message.into()) + } +} + +/// Lightweight wrapper around a Prometheus endpoint used by the framework. +pub struct PrometheusEndpoint { + base_url: Url, + client: PrometheusClient, +} + +/// Single sample from a Prometheus instant query. 
+#[derive(Clone, Debug)] +pub struct PrometheusInstantSample { + pub labels: HashMap, + pub timestamp: f64, + pub value: f64, +} + +impl PrometheusEndpoint { + pub fn new(base_url: Url) -> Result { + let client = PrometheusClient::try_from(base_url.as_str().to_owned()).map_err(|err| { + MetricsError::new(format!("failed to create prometheus client: {err}")) + })?; + + Ok(Self { base_url, client }) + } + + #[must_use] + pub const fn base_url(&self) -> &Url { + &self.base_url + } + + #[must_use] + pub fn port(&self) -> Option { + self.base_url.port_or_known_default() + } + + pub fn instant_samples( + &self, + query: &str, + ) -> Result, MetricsError> { + let query = query.to_owned(); + let client = self.client.clone(); + + let response = std::thread::spawn(move || -> Result<_, MetricsError> { + let runtime = tokio::runtime::Runtime::new() + .map_err(|err| MetricsError::new(format!("failed to create runtime: {err}")))?; + runtime + .block_on(async { client.query(&query).get().await }) + .map_err(|err| MetricsError::new(format!("prometheus query failed: {err}"))) + }) + .join() + .map_err(|_| MetricsError::new("prometheus query thread panicked"))??; + + let mut samples = Vec::new(); + match response.data() { + PrometheusData::Vector(vectors) => { + for vector in vectors { + samples.push(PrometheusInstantSample { + labels: vector.metric().clone(), + timestamp: vector.sample().timestamp(), + value: vector.sample().value(), + }); + } + } + PrometheusData::Matrix(ranges) => { + for range in ranges { + let labels = range.metric().clone(); + for sample in range.samples() { + samples.push(PrometheusInstantSample { + labels: labels.clone(), + timestamp: sample.timestamp(), + value: sample.value(), + }); + } + } + } + PrometheusData::Scalar(sample) => { + samples.push(PrometheusInstantSample { + labels: HashMap::new(), + timestamp: sample.timestamp(), + value: sample.value(), + }); + } + } + + Ok(samples) + } + + pub fn instant_values(&self, query: &str) -> Result, 
MetricsError> { + self.instant_samples(query) + .map(|samples| samples.into_iter().map(|sample| sample.value).collect()) + } + + pub fn counter_value(&self, query: &str) -> Result { + self.instant_values(query) + .map(|values| values.into_iter().sum()) + } +} diff --git a/testing-framework/core/src/scenario/runtime/mod.rs b/testing-framework/core/src/scenario/runtime/mod.rs new file mode 100644 index 0000000..82c060b --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/mod.rs @@ -0,0 +1,12 @@ +mod block_feed; +pub mod context; +mod deployer; +pub mod metrics; +mod node_clients; +mod runner; + +pub use block_feed::{BlockFeed, BlockFeedTask, BlockRecord, BlockStats, spawn_block_feed}; +pub use context::{CleanupGuard, RunContext, RunHandle, RunMetrics}; +pub use deployer::{Deployer, ScenarioError}; +pub use node_clients::NodeClients; +pub use runner::Runner; diff --git a/testing-framework/core/src/scenario/runtime/node_clients.rs b/testing-framework/core/src/scenario/runtime/node_clients.rs new file mode 100644 index 0000000..48b4c1b --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/node_clients.rs @@ -0,0 +1,147 @@ +use std::pin::Pin; + +use rand::{Rng as _, seq::SliceRandom as _, thread_rng}; + +use crate::{ + nodes::ApiClient, + scenario::DynError, + topology::{GeneratedTopology, Topology}, +}; + +/// Collection of API clients for the validator and executor set. +#[derive(Clone, Default)] +pub struct NodeClients { + validators: Vec, + executors: Vec, +} + +impl NodeClients { + #[must_use] + /// Build clients from preconstructed vectors. + pub const fn new(validators: Vec, executors: Vec) -> Self { + Self { + validators, + executors, + } + } + + #[must_use] + /// Derive clients from a spawned topology. 
+ pub fn from_topology(_descriptors: &GeneratedTopology, topology: &Topology) -> Self { + let validator_clients = topology.validators().iter().map(|node| { + let testing = node.testing_url(); + ApiClient::from_urls(node.url(), testing) + }); + + let executor_clients = topology.executors().iter().map(|node| { + let testing = node.testing_url(); + ApiClient::from_urls(node.url(), testing) + }); + + Self::new(validator_clients.collect(), executor_clients.collect()) + } + + #[must_use] + /// Validator API clients. + pub fn validator_clients(&self) -> &[ApiClient] { + &self.validators + } + + #[must_use] + /// Executor API clients. + pub fn executor_clients(&self) -> &[ApiClient] { + &self.executors + } + + #[must_use] + /// Choose a random validator client if present. + pub fn random_validator(&self) -> Option<&ApiClient> { + if self.validators.is_empty() { + return None; + } + let mut rng = thread_rng(); + let idx = rng.gen_range(0..self.validators.len()); + self.validators.get(idx) + } + + #[must_use] + /// Choose a random executor client if present. + pub fn random_executor(&self) -> Option<&ApiClient> { + if self.executors.is_empty() { + return None; + } + let mut rng = thread_rng(); + let idx = rng.gen_range(0..self.executors.len()); + self.executors.get(idx) + } + + /// Iterator over all clients. + pub fn all_clients(&self) -> impl Iterator { + self.validators.iter().chain(self.executors.iter()) + } + + #[must_use] + /// Choose any random client from validators+executors. + pub fn any_client(&self) -> Option<&ApiClient> { + let validator_count = self.validators.len(); + let executor_count = self.executors.len(); + let total = validator_count + executor_count; + if total == 0 { + return None; + } + let mut rng = thread_rng(); + let choice = rng.gen_range(0..total); + if choice < validator_count { + self.validators.get(choice) + } else { + self.executors.get(choice - validator_count) + } + } + + #[must_use] + /// Convenience wrapper for fan-out queries. 
+ pub const fn cluster_client(&self) -> ClusterClient<'_> { + ClusterClient::new(self) + } +} + +pub struct ClusterClient<'a> { + node_clients: &'a NodeClients, +} + +impl<'a> ClusterClient<'a> { + #[must_use] + /// Build a cluster client that can try multiple nodes. + pub const fn new(node_clients: &'a NodeClients) -> Self { + Self { node_clients } + } + + /// Try all node clients until one call succeeds, shuffling order each time. + pub async fn try_all_clients( + &self, + mut f: impl for<'b> FnMut( + &'b ApiClient, + ) -> Pin> + Send + 'b>> + + Send, + ) -> Result + where + E: Into, + { + let mut clients: Vec<&ApiClient> = self.node_clients.all_clients().collect(); + if clients.is_empty() { + return Err("cluster client has no api clients".into()); + } + + clients.shuffle(&mut thread_rng()); + + let mut last_err = None; + for client in clients { + match f(client).await { + Ok(value) => return Ok(value), + Err(err) => last_err = Some(err.into()), + } + } + + Err(last_err.unwrap_or_else(|| "cluster client exhausted all nodes".into())) + } +} diff --git a/testing-framework/core/src/scenario/runtime/runner.rs b/testing-framework/core/src/scenario/runtime/runner.rs new file mode 100644 index 0000000..9901ab4 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/runner.rs @@ -0,0 +1,258 @@ +use std::{any::Any, panic::AssertUnwindSafe, sync::Arc, time::Duration}; + +use futures::FutureExt as _; +use tokio::{ + task::JoinSet, + time::{sleep, timeout}, +}; + +use super::deployer::ScenarioError; +use crate::scenario::{ + DynError, Expectation, Scenario, + runtime::context::{CleanupGuard, RunContext, RunHandle}, +}; + +type WorkloadOutcome = Result<(), DynError>; + +/// Represents a fully prepared environment capable of executing a scenario. +pub struct Runner { + context: Arc, + cleanup_guard: Option>, +} + +impl Runner { + /// Construct a runner from the run context and optional cleanup guard. 
+ #[must_use] + pub fn new(context: RunContext, cleanup_guard: Option>) -> Self { + Self { + context: Arc::new(context), + cleanup_guard, + } + } + + /// Access the underlying run context. + #[must_use] + pub fn context(&self) -> Arc { + Arc::clone(&self.context) + } + + pub(crate) fn cleanup(&mut self) { + if let Some(guard) = self.cleanup_guard.take() { + guard.cleanup(); + } + } + + pub(crate) fn into_run_handle(mut self) -> RunHandle { + RunHandle::from_shared(Arc::clone(&self.context), self.cleanup_guard.take()) + } + + /// Executes the scenario by driving workloads first and then evaluating all + /// expectations. On any failure it cleans up resources and propagates the + /// error to the caller. + pub async fn run( + mut self, + scenario: &mut Scenario, + ) -> Result + where + Caps: Send + Sync, + { + let context = self.context(); + if let Err(error) = + Self::prepare_expectations(scenario.expectations_mut(), context.as_ref()).await + { + self.cleanup(); + return Err(error); + } + + if let Err(error) = Self::run_workloads(&context, scenario).await { + self.cleanup(); + return Err(error); + } + + Self::cooldown(&context).await; + + if let Err(error) = + Self::run_expectations(scenario.expectations_mut(), context.as_ref()).await + { + self.cleanup(); + return Err(error); + } + + Ok(self.into_run_handle()) + } + + async fn prepare_expectations( + expectations: &mut [Box], + context: &RunContext, + ) -> Result<(), ScenarioError> { + for expectation in expectations { + if let Err(source) = expectation.start_capture(context).await { + return Err(ScenarioError::ExpectationCapture(source)); + } + } + Ok(()) + } + + /// Spawns every workload, waits until the configured duration elapses (or a + /// workload fails), and then aborts the remaining tasks. 
+ async fn run_workloads( + context: &Arc, + scenario: &Scenario, + ) -> Result<(), ScenarioError> + where + Caps: Send + Sync, + { + let mut workloads = Self::spawn_workloads(scenario, context); + let _ = Self::drive_until_timer(&mut workloads, scenario.duration()).await?; + Self::drain_workloads(&mut workloads).await + } + + /// Evaluates every registered expectation, aggregating failures so callers + /// can see all missing conditions in a single report. + async fn run_expectations( + expectations: &mut [Box], + context: &RunContext, + ) -> Result<(), ScenarioError> { + let mut failures: Vec<(String, DynError)> = Vec::new(); + for expectation in expectations { + if let Err(source) = expectation.evaluate(context).await { + failures.push((expectation.name().to_owned(), source)); + } + } + + if failures.is_empty() { + return Ok(()); + } + + let summary = failures + .into_iter() + .map(|(name, source)| format!("{name}: {source}")) + .collect::>() + .join("\n"); + + Err(ScenarioError::Expectations(summary.into())) + } + + async fn cooldown(context: &Arc) { + if let Some(wait) = Self::cooldown_duration(context) { + if !wait.is_zero() { + sleep(wait).await; + } + } + } + + fn cooldown_duration(context: &RunContext) -> Option { + let metrics = context.run_metrics(); + let needs_stabilization = context.node_control().is_some(); + if let Some(interval) = metrics.block_interval_hint() { + if interval.is_zero() { + return None; + } + let mut wait = interval.mul_f64(5.0); + if needs_stabilization { + let minimum = Duration::from_secs(30); + wait = wait.max(minimum); + } + Some(wait) + } else if needs_stabilization { + Some(Duration::from_secs(30)) + } else { + None + } + } + + /// Spawns each workload inside its own task and returns the join set for + /// cooperative management. 
+ fn spawn_workloads( + scenario: &Scenario, + context: &Arc, + ) -> JoinSet + where + Caps: Send + Sync, + { + let mut workloads = JoinSet::new(); + for workload in scenario.workloads() { + let workload = Arc::clone(workload); + let ctx = Arc::clone(context); + + workloads.spawn(async move { + let outcome = AssertUnwindSafe(async { workload.start(ctx.as_ref()).await }) + .catch_unwind() + .await; + + outcome.unwrap_or_else(|panic| { + Err(format!("workload panicked: {}", panic_message(panic)).into()) + }) + }); + } + + workloads + } + + /// Polls workload tasks until the timeout fires or one reports an error. + async fn drive_until_timer( + workloads: &mut JoinSet, + duration: Duration, + ) -> Result { + let run_future = async { + while let Some(result) = workloads.join_next().await { + Self::map_join_result(result)?; + } + Ok(()) + }; + + timeout(duration, run_future) + .await + .map_or(Ok(true), |result| { + result?; + Ok(false) + }) + } + + /// Aborts and drains any remaining workload tasks so we do not leak work + /// across scenario runs. + async fn drain_workloads( + workloads: &mut JoinSet, + ) -> Result<(), ScenarioError> { + workloads.abort_all(); + + while let Some(result) = workloads.join_next().await { + Self::map_join_result(result)?; + } + + Ok(()) + } + + /// Converts the outcome of a workload task into the canonical scenario + /// error, tolerating cancellation when the runner aborts unfinished tasks. + fn map_join_result( + result: Result, + ) -> Result<(), ScenarioError> { + match result { + Ok(outcome) => outcome.map_err(ScenarioError::Workload), + Err(join_err) if join_err.is_cancelled() => Ok(()), + Err(join_err) => Err(ScenarioError::Workload( + format!("workload task failed: {join_err}").into(), + )), + } + } +} + +/// Attempts to turn a panic payload into a readable string for diagnostics. 
+fn panic_message(panic: Box) -> String { + panic.downcast::().map_or_else( + |panic| { + panic.downcast::<&'static str>().map_or_else( + |_| "unknown panic".to_owned(), + |message| (*message).to_owned(), + ) + }, + |message| *message, + ) +} + +impl Drop for Runner { + fn drop(&mut self) { + self.cleanup(); + } +} diff --git a/testing-framework/core/src/scenario/workload.rs b/testing-framework/core/src/scenario/workload.rs new file mode 100644 index 0000000..950530e --- /dev/null +++ b/testing-framework/core/src/scenario/workload.rs @@ -0,0 +1,24 @@ +use async_trait::async_trait; + +use super::{DynError, Expectation, RunContext, runtime::context::RunMetrics}; +use crate::topology::GeneratedTopology; + +#[async_trait] +/// Describes an action sequence executed during a scenario run. +pub trait Workload: Send + Sync { + fn name(&self) -> &str; + + fn expectations(&self) -> Vec> { + Vec::new() + } + + fn init( + &mut self, + _descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + Ok(()) + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/topology/mod.rs b/testing-framework/core/src/topology/mod.rs new file mode 100644 index 0000000..2bc8e08 --- /dev/null +++ b/testing-framework/core/src/topology/mod.rs @@ -0,0 +1,1458 @@ +pub mod configs { + pub use testing_framework_config::topology::configs::*; +} + +use std::{ + collections::{HashMap, HashSet}, + iter, + time::Duration, +}; + +use configs::{ + GeneralConfig, + consensus::{ProviderInfo, create_genesis_tx_with_declarations}, + da::{DaParams, create_da_configs}, + network::{Libp2pNetworkLayout, NetworkParams, create_network_configs}, + tracing::create_tracing_configs, + wallet::{WalletAccount, WalletConfig}, +}; +use futures::future::join_all; +use groth16::fr_to_bytes; +use key_management_system::{ + backend::preload::PreloadKMSBackendSettings, + keys::{Ed25519Key, Key, ZkKey}, +}; +use nomos_core::{ + 
mantle::GenesisTx as _, + sdp::{Locator, ServiceType, SessionNumber}, +}; +use nomos_da_network_core::swarm::{BalancerStats, DAConnectionPolicySettings}; +use nomos_da_network_service::MembershipResponse; +use nomos_http_api_common::paths; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_utils::net::get_available_udp_port; +use rand::{Rng as _, thread_rng}; +use reqwest::{Client, Url}; +use thiserror::Error; +use tokio::time::{sleep, timeout}; +use tracing::warn; + +use crate::{ + adjust_timeout, + nodes::{ + executor::{Executor, create_executor_config}, + validator::{Validator, create_validator_config}, + }, + topology::configs::{ + api::create_api_configs, + blend::{GeneralBlendConfig, create_blend_configs}, + bootstrap::{SHORT_PROLONGED_BOOTSTRAP_PERIOD, create_bootstrap_configs}, + consensus::{ConsensusParams, create_consensus_configs}, + da::GeneralDaConfig, + time::default_time_config, + }, +}; + +#[derive(Clone)] +/// High-level topology settings used to generate node configs for a scenario. +pub struct TopologyConfig { + pub n_validators: usize, + pub n_executors: usize, + pub consensus_params: ConsensusParams, + pub da_params: DaParams, + pub network_params: NetworkParams, + pub wallet_config: WalletConfig, +} + +impl TopologyConfig { + /// Create a config with zero nodes; counts must be set before building. + #[must_use] + pub fn empty() -> Self { + Self { + n_validators: 0, + n_executors: 0, + consensus_params: ConsensusParams::default_for_participants(1), + da_params: DaParams::default(), + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + /// Convenience config with two validators for consensus-only scenarios. 
+ pub fn two_validators() -> Self { + Self { + n_validators: 2, + n_executors: 0, + consensus_params: ConsensusParams::default_for_participants(2), + da_params: DaParams::default(), + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + /// Single validator + single executor config for minimal dual-role setups. + pub fn validator_and_executor() -> Self { + Self { + n_validators: 1, + n_executors: 1, + consensus_params: ConsensusParams::default_for_participants(2), + da_params: DaParams { + dispersal_factor: 2, + subnetwork_size: 2, + num_subnets: 2, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 1, + min_replication_peers: 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + balancer_interval: Duration::from_secs(1), + ..Default::default() + }, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + /// Build a topology with explicit validator and executor counts. 
+ pub fn with_node_numbers(validators: usize, executors: usize) -> Self { + let participants = validators + executors; + assert!(participants > 0, "topology must include at least one node"); + + let mut da_params = DaParams::default(); + let da_nodes = participants; + if da_nodes <= 1 { + da_params.subnetwork_size = 1; + da_params.num_subnets = 1; + da_params.dispersal_factor = 1; + da_params.policy_settings.min_dispersal_peers = 0; + da_params.policy_settings.min_replication_peers = 0; + } else { + let dispersal = da_nodes.min(da_params.dispersal_factor.max(2)); + da_params.dispersal_factor = dispersal; + da_params.subnetwork_size = da_params.subnetwork_size.max(dispersal); + da_params.num_subnets = da_params.subnetwork_size as u16; + let min_peers = dispersal.saturating_sub(1).max(1); + da_params.policy_settings.min_dispersal_peers = min_peers; + da_params.policy_settings.min_replication_peers = min_peers; + da_params.balancer_interval = Duration::from_secs(1); + } + + Self { + n_validators: validators, + n_executors: executors, + consensus_params: ConsensusParams::default_for_participants(participants), + da_params, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + /// Build a topology with one executor and a configurable validator set. 
+ pub fn validators_and_executor( + num_validators: usize, + num_subnets: usize, + dispersal_factor: usize, + ) -> Self { + Self { + n_validators: num_validators, + n_executors: 1, + consensus_params: ConsensusParams::default_for_participants(num_validators + 1), + da_params: DaParams { + dispersal_factor, + subnetwork_size: num_subnets, + num_subnets: num_subnets as u16, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: num_subnets, + min_replication_peers: dispersal_factor - 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + balancer_interval: Duration::from_secs(5), + ..Default::default() + }, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + pub const fn wallet(&self) -> &WalletConfig { + &self.wallet_config + } +} + +/// Node role within the generated topology. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum NodeRole { + Validator, + Executor, +} + +/// Fully generated configuration for an individual node. +#[derive(Clone)] +pub struct GeneratedNodeConfig { + pub role: NodeRole, + pub index: usize, + pub id: [u8; 32], + pub general: GeneralConfig, + pub da_port: u16, + pub blend_port: u16, +} + +impl GeneratedNodeConfig { + #[must_use] + /// Logical role of the node. + pub const fn role(&self) -> NodeRole { + self.role + } + + #[must_use] + /// Zero-based index within its role group. + pub const fn index(&self) -> usize { + self.index + } + + #[must_use] + pub const fn network_port(&self) -> u16 { + self.general.network_config.backend.inner.port + } + + #[must_use] + pub const fn api_port(&self) -> u16 { + self.general.api_config.address.port() + } + + #[must_use] + pub const fn testing_http_port(&self) -> u16 { + self.general.api_config.testing_http_address.port() + } +} + +/// Collection of generated node configs and helpers to spawn or probe the +/// stack. 
+#[derive(Clone)] +pub struct GeneratedTopology { + config: TopologyConfig, + validators: Vec, + executors: Vec, +} + +impl GeneratedTopology { + #[must_use] + /// Underlying configuration used to derive the generated nodes. + pub const fn config(&self) -> &TopologyConfig { + &self.config + } + + #[must_use] + /// All validator configs. + pub fn validators(&self) -> &[GeneratedNodeConfig] { + &self.validators + } + + #[must_use] + /// All executor configs. + pub fn executors(&self) -> &[GeneratedNodeConfig] { + &self.executors + } + + /// Iterator over all node configs in role order. + pub fn nodes(&self) -> impl Iterator { + self.validators.iter().chain(self.executors.iter()) + } + + #[must_use] + /// Slot duration from the first node (assumes homogeneous configs). + pub fn slot_duration(&self) -> Option { + self.validators + .first() + .map(|node| node.general.time_config.slot_duration) + } + + #[must_use] + /// Wallet accounts configured for this topology. + pub fn wallet_accounts(&self) -> &[WalletAccount] { + &self.config.wallet_config.accounts + } + + pub async fn spawn_local(&self) -> Topology { + let configs = self + .nodes() + .map(|node| node.general.clone()) + .collect::>(); + + let (validators, executors) = Topology::spawn_validators_executors( + configs, + self.config.n_validators, + self.config.n_executors, + ) + .await; + + Topology { + validators, + executors, + } + } + + pub async fn wait_remote_readiness( + &self, + validator_endpoints: &[Url], + executor_endpoints: &[Url], + validator_membership_endpoints: Option<&[Url]>, + executor_membership_endpoints: Option<&[Url]>, + ) -> Result<(), ReadinessError> { + let total_nodes = self.validators.len() + self.executors.len(); + if total_nodes == 0 { + return Ok(()); + } + + assert_eq!( + self.validators.len(), + validator_endpoints.len(), + "validator endpoints must match topology" + ); + assert_eq!( + self.executors.len(), + executor_endpoints.len(), + "executor endpoints must match topology" + ); + + 
let mut endpoints = Vec::with_capacity(total_nodes); + endpoints.extend_from_slice(validator_endpoints); + endpoints.extend_from_slice(executor_endpoints); + + let labels = self.labels(); + let client = Client::new(); + let make_testing_base_url = |port: u16| -> Url { + Url::parse(&format!("http://127.0.0.1:{port}/")) + .expect("failed to construct local testing base url") + }; + + if endpoints.len() > 1 { + let listen_ports = self.listen_ports(); + let initial_peer_ports = self.initial_peer_ports(); + let expected_peer_counts = + find_expected_peer_counts(&listen_ports, &initial_peer_ports); + let network_check = HttpNetworkReadiness { + client: &client, + endpoints: &endpoints, + expected_peer_counts: &expected_peer_counts, + labels: &labels, + }; + + network_check.wait().await?; + } + + let mut membership_endpoints = Vec::with_capacity(total_nodes); + if let Some(urls) = validator_membership_endpoints { + assert_eq!( + self.validators.len(), + urls.len(), + "validator membership endpoints must match topology" + ); + membership_endpoints.extend_from_slice(urls); + } else { + membership_endpoints.extend( + self.validators + .iter() + .map(|node| make_testing_base_url(node.testing_http_port())), + ); + } + + if let Some(urls) = executor_membership_endpoints { + assert_eq!( + self.executors.len(), + urls.len(), + "executor membership endpoints must match topology" + ); + membership_endpoints.extend_from_slice(urls); + } else { + membership_endpoints.extend( + self.executors + .iter() + .map(|node| make_testing_base_url(node.testing_http_port())), + ); + } + + let membership_check = HttpMembershipReadiness { + client: &client, + endpoints: &membership_endpoints, + session: SessionNumber::from(0u64), + labels: &labels, + expect_non_empty: true, + }; + + membership_check.wait().await + } + + fn listen_ports(&self) -> Vec { + self.validators + .iter() + .map(|node| node.general.network_config.backend.inner.port) + .chain( + self.executors + .iter() + .map(|node| 
node.general.network_config.backend.inner.port), + ) + .collect() + } + + fn initial_peer_ports(&self) -> Vec> { + self.validators + .iter() + .map(|node| { + node.general + .network_config + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + }) + .chain(self.executors.iter().map(|node| { + node.general + .network_config + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + })) + .collect() + } + + fn labels(&self) -> Vec { + self.validators + .iter() + .enumerate() + .map(|(idx, node)| { + format!( + "validator#{idx}@{}", + node.general.network_config.backend.inner.port + ) + }) + .chain(self.executors.iter().enumerate().map(|(idx, node)| { + format!( + "executor#{idx}@{}", + node.general.network_config.backend.inner.port + ) + })) + .collect() + } +} + +/// Builder that produces `GeneratedTopology` instances from a `TopologyConfig`. +#[derive(Clone)] +pub struct TopologyBuilder { + config: TopologyConfig, + ids: Option>, + da_ports: Option>, + blend_ports: Option>, +} + +impl TopologyBuilder { + #[must_use] + /// Create a builder from a base topology config. + pub const fn new(config: TopologyConfig) -> Self { + Self { + config, + ids: None, + da_ports: None, + blend_ports: None, + } + } + + #[must_use] + /// Provide deterministic node IDs. + pub fn with_ids(mut self, ids: Vec<[u8; 32]>) -> Self { + self.ids = Some(ids); + self + } + + #[must_use] + /// Override DA ports for nodes in order. + pub fn with_da_ports(mut self, ports: Vec) -> Self { + self.da_ports = Some(ports); + self + } + + #[must_use] + /// Override blend ports for nodes in order. + pub fn with_blend_ports(mut self, ports: Vec) -> Self { + self.blend_ports = Some(ports); + self + } + + #[must_use] + pub const fn with_validator_count(mut self, validators: usize) -> Self { + self.config.n_validators = validators; + self + } + + #[must_use] + /// Set executor count. 
+ pub const fn with_executor_count(mut self, executors: usize) -> Self { + self.config.n_executors = executors; + self + } + + #[must_use] + /// Set validator and executor counts together. + pub const fn with_node_counts(mut self, validators: usize, executors: usize) -> Self { + self.config.n_validators = validators; + self.config.n_executors = executors; + self + } + + #[must_use] + /// Configure the libp2p network layout. + pub const fn with_network_layout(mut self, layout: Libp2pNetworkLayout) -> Self { + self.config.network_params.libp2p_network_layout = layout; + self + } + + #[must_use] + /// Override wallet configuration used in genesis. + pub fn with_wallet_config(mut self, wallet: WalletConfig) -> Self { + self.config.wallet_config = wallet; + self + } + + #[must_use] + /// Finalize and generate topology and node descriptors. + pub fn build(self) -> GeneratedTopology { + let Self { + config, + ids, + da_ports, + blend_ports, + } = self; + + let n_participants = config.n_validators + config.n_executors; + assert!(n_participants > 0, "topology must have at least one node"); + + let ids = resolve_ids(ids, n_participants); + let da_ports = resolve_ports(da_ports, n_participants, "DA"); + let blend_ports = resolve_ports(blend_ports, n_participants, "Blend"); + + let mut consensus_configs = + create_consensus_configs(&ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(&ids, &config.da_params, &da_ports); + let network_configs = create_network_configs(&ids, &config.network_params); + let blend_configs = create_blend_configs(&ids, &blend_ports); + let api_configs = create_api_configs(&ids); + let tracing_configs = create_tracing_configs(&ids); + let time_config = default_time_config(); + + let mut providers: Vec<_> = da_configs + .iter() + .enumerate() + .map(|(i, da_conf)| ProviderInfo { + service_type: 
ServiceType::DataAvailability, + provider_sk: da_conf.signer.clone(), + zk_sk: da_conf.secret_zk_key.clone(), + locator: Locator(da_conf.listening_address.clone()), + note: consensus_configs[0].da_notes[i].clone(), + }) + .collect(); + providers.extend( + blend_configs + .iter() + .enumerate() + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }), + ); + + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + let kms_configs = + create_kms_configs(&blend_configs, &da_configs, &config.wallet_config.accounts); + + let mut validators = Vec::with_capacity(config.n_validators); + let mut executors = Vec::with_capacity(config.n_executors); + + for i in 0..n_participants { + let general = GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }; + + let role = if i < config.n_validators { + NodeRole::Validator + } else { + NodeRole::Executor + }; + let index = match role { + NodeRole::Validator => i, + NodeRole::Executor => i - config.n_validators, + }; + + let descriptor = GeneratedNodeConfig { + role, + index, + id: ids[i], + general, + da_port: da_ports[i], + blend_port: blend_ports[i], + }; + + match role { + NodeRole::Validator => validators.push(descriptor), + NodeRole::Executor => 
executors.push(descriptor), + } + } + + GeneratedTopology { + config, + validators, + executors, + } + } +} + +/// Runtime representation of a spawned topology with running nodes. +pub struct Topology { + validators: Vec, + executors: Vec, +} + +impl Topology { + pub async fn spawn(config: TopologyConfig) -> Self { + let n_participants = config.n_validators + config.n_executors; + + // we use the same random bytes for: + // * da id + // * coin sk + // * coin nonce + // * libp2p node key + let mut ids = vec![[0; 32]; n_participants]; + let mut da_ports = vec![]; + let mut blend_ports = vec![]; + for id in &mut ids { + thread_rng().fill(id); + da_ports.push(get_available_udp_port().unwrap()); + blend_ports.push(get_available_udp_port().unwrap()); + } + + let mut consensus_configs = + create_consensus_configs(&ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(&ids, &config.da_params, &da_ports); + let network_configs = create_network_configs(&ids, &config.network_params); + let blend_configs = create_blend_configs(&ids, &blend_ports); + let api_configs = create_api_configs(&ids); + let tracing_configs = create_tracing_configs(&ids); + let time_config = default_time_config(); + + // Setup genesis TX with Blend and DA service declarationse + let mut providers: Vec<_> = da_configs + .iter() + .enumerate() + .map(|(i, da_conf)| ProviderInfo { + service_type: ServiceType::DataAvailability, + provider_sk: da_conf.signer.clone(), + zk_sk: da_conf.secret_zk_key.clone(), + locator: Locator(da_conf.listening_address.clone()), + note: consensus_configs[0].da_notes[i].clone(), + }) + .collect(); + providers.extend( + blend_configs + .iter() + .enumerate() + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: 
Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }), + ); + + // Update genesis TX to contain Blend and DA providers. + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + // Set Blend and DA keys in KMS of each node config. + let kms_configs = + create_kms_configs(&blend_configs, &da_configs, &config.wallet_config.accounts); + + let mut node_configs = vec![]; + + for i in 0..n_participants { + node_configs.push(GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }); + } + + let (validators, executors) = + Self::spawn_validators_executors(node_configs, config.n_validators, config.n_executors) + .await; + + Self { + validators, + executors, + } + } + + pub async fn spawn_with_empty_membership( + config: TopologyConfig, + ids: &[[u8; 32]], + da_ports: &[u16], + blend_ports: &[u16], + ) -> Self { + let n_participants = config.n_validators + config.n_executors; + + let consensus_configs = + create_consensus_configs(ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(ids, &config.da_params, da_ports); + let network_configs = create_network_configs(ids, &config.network_params); + let blend_configs = create_blend_configs(ids, blend_ports); + let api_configs = create_api_configs(ids); + // Create membership configs without DA nodes. 
+ let tracing_configs = create_tracing_configs(ids); + let time_config = default_time_config(); + + let kms_config = PreloadKMSBackendSettings { + keys: HashMap::new(), + }; + + let mut node_configs = vec![]; + + for i in 0..n_participants { + node_configs.push(GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_config.clone(), + }); + } + let (validators, executors) = + Self::spawn_validators_executors(node_configs, config.n_validators, config.n_executors) + .await; + + Self { + validators, + executors, + } + } + + async fn spawn_validators_executors( + config: Vec, + n_validators: usize, + n_executors: usize, + ) -> (Vec, Vec) { + let mut validators = Vec::new(); + for i in 0..n_validators { + let config = create_validator_config(config[i].clone()); + validators.push(Validator::spawn(config).await.unwrap()); + } + + let mut executors = Vec::new(); + for i in n_validators..(n_validators + n_executors) { + let config = create_executor_config(config[i].clone()); + executors.push(Executor::spawn(config).await); + } + + (validators, executors) + } + + #[must_use] + pub fn validators(&self) -> &[Validator] { + &self.validators + } + + #[must_use] + pub fn executors(&self) -> &[Executor] { + &self.executors + } + + pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> { + let listen_ports = self.node_listen_ports(); + if listen_ports.len() <= 1 { + return Ok(()); + } + + let initial_peer_ports = self.node_initial_peer_ports(); + let expected_peer_counts = find_expected_peer_counts(&listen_ports, &initial_peer_ports); + let labels = self.node_labels(); + + let check = NetworkReadiness { + topology: self, + expected_peer_counts: 
&expected_peer_counts, + labels: &labels, + }; + + check.wait().await?; + Ok(()) + } + + pub async fn wait_da_balancer_ready(&self) -> Result<(), ReadinessError> { + if self.validators.is_empty() && self.executors.is_empty() { + return Ok(()); + } + + let labels = self.node_labels(); + let check = DaBalancerReadiness { + topology: self, + labels: &labels, + }; + + check.wait().await?; + Ok(()) + } + + pub async fn wait_membership_ready(&self) -> Result<(), ReadinessError> { + self.wait_membership_ready_for_session(SessionNumber::from(0u64)) + .await + } + + pub async fn wait_membership_ready_for_session( + &self, + session: SessionNumber, + ) -> Result<(), ReadinessError> { + self.wait_membership_assignations(session, true).await + } + + pub async fn wait_membership_empty_for_session( + &self, + session: SessionNumber, + ) -> Result<(), ReadinessError> { + self.wait_membership_assignations(session, false).await + } + + async fn wait_membership_assignations( + &self, + session: SessionNumber, + expect_non_empty: bool, + ) -> Result<(), ReadinessError> { + let total_nodes = self.validators.len() + self.executors.len(); + + if total_nodes == 0 { + return Ok(()); + } + + let labels = self.node_labels(); + let check = MembershipReadiness { + topology: self, + session, + labels: &labels, + expect_non_empty, + }; + + check.wait().await?; + Ok(()) + } + + fn node_listen_ports(&self) -> Vec { + self.validators + .iter() + .map(|node| node.config().network.backend.inner.port) + .chain( + self.executors + .iter() + .map(|node| node.config().network.backend.inner.port), + ) + .collect() + } + + fn node_initial_peer_ports(&self) -> Vec> { + self.validators + .iter() + .map(|node| { + node.config() + .network + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + }) + .chain(self.executors.iter().map(|node| { + node.config() + .network + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + })) + .collect() + } + + fn 
node_labels(&self) -> Vec { + self.validators + .iter() + .enumerate() + .map(|(idx, node)| { + format!( + "validator#{idx}@{}", + node.config().network.backend.inner.port + ) + }) + .chain(self.executors.iter().enumerate().map(|(idx, node)| { + format!( + "executor#{idx}@{}", + node.config().network.backend.inner.port + ) + })) + .collect() + } +} + +/// Errors emitted while waiting for node readiness. +#[derive(Debug, Error)] +pub enum ReadinessError { + #[error("{message}")] + Timeout { message: String }, +} + +#[async_trait::async_trait] +trait ReadinessCheck<'a> { + type Data: Send; + + async fn collect(&'a self) -> Self::Data; + + fn is_ready(&self, data: &Self::Data) -> bool; + + fn timeout_message(&self, data: Self::Data) -> String; + + fn poll_interval(&self) -> Duration { + Duration::from_millis(200) + } + + async fn wait(&'a self) -> Result<(), ReadinessError> { + let timeout_duration = adjust_timeout(Duration::from_secs(60)); + let poll_interval = self.poll_interval(); + let mut data = self.collect().await; + + let wait_result = timeout(timeout_duration, async { + loop { + if self.is_ready(&data) { + return; + } + + sleep(poll_interval).await; + + data = self.collect().await; + } + }) + .await; + + if wait_result.is_err() { + let message = self.timeout_message(data); + return Err(ReadinessError::Timeout { message }); + } + + Ok(()) + } +} + +struct NetworkReadiness<'a> { + topology: &'a Topology, + expected_peer_counts: &'a [usize], + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for NetworkReadiness<'a> { + type Data = Vec; + + async fn collect(&'a self) -> Self::Data { + let (validator_infos, executor_infos) = tokio::join!( + join_all(self.topology.validators.iter().map(Validator::network_info)), + join_all(self.topology.executors.iter().map(Executor::network_info)) + ); + + validator_infos.into_iter().chain(executor_infos).collect() + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter() + 
.enumerate() + .all(|(idx, info)| info.n_peers >= self.expected_peer_counts[idx]) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let summary = build_timeout_summary(self.labels, data, self.expected_peer_counts); + format!("timed out waiting for network readiness: {summary}") + } +} + +struct HttpNetworkReadiness<'a> { + client: &'a Client, + endpoints: &'a [Url], + expected_peer_counts: &'a [usize], + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for HttpNetworkReadiness<'a> { + type Data = Vec; + + async fn collect(&'a self) -> Self::Data { + let futures = self + .endpoints + .iter() + .map(|endpoint| fetch_network_info(self.client, endpoint)); + join_all(futures).await + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter() + .enumerate() + .all(|(idx, info)| info.n_peers >= self.expected_peer_counts[idx]) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let summary = build_timeout_summary(self.labels, data, self.expected_peer_counts); + format!("timed out waiting for network readiness: {summary}") + } +} + +struct MembershipReadiness<'a> { + topology: &'a Topology, + session: SessionNumber, + labels: &'a [String], + expect_non_empty: bool, +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for MembershipReadiness<'a> { + type Data = Vec>; + + async fn collect(&'a self) -> Self::Data { + let (validator_responses, executor_responses) = tokio::join!( + join_all( + self.topology + .validators + .iter() + .map(|node| node.da_get_membership(self.session)), + ), + join_all( + self.topology + .executors + .iter() + .map(|node| node.da_get_membership(self.session)), + ) + ); + + validator_responses + .into_iter() + .chain(executor_responses) + .collect() + } + + fn is_ready(&self, data: &Self::Data) -> bool { + self.assignation_statuses(data) + .into_iter() + .all(|ready| ready) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let statuses = 
self.assignation_statuses(&data); + let description = if self.expect_non_empty { + "non-empty assignations" + } else { + "empty assignations" + }; + let summary = build_membership_summary(self.labels, &statuses, description); + format!("timed out waiting for DA membership readiness ({description}): {summary}") + } +} + +impl MembershipReadiness<'_> { + fn assignation_statuses( + &self, + responses: &[Result], + ) -> Vec { + responses + .iter() + .map(|res| { + res.as_ref() + .map(|resp| { + let is_non_empty = !resp.assignations.is_empty(); + if self.expect_non_empty { + is_non_empty + } else { + !is_non_empty + } + }) + .unwrap_or(false) + }) + .collect() + } +} + +struct HttpMembershipReadiness<'a> { + client: &'a Client, + endpoints: &'a [Url], + session: SessionNumber, + labels: &'a [String], + expect_non_empty: bool, +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for HttpMembershipReadiness<'a> { + type Data = Vec>; + + async fn collect(&'a self) -> Self::Data { + let futures = self + .endpoints + .iter() + .map(|endpoint| fetch_membership(self.client, endpoint, self.session)); + join_all(futures).await + } + + fn is_ready(&self, data: &Self::Data) -> bool { + assignation_statuses(data, self.expect_non_empty) + .into_iter() + .all(|ready| ready) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let statuses = assignation_statuses(&data, self.expect_non_empty); + let description = if self.expect_non_empty { + "non-empty assignations" + } else { + "empty assignations" + }; + let summary = build_membership_summary(self.labels, &statuses, description); + format!("timed out waiting for DA membership readiness ({description}): {summary}") + } +} + +struct DaBalancerReadiness<'a> { + topology: &'a Topology, + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for DaBalancerReadiness<'a> { + type Data = Vec<(String, usize, BalancerStats)>; + + async fn collect(&'a self) -> Self::Data { + let mut data = 
Vec::new(); + for (idx, validator) in self.topology.validators.iter().enumerate() { + data.push(( + self.labels[idx].clone(), + validator.config().da_network.subnet_threshold, + validator.balancer_stats().await, + )); + } + for (offset, executor) in self.topology.executors.iter().enumerate() { + let label_index = self.topology.validators.len() + offset; + data.push(( + self.labels[label_index].clone(), + executor.config().da_network.subnet_threshold, + executor.balancer_stats().await, + )); + } + data + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter().all(|(_, threshold, stats)| { + if *threshold == 0 { + return true; + } + connected_subnetworks(stats) >= *threshold + }) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let summary = data + .into_iter() + .map(|(label, threshold, stats)| { + let connected = connected_subnetworks(&stats); + format!("{label}: connected={connected}, required={threshold}") + }) + .collect::>() + .join(", "); + format!("timed out waiting for DA balancer readiness: {summary}") + } + + fn poll_interval(&self) -> Duration { + Duration::from_secs(1) + } +} + +fn connected_subnetworks(stats: &BalancerStats) -> usize { + stats + .values() + .filter(|stat| stat.inbound > 0 || stat.outbound > 0) + .count() +} + +fn build_timeout_summary( + labels: &[String], + infos: Vec, + expected_counts: &[usize], +) -> String { + infos + .into_iter() + .zip(expected_counts.iter()) + .zip(labels.iter()) + .map(|((info, expected), label)| { + format!("{}: peers={}, expected={}", label, info.n_peers, expected) + }) + .collect::>() + .join(", ") +} + +fn build_membership_summary(labels: &[String], statuses: &[bool], description: &str) -> String { + statuses + .iter() + .zip(labels.iter()) + .map(|(ready, label)| { + let status = if *ready { "ready" } else { "waiting" }; + format!("{label}: status={status}, expected {description}") + }) + .collect::>() + .join(", ") +} + +async fn fetch_network_info(client: &Client, base: &Url) 
-> Libp2pInfo { + let url = join_path(base, paths::NETWORK_INFO); + let response = match client.get(url).send().await { + Ok(resp) => resp, + Err(err) => { + return log_network_warning(base, err, "failed to reach network info endpoint"); + } + }; + + let response = match response.error_for_status() { + Ok(resp) => resp, + Err(err) => { + return log_network_warning(base, err, "network info endpoint returned error"); + } + }; + + match response.json::().await { + Ok(info) => info, + Err(err) => log_network_warning(base, err, "failed to decode network info response"), + } +} + +async fn fetch_membership( + client: &Client, + base: &Url, + session: SessionNumber, +) -> Result { + let url = join_path(base, paths::DA_GET_MEMBERSHIP); + client + .post(url) + .json(&session) + .send() + .await? + .error_for_status()? + .json() + .await +} + +fn log_network_warning(base: &Url, err: impl std::fmt::Display, message: &str) -> Libp2pInfo { + warn!(target: "readiness", url = %base, error = %err, "{message}"); + empty_libp2p_info() +} + +fn empty_libp2p_info() -> Libp2pInfo { + Libp2pInfo { + listen_addresses: Vec::with_capacity(0), + n_peers: 0, + n_connections: 0, + n_pending_connections: 0, + } +} + +fn join_path(base: &Url, path: &str) -> Url { + base.join(path.trim_start_matches('/')) + .unwrap_or_else(|err| panic!("failed to join url {base} with path {path}: {err}")) +} + +fn assignation_statuses( + responses: &[Result], + expect_non_empty: bool, +) -> Vec { + responses + .iter() + .map(|res| { + res.as_ref() + .map(|resp| { + let is_non_empty = !resp.assignations.is_empty(); + if expect_non_empty { + is_non_empty + } else { + !is_non_empty + } + }) + .unwrap_or(false) + }) + .collect() +} + +fn multiaddr_port(addr: &nomos_libp2p::Multiaddr) -> Option { + for protocol in addr { + match protocol { + nomos_libp2p::Protocol::Udp(port) | nomos_libp2p::Protocol::Tcp(port) => { + return Some(port); + } + _ => {} + } + } + None +} + +fn find_expected_peer_counts( + listen_ports: 
&[u16], + initial_peer_ports: &[HashSet], +) -> Vec { + let mut expected: Vec> = vec![HashSet::new(); initial_peer_ports.len()]; + + for (idx, ports) in initial_peer_ports.iter().enumerate() { + for port in ports { + let Some(peer_idx) = listen_ports.iter().position(|p| p == port) else { + continue; + }; + if peer_idx == idx { + continue; + } + + expected[idx].insert(peer_idx); + expected[peer_idx].insert(idx); + } + } + + expected.into_iter().map(|set| set.len()).collect() +} + +#[must_use] +/// Build preload KMS configs for blend/DA and wallet keys for every node. +pub fn create_kms_configs( + blend_configs: &[GeneralBlendConfig], + da_configs: &[GeneralDaConfig], + wallet_accounts: &[WalletAccount], +) -> Vec { + da_configs + .iter() + .zip(blend_configs.iter()) + .map(|(da_conf, blend_conf)| { + let mut keys = HashMap::from([ + ( + hex::encode(blend_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(blend_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + &blend_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(blend_conf.secret_zk_key.clone())), + ), + ( + hex::encode(da_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(da_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + &da_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(da_conf.secret_zk_key.clone())), + ), + ]); + + for account in wallet_accounts { + let key_id = hex::encode(fr_to_bytes(&account.public_key().into_inner())); + keys.entry(key_id) + .or_insert_with(|| Key::Zk(ZkKey::new(account.secret_key.clone()))); + } + + PreloadKMSBackendSettings { keys } + }) + .collect() +} + +fn resolve_ids(ids: Option>, count: usize) -> Vec<[u8; 32]> { + ids.map_or_else( + || { + let mut generated = vec![[0; 32]; count]; + for id in &mut generated { + thread_rng().fill(id); + } + generated + }, + |ids| { + assert_eq!( + ids.len(), + count, + "expected {count} ids but got {}", + ids.len() + ); + ids + 
}, + ) +} + +fn resolve_ports(ports: Option>, count: usize, label: &str) -> Vec { + let resolved = ports.unwrap_or_else(|| { + iter::repeat_with(|| get_available_udp_port().unwrap()) + .take(count) + .collect() + }); + assert_eq!( + resolved.len(), + count, + "expected {count} {label} ports but got {}", + resolved.len() + ); + resolved +} diff --git a/testing-framework/runners/compose/Cargo.toml b/testing-framework/runners/compose/Cargo.toml new file mode 100644 index 0000000..49dcfe8 --- /dev/null +++ b/testing-framework/runners/compose/Cargo.toml @@ -0,0 +1,36 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-runner-compose" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +anyhow = "1" +async-trait = { workspace = true } +cfgsync = { workspace = true } +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +tempfile = { workspace = true } +tera = "1.19" +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "process", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } +url = { version = "2" } +uuid = { version = "1", features = ["v4"] } + +[dev-dependencies] +groth16 = { workspace = true } +nomos-core = { workspace = true } +nomos-ledger = { workspace = true } +nomos-tracing-service = { workspace = true } +tests = { workspace = true } +zksign = { workspace = true } diff --git a/testing-framework/runners/compose/assets/docker-compose.yml.tera b/testing-framework/runners/compose/assets/docker-compose.yml.tera new file mode 100644 index 0000000..2932567 --- /dev/null +++ b/testing-framework/runners/compose/assets/docker-compose.yml.tera @@ -0,0 +1,65 @@ +services: + prometheus: + image: prom/prometheus:v3.0.1 + 
command: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.retention.time=7d + - --web.enable-otlp-receiver + - --enable-feature=otlp-write-receiver + volumes: + - ./stack/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:z + ports: + - {{ prometheus.host_port }} + restart: on-failure + +{% for node in validators %} + {{ node.name }}: + image: {{ node.image }} +{% if node.platform %} platform: {{ node.platform }} +{% endif %} entrypoint: {{ node.entrypoint }} + volumes: +{% for volume in node.volumes %} + - {{ volume }} +{% endfor %} +{% if node.extra_hosts | length > 0 %} + extra_hosts: +{% for host in node.extra_hosts %} + - {{ host }} +{% endfor %} +{% endif %} + ports: +{% for port in node.ports %} + - {{ port }} +{% endfor %} + environment: +{% for env in node.environment %} + {{ env.key }}: "{{ env.value }}" +{% endfor %} + restart: on-failure + +{% endfor %}{% for node in executors %} + {{ node.name }}: + image: {{ node.image }} +{% if node.platform %} platform: {{ node.platform }} +{% endif %} entrypoint: {{ node.entrypoint }} + volumes: +{% for volume in node.volumes %} + - {{ volume }} +{% endfor %} +{% if node.extra_hosts | length > 0 %} + extra_hosts: +{% for host in node.extra_hosts %} + - {{ host }} +{% endfor %} +{% endif %} + ports: +{% for port in node.ports %} + - {{ port }} +{% endfor %} + environment: +{% for env in node.environment %} + {{ env.key }}: "{{ env.value }}" +{% endfor %} + restart: on-failure + +{% endfor %} diff --git a/testing-framework/runners/compose/src/block_feed.rs b/testing-framework/runners/compose/src/block_feed.rs new file mode 100644 index 0000000..19f9b58 --- /dev/null +++ b/testing-framework/runners/compose/src/block_feed.rs @@ -0,0 +1,47 @@ +use std::time::Duration; + +use testing_framework_core::scenario::{BlockFeed, BlockFeedTask, NodeClients, spawn_block_feed}; +use tokio::time::sleep; +use tracing::{info, warn}; + +use crate::errors::ComposeRunnerError; + +const BLOCK_FEED_MAX_ATTEMPTS: 
usize = 5; +const BLOCK_FEED_RETRY_DELAY: Duration = Duration::from_secs(1); + +async fn spawn_block_feed_with( + node_clients: &NodeClients, +) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> { + let block_source_client = node_clients + .random_validator() + .cloned() + .ok_or(ComposeRunnerError::BlockFeedMissing)?; + + spawn_block_feed(block_source_client) + .await + .map_err(|source| ComposeRunnerError::BlockFeed { source }) +} + +pub async fn spawn_block_feed_with_retry( + node_clients: &NodeClients, +) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> { + let mut last_err = None; + for attempt in 1..=BLOCK_FEED_MAX_ATTEMPTS { + info!(attempt, "starting block feed"); + match spawn_block_feed_with(node_clients).await { + Ok(result) => { + info!(attempt, "block feed established"); + return Ok(result); + } + Err(err) => { + last_err = Some(err); + if attempt < BLOCK_FEED_MAX_ATTEMPTS { + warn!(attempt, "block feed initialization failed; retrying"); + sleep(BLOCK_FEED_RETRY_DELAY).await; + } + } + } + } + + Err(last_err.expect("block feed retry should capture an error")) +} diff --git a/testing-framework/runners/compose/src/cfgsync.rs b/testing-framework/runners/compose/src/cfgsync.rs new file mode 100644 index 0000000..a3e98d7 --- /dev/null +++ b/testing-framework/runners/compose/src/cfgsync.rs @@ -0,0 +1,62 @@ +use std::{path::Path, process::Command as StdCommand}; + +use testing_framework_core::{ + scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, write_cfgsync_template}, + topology::GeneratedTopology, +}; + +/// Handle that tracks a cfgsync server started for compose runs. +#[derive(Debug)] +pub enum CfgsyncServerHandle { + Container { name: String, stopped: bool }, +} + +impl CfgsyncServerHandle { + /// Stop the backing container if still running. 
+ pub fn shutdown(&mut self) { + match self { + Self::Container { name, stopped } if !*stopped => { + remove_container(name); + *stopped = true; + } + _ => {} + } + } +} + +fn remove_container(name: &str) { + match StdCommand::new("docker") + .arg("rm") + .arg("-f") + .arg(name) + .status() + { + Ok(status) if status.success() => {} + Ok(status) => { + eprintln!("[compose-runner] failed to remove cfgsync container {name}: {status}"); + } + Err(_) => { + eprintln!("[compose-runner] failed to spawn docker rm for cfgsync container {name}"); + } + } +} + +impl Drop for CfgsyncServerHandle { + fn drop(&mut self) { + self.shutdown(); + } +} + +/// Updates the cfgsync template on disk with topology-driven overrides. +pub fn update_cfgsync_config( + path: &Path, + topology: &GeneratedTopology, + use_kzg_mount: bool, + port: u16, +) -> anyhow::Result<()> { + let mut cfg = load_cfgsync_template(path)?; + cfg.port = port; + apply_topology_overrides(&mut cfg, topology, use_kzg_mount); + write_cfgsync_template(path, &cfg)?; + Ok(()) +} diff --git a/testing-framework/runners/compose/src/cleanup.rs b/testing-framework/runners/compose/src/cleanup.rs new file mode 100644 index 0000000..1093ca2 --- /dev/null +++ b/testing-framework/runners/compose/src/cleanup.rs @@ -0,0 +1,110 @@ +use std::{env, path::PathBuf, thread}; + +use testing_framework_core::scenario::CleanupGuard; + +use crate::{ + cfgsync::CfgsyncServerHandle, + compose::{ComposeCommandError, compose_down}, + workspace::ComposeWorkspace, +}; + +/// Cleans up a compose deployment and associated cfgsync container. +pub struct RunnerCleanup { + pub compose_file: PathBuf, + pub project_name: String, + pub root: PathBuf, + workspace: Option, + cfgsync: Option, +} + +impl RunnerCleanup { + /// Construct a cleanup guard for the given compose deployment. 
+ pub fn new( + compose_file: PathBuf, + project_name: String, + root: PathBuf, + workspace: ComposeWorkspace, + cfgsync: Option, + ) -> Self { + debug_assert!( + !compose_file.as_os_str().is_empty() && !project_name.is_empty(), + "compose cleanup should receive valid identifiers" + ); + Self { + compose_file, + project_name, + root, + workspace: Some(workspace), + cfgsync, + } + } + + fn teardown_compose(&self) { + if let Err(err) = + run_compose_down_blocking(&self.compose_file, &self.project_name, &self.root) + { + eprintln!("[compose-runner] docker compose down failed: {err}"); + } + } +} + +fn run_compose_down_blocking( + compose_file: &PathBuf, + project_name: &str, + root: &PathBuf, +) -> Result<(), ComposeCommandError> { + let compose_file = compose_file.clone(); + let project_name = project_name.to_owned(); + let root = root.clone(); + + let handle = thread::spawn(move || { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .map_err(|err| ComposeCommandError::Spawn { + command: "docker compose down".into(), + source: std::io::Error::new(std::io::ErrorKind::Other, err), + })? + .block_on(compose_down(&compose_file, &project_name, &root)) + }); + + handle.join().map_err(|_| ComposeCommandError::Spawn { + command: "docker compose down".into(), + source: std::io::Error::new( + std::io::ErrorKind::Other, + "join failure running compose down", + ), + })? 
+} +impl CleanupGuard for RunnerCleanup { + fn cleanup(mut self: Box) { + if self.should_preserve() { + self.persist_workspace(); + return; + } + + self.teardown_compose(); + + if let Some(mut handle) = self.cfgsync.take() { + handle.shutdown(); + } + } +} + +impl RunnerCleanup { + fn should_preserve(&self) -> bool { + env::var("COMPOSE_RUNNER_PRESERVE").is_ok() || env::var("TESTNET_RUNNER_PRESERVE").is_ok() + } + + fn persist_workspace(&mut self) { + if let Some(workspace) = self.workspace.take() { + let keep = workspace.into_inner().keep(); + eprintln!( + "[compose-runner] preserving docker state at {}", + keep.display() + ); + } + + eprintln!("[compose-runner] compose preserve flag set; skipping docker compose down"); + } +} diff --git a/testing-framework/runners/compose/src/compose.rs b/testing-framework/runners/compose/src/compose.rs new file mode 100644 index 0000000..43620ad --- /dev/null +++ b/testing-framework/runners/compose/src/compose.rs @@ -0,0 +1,639 @@ +use std::{ + env, fs, io, + path::{Path, PathBuf}, + process, + time::Duration, +}; + +use anyhow::Context as _; +use serde::Serialize; +use tera::Context as TeraContext; +use testing_framework_core::{ + adjust_timeout, + topology::{GeneratedNodeConfig, GeneratedTopology}, +}; +use tokio::{process::Command, time::timeout}; + +const COMPOSE_UP_TIMEOUT: Duration = Duration::from_secs(120); +const TEMPLATE_RELATIVE_PATH: &str = + "testing-framework/runners/compose/assets/docker-compose.yml.tera"; + +/// Errors running docker compose commands. +#[derive(Debug, thiserror::Error)] +pub enum ComposeCommandError { + #[error("{command} exited with status {status}")] + Failed { + command: String, + status: process::ExitStatus, + }, + #[error("failed to spawn {command}: {source}")] + Spawn { + command: String, + #[source] + source: io::Error, + }, + #[error("{command} timed out after {timeout:?}")] + Timeout { command: String, timeout: Duration }, +} + +/// Runs `docker compose up -d` for the generated stack. 
+pub async fn compose_up( + compose_path: &Path, + project_name: &str, + root: &Path, +) -> Result<(), ComposeCommandError> { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(compose_path) + .arg("-p") + .arg(project_name) + .arg("up") + .arg("-d") + .current_dir(root); + + run_compose_command(cmd, adjust_timeout(COMPOSE_UP_TIMEOUT), "docker compose up").await +} + +/// Runs `docker compose down --volumes` for the generated stack. +pub async fn compose_down( + compose_path: &Path, + project_name: &str, + root: &Path, +) -> Result<(), ComposeCommandError> { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(compose_path) + .arg("-p") + .arg(project_name) + .arg("down") + .arg("--volumes") + .current_dir(root); + + run_compose_command( + cmd, + adjust_timeout(COMPOSE_UP_TIMEOUT), + "docker compose down", + ) + .await +} + +/// Errors when templating docker-compose files. +#[derive(Debug, thiserror::Error)] +pub enum TemplateError { + #[error("failed to resolve repository root for compose template: {source}")] + RepositoryRoot { + #[source] + source: anyhow::Error, + }, + #[error("failed to read compose template at {path}: {source}")] + Read { + path: PathBuf, + #[source] + source: io::Error, + }, + #[error("failed to serialise compose descriptor for templating: {source}")] + Serialize { + #[source] + source: tera::Error, + }, + #[error("failed to render compose template at {path}: {source}")] + Render { + path: PathBuf, + #[source] + source: tera::Error, + }, + #[error("failed to write compose file at {path}: {source}")] + Write { + path: PathBuf, + #[source] + source: io::Error, + }, +} + +/// Errors building a compose descriptor from the topology. 
+#[derive(Debug, thiserror::Error)] +pub enum DescriptorBuildError { + #[error("cfgsync port is not configured for compose descriptor")] + MissingCfgsyncPort, + #[error("prometheus port is not configured for compose descriptor")] + MissingPrometheusPort, +} + +/// Top-level docker-compose descriptor built from a GeneratedTopology. +#[derive(Clone, Debug, Serialize)] +pub struct ComposeDescriptor { + prometheus: PrometheusTemplate, + validators: Vec, + executors: Vec, +} + +impl ComposeDescriptor { + /// Start building a descriptor from a generated topology. + #[must_use] + pub const fn builder(topology: &GeneratedTopology) -> ComposeDescriptorBuilder<'_> { + ComposeDescriptorBuilder::new(topology) + } + + #[cfg(test)] + fn validators(&self) -> &[NodeDescriptor] { + &self.validators + } + + #[cfg(test)] + fn executors(&self) -> &[NodeDescriptor] { + &self.executors + } +} + +/// Builder for `ComposeDescriptor` that plugs topology values into the +/// template. +pub struct ComposeDescriptorBuilder<'a> { + topology: &'a GeneratedTopology, + use_kzg_mount: bool, + cfgsync_port: Option, + prometheus_port: Option, +} + +impl<'a> ComposeDescriptorBuilder<'a> { + const fn new(topology: &'a GeneratedTopology) -> Self { + Self { + topology, + use_kzg_mount: false, + cfgsync_port: None, + prometheus_port: None, + } + } + + #[must_use] + /// Mount KZG parameters into nodes when enabled. + pub const fn with_kzg_mount(mut self, enabled: bool) -> Self { + self.use_kzg_mount = enabled; + self + } + + #[must_use] + /// Set cfgsync port for nodes. + pub const fn with_cfgsync_port(mut self, port: u16) -> Self { + self.cfgsync_port = Some(port); + self + } + + #[must_use] + /// Set host port mapping for Prometheus. + pub const fn with_prometheus_port(mut self, port: u16) -> Self { + self.prometheus_port = Some(port); + self + } + + /// Finish building the descriptor, erroring if required fields are missing. 
+ pub fn build(self) -> Result { + let cfgsync_port = self + .cfgsync_port + .ok_or(DescriptorBuildError::MissingCfgsyncPort)?; + let prometheus_host_port = self + .prometheus_port + .ok_or(DescriptorBuildError::MissingPrometheusPort)?; + + let (default_image, default_platform) = resolve_image(); + let image = default_image; + let platform = default_platform; + + let validators = build_nodes( + self.topology.validators(), + ComposeNodeKind::Validator, + &image, + platform.as_deref(), + self.use_kzg_mount, + cfgsync_port, + ); + + let executors = build_nodes( + self.topology.executors(), + ComposeNodeKind::Executor, + &image, + platform.as_deref(), + self.use_kzg_mount, + cfgsync_port, + ); + + Ok(ComposeDescriptor { + prometheus: PrometheusTemplate::new(prometheus_host_port), + validators, + executors, + }) + } +} + +/// Minimal Prometheus service mapping used in the compose template. +#[derive(Clone, Debug, Serialize)] +pub struct PrometheusTemplate { + host_port: String, +} + +impl PrometheusTemplate { + fn new(port: u16) -> Self { + Self { + host_port: format!("127.0.0.1:{port}:9090"), + } + } +} + +/// Environment variable entry for docker-compose templating. +#[derive(Clone, Debug, Serialize, PartialEq, Eq)] +pub struct EnvEntry { + key: String, + value: String, +} + +impl EnvEntry { + fn new(key: impl Into, value: impl Into) -> Self { + Self { + key: key.into(), + value: value.into(), + } + } + + #[cfg(test)] + fn key(&self) -> &str { + &self.key + } + + #[cfg(test)] + fn value(&self) -> &str { + &self.value + } +} + +/// Describes a validator or executor container in the compose stack. +#[derive(Clone, Debug, Serialize)] +pub struct NodeDescriptor { + name: String, + image: String, + entrypoint: String, + volumes: Vec, + extra_hosts: Vec, + ports: Vec, + environment: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + platform: Option, +} + +/// Host ports mapped for a single node. 
+#[derive(Clone, Debug)] +pub struct NodeHostPorts { + pub api: u16, + pub testing: u16, +} + +/// All host port mappings for validators and executors. +#[derive(Clone, Debug)] +pub struct HostPortMapping { + pub validators: Vec, + pub executors: Vec, +} + +impl HostPortMapping { + /// Returns API ports for all validators. + pub fn validator_api_ports(&self) -> Vec { + self.validators.iter().map(|ports| ports.api).collect() + } + + /// Returns API ports for all executors. + pub fn executor_api_ports(&self) -> Vec { + self.executors.iter().map(|ports| ports.api).collect() + } +} + +impl NodeDescriptor { + fn from_node( + kind: ComposeNodeKind, + index: usize, + node: &GeneratedNodeConfig, + image: &str, + platform: Option<&str>, + use_kzg_mount: bool, + cfgsync_port: u16, + ) -> Self { + let mut environment = base_environment(cfgsync_port); + let identifier = kind.instance_name(index); + environment.extend([ + EnvEntry::new( + "CFG_NETWORK_PORT", + node.general.network_config.backend.inner.port.to_string(), + ), + EnvEntry::new("CFG_DA_PORT", node.da_port.to_string()), + EnvEntry::new("CFG_BLEND_PORT", node.blend_port.to_string()), + EnvEntry::new( + "CFG_API_PORT", + node.general.api_config.address.port().to_string(), + ), + EnvEntry::new( + "CFG_TESTING_HTTP_PORT", + node.general + .api_config + .testing_http_address + .port() + .to_string(), + ), + EnvEntry::new("CFG_HOST_IDENTIFIER", identifier), + ]); + + let ports = vec![ + node.general.api_config.address.port().to_string(), + node.general + .api_config + .testing_http_address + .port() + .to_string(), + ]; + + Self { + name: kind.instance_name(index), + image: image.to_owned(), + entrypoint: kind.entrypoint().to_owned(), + volumes: base_volumes(use_kzg_mount), + extra_hosts: default_extra_hosts(), + ports, + environment, + platform: platform.map(ToOwned::to_owned), + } + } + + #[cfg(test)] + fn ports(&self) -> &[String] { + &self.ports + } + + #[cfg(test)] + fn environment(&self) -> &[EnvEntry] { + 
&self.environment + } +} + +/// Render and write the compose file to disk. +pub fn write_compose_file( + descriptor: &ComposeDescriptor, + compose_path: &Path, +) -> Result<(), TemplateError> { + TemplateSource::load()?.write(descriptor, compose_path) +} + +/// Dump docker compose logs to stderr for debugging failures. +pub async fn dump_compose_logs(compose_file: &Path, project: &str, root: &Path) { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(compose_file) + .arg("-p") + .arg(project) + .arg("logs") + .arg("--no-color") + .current_dir(root); + + match cmd.output().await { + Ok(output) => print_logs(&output.stdout, &output.stderr), + Err(err) => eprintln!("[compose-runner] failed to collect docker compose logs: {err}"), + } +} + +fn print_logs(stdout: &[u8], stderr: &[u8]) { + if !stdout.is_empty() { + eprintln!( + "[compose-runner] docker compose logs:\n{}", + String::from_utf8_lossy(stdout) + ); + } + if !stderr.is_empty() { + eprintln!( + "[compose-runner] docker compose errors:\n{}", + String::from_utf8_lossy(stderr) + ); + } +} + +struct TemplateSource { + path: PathBuf, + contents: String, +} + +impl TemplateSource { + fn load() -> Result { + let repo_root = + repository_root().map_err(|source| TemplateError::RepositoryRoot { source })?; + let path = repo_root.join(TEMPLATE_RELATIVE_PATH); + let contents = fs::read_to_string(&path).map_err(|source| TemplateError::Read { + path: path.clone(), + source, + })?; + + Ok(Self { path, contents }) + } + + fn render(&self, descriptor: &ComposeDescriptor) -> Result { + let context = TeraContext::from_serialize(descriptor) + .map_err(|source| TemplateError::Serialize { source })?; + + tera::Tera::one_off(&self.contents, &context, false).map_err(|source| { + TemplateError::Render { + path: self.path.clone(), + source, + } + }) + } + + fn write(&self, descriptor: &ComposeDescriptor, output: &Path) -> Result<(), TemplateError> { + let rendered = self.render(descriptor)?; + 
fs::write(output, rendered).map_err(|source| TemplateError::Write { + path: output.to_path_buf(), + source, + }) + } +} + +/// Resolve the repository root, respecting `CARGO_WORKSPACE_DIR` override. +pub fn repository_root() -> anyhow::Result { + env::var("CARGO_WORKSPACE_DIR") + .map(PathBuf::from) + .or_else(|_| { + Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .and_then(Path::parent) + .map(PathBuf::from) + .context("resolving repository root from manifest dir") + }) +} + +#[derive(Clone, Copy)] +enum ComposeNodeKind { + Validator, + Executor, +} + +impl ComposeNodeKind { + fn instance_name(self, index: usize) -> String { + match self { + Self::Validator => format!("validator-{index}"), + Self::Executor => format!("executor-{index}"), + } + } + + const fn entrypoint(self) -> &'static str { + match self { + Self::Validator => "/etc/nomos/scripts/run_nomos_node.sh", + Self::Executor => "/etc/nomos/scripts/run_nomos_executor.sh", + } + } +} + +fn build_nodes( + nodes: &[GeneratedNodeConfig], + kind: ComposeNodeKind, + image: &str, + platform: Option<&str>, + use_kzg_mount: bool, + cfgsync_port: u16, +) -> Vec { + nodes + .iter() + .enumerate() + .map(|(index, node)| { + NodeDescriptor::from_node( + kind, + index, + node, + image, + platform, + use_kzg_mount, + cfgsync_port, + ) + }) + .collect() +} + +fn base_environment(cfgsync_port: u16) -> Vec { + let pol_mode = std::env::var("POL_PROOF_DEV_MODE").unwrap_or_else(|_| "true".to_string()); + vec![ + EnvEntry::new("POL_PROOF_DEV_MODE", pol_mode), + EnvEntry::new( + "CFG_SERVER_ADDR", + format!("http://host.docker.internal:{cfgsync_port}"), + ), + EnvEntry::new("OTEL_METRIC_EXPORT_INTERVAL", "5000"), + ] +} + +fn base_volumes(use_kzg_mount: bool) -> Vec { + let mut volumes = vec!["./stack:/etc/nomos".into()]; + if use_kzg_mount { + volumes.push("./kzgrs_test_params:/kzgrs_test_params:z".into()); + } + volumes +} + +fn default_extra_hosts() -> Vec { + 
host_gateway_entry().into_iter().collect() +} + +/// Select the compose image and optional platform, honoring +/// NOMOS_TESTNET_IMAGE. +pub fn resolve_image() -> (String, Option) { + let image = + env::var("NOMOS_TESTNET_IMAGE").unwrap_or_else(|_| String::from("nomos-testnet:local")); + let platform = (image == "ghcr.io/logos-co/nomos:testnet").then(|| "linux/amd64".to_owned()); + (image, platform) +} + +fn host_gateway_entry() -> Option { + if let Ok(value) = env::var("COMPOSE_RUNNER_HOST_GATEWAY") { + if value.eq_ignore_ascii_case("disable") || value.is_empty() { + return None; + } + return Some(value); + } + + if let Ok(gateway) = env::var("DOCKER_HOST_GATEWAY") { + if !gateway.is_empty() { + return Some(format!("host.docker.internal:{gateway}")); + } + } + + Some("host.docker.internal:host-gateway".into()) +} + +async fn run_compose_command( + mut command: Command, + timeout_duration: Duration, + description: &str, +) -> Result<(), ComposeCommandError> { + let result = timeout(timeout_duration, command.status()).await; + match result { + Ok(status) => handle_compose_status(status, description), + Err(_) => Err(ComposeCommandError::Timeout { + command: description.to_owned(), + timeout: timeout_duration, + }), + } +} + +fn handle_compose_status( + status: std::io::Result, + description: &str, +) -> Result<(), ComposeCommandError> { + match status { + Ok(code) if code.success() => Ok(()), + Ok(code) => Err(ComposeCommandError::Failed { + command: description.to_owned(), + status: code, + }), + Err(err) => Err(ComposeCommandError::Spawn { + command: description.to_owned(), + source: err, + }), + } +} + +#[cfg(test)] +mod tests { + use testing_framework_core::topology::{TopologyBuilder, TopologyConfig}; + + use super::*; + + #[test] + fn descriptor_matches_topology_counts() { + let topology = TopologyBuilder::new(TopologyConfig::with_node_numbers(2, 1)).build(); + let descriptor = ComposeDescriptor::builder(&topology) + .with_cfgsync_port(4400) + 
.with_prometheus_port(9090) + .build() + .expect("descriptor"); + + assert_eq!(descriptor.validators().len(), topology.validators().len()); + assert_eq!(descriptor.executors().len(), topology.executors().len()); + } + + #[test] + fn descriptor_includes_expected_env_and_ports() { + let topology = TopologyBuilder::new(TopologyConfig::with_node_numbers(1, 1)).build(); + let cfgsync_port = 4555; + let descriptor = ComposeDescriptor::builder(&topology) + .with_cfgsync_port(cfgsync_port) + .with_prometheus_port(9090) + .build() + .expect("descriptor"); + + let validator = &descriptor.validators()[0]; + assert!( + validator + .environment() + .iter() + .any(|entry| entry.key() == "CFG_SERVER_ADDR" + && entry.value() == format!("http://host.docker.internal:{cfgsync_port}")) + ); + + let api_container = topology.validators()[0].general.api_config.address.port(); + assert!(validator.ports().contains(&api_container.to_string())); + } +} diff --git a/testing-framework/runners/compose/src/control.rs b/testing-framework/runners/compose/src/control.rs new file mode 100644 index 0000000..750534d --- /dev/null +++ b/testing-framework/runners/compose/src/control.rs @@ -0,0 +1,59 @@ +use std::path::{Path, PathBuf}; + +use testing_framework_core::scenario::{DynError, NodeControlHandle}; +use tokio::process::Command; + +use crate::{docker::run_docker_command, errors::ComposeRunnerError}; + +pub async fn restart_compose_service( + compose_file: &Path, + project_name: &str, + service: &str, +) -> Result<(), ComposeRunnerError> { + let mut command = Command::new("docker"); + command + .arg("compose") + .arg("-f") + .arg(compose_file) + .arg("-p") + .arg(project_name) + .arg("restart") + .arg(service); + + let description = "docker compose restart"; + run_docker_command( + command, + description, + testing_framework_core::adjust_timeout(std::time::Duration::from_secs(120)), + ) + .await +} + +/// Compose-specific node control handle for restarting nodes. 
+pub struct ComposeNodeControl { + pub(crate) compose_file: PathBuf, + pub(crate) project_name: String, +} + +#[async_trait::async_trait] +impl NodeControlHandle for ComposeNodeControl { + async fn restart_validator(&self, index: usize) -> Result<(), DynError> { + restart_compose_service( + &self.compose_file, + &self.project_name, + &format!("validator-{index}"), + ) + .await + .map_err(|err| format!("validator restart failed: {err}").into()) + } + + async fn restart_executor(&self, index: usize) -> Result<(), DynError> { + restart_compose_service( + &self.compose_file, + &self.project_name, + &format!("executor-{index}"), + ) + .await + .map_err(|err| format!("executor restart failed: {err}").into()) + } +} diff --git a/testing-framework/runners/compose/src/deployer.rs b/testing-framework/runners/compose/src/deployer.rs new file mode 100644 index 0000000..2927b19 --- /dev/null +++ b/testing-framework/runners/compose/src/deployer.rs @@ -0,0 +1,507 @@ +use std::{ + env, + net::{Ipv4Addr, TcpListener as StdTcpListener}, + sync::Arc, +}; + +use async_trait::async_trait; +use testing_framework_core::{ + scenario::{ + BlockFeed, BlockFeedTask, CleanupGuard, Deployer, NodeClients, NodeControlHandle, + RequiresNodeControl, RunContext, Runner, Scenario, + }, + topology::GeneratedTopology, +}; +use tracing::{debug, info}; + +use crate::{ + block_feed::spawn_block_feed_with_retry, + cleanup::RunnerCleanup, + compose::HostPortMapping, + control::ComposeNodeControl, + docker::ensure_docker_available, + environment::{ + PortReservation, StackEnvironment, ensure_supported_topology, prepare_environment, + }, + errors::ComposeRunnerError, + ports::{compose_runner_host, discover_host_ports, ensure_remote_readiness_with_ports}, + readiness::{ + build_node_clients_with_ports, ensure_executors_ready_with_ports, + ensure_validators_ready_with_ports, maybe_sleep_for_disabled_readiness, + metrics_handle_from_port, + }, +}; + +/// Docker Compose-based deployer for Nomos test scenarios. 
+#[derive(Clone, Copy)] +pub struct ComposeDeployer { + readiness_checks: bool, +} + +impl Default for ComposeDeployer { + fn default() -> Self { + Self::new() + } +} + +impl ComposeDeployer { + #[must_use] + pub const fn new() -> Self { + Self { + readiness_checks: true, + } + } + + #[must_use] + pub const fn with_readiness(mut self, enabled: bool) -> Self { + self.readiness_checks = enabled; + self + } + + async fn prepare_ports( + &self, + environment: &mut StackEnvironment, + descriptors: &GeneratedTopology, + ) -> Result { + debug!("resolving host ports for compose services"); + match discover_host_ports(environment, descriptors).await { + Ok(mapping) => { + info!( + validator_ports = ?mapping.validator_api_ports(), + executor_ports = ?mapping.executor_api_ports(), + prometheus_port = environment.prometheus_port(), + "resolved container host ports" + ); + Ok(mapping) + } + Err(err) => { + environment + .fail("failed to determine container host ports") + .await; + Err(err) + } + } + } + + async fn wait_for_readiness( + &self, + descriptors: &GeneratedTopology, + host_ports: &HostPortMapping, + environment: &mut StackEnvironment, + ) -> Result<(), ComposeRunnerError> { + info!( + ports = ?host_ports.validator_api_ports(), + "waiting for validator HTTP endpoints" + ); + if let Err(err) = + ensure_validators_ready_with_ports(&host_ports.validator_api_ports()).await + { + environment.fail("validator readiness failed").await; + return Err(err.into()); + } + + info!( + ports = ?host_ports.executor_api_ports(), + "waiting for executor HTTP endpoints" + ); + if let Err(err) = ensure_executors_ready_with_ports(&host_ports.executor_api_ports()).await + { + environment.fail("executor readiness failed").await; + return Err(err.into()); + } + + info!("waiting for remote service readiness"); + if let Err(err) = ensure_remote_readiness_with_ports(descriptors, host_ports).await { + environment.fail("remote readiness probe failed").await; + return Err(err.into()); + } + + 
Ok(()) + } + + async fn build_node_clients( + &self, + descriptors: &GeneratedTopology, + host_ports: &HostPortMapping, + host: &str, + environment: &mut StackEnvironment, + ) -> Result { + match build_node_clients_with_ports(descriptors, host_ports, host) { + Ok(clients) => Ok(clients), + Err(err) => { + environment + .fail("failed to construct node api clients") + .await; + Err(err.into()) + } + } + } + + async fn start_block_feed( + &self, + node_clients: &NodeClients, + environment: &mut StackEnvironment, + ) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> { + match spawn_block_feed_with_retry(node_clients).await { + Ok(pair) => { + info!("block feed connected to validator"); + Ok(pair) + } + Err(err) => { + environment.fail("failed to initialize block feed").await; + Err(err) + } + } + } + + fn maybe_node_control( + &self, + environment: &StackEnvironment, + ) -> Option> + where + Caps: RequiresNodeControl + Send + Sync, + { + Caps::REQUIRED.then(|| { + Arc::new(ComposeNodeControl { + compose_file: environment.compose_path().to_path_buf(), + project_name: environment.project_name().to_owned(), + }) as Arc + }) + } +} + +pub(crate) const PROMETHEUS_PORT_ENV: &str = "TEST_FRAMEWORK_PROMETHEUS_PORT"; +pub(crate) const DEFAULT_PROMETHEUS_PORT: u16 = 9090; + +fn allocate_prometheus_port() -> Option { + reserve_port(DEFAULT_PROMETHEUS_PORT).or_else(|| reserve_port(0)) +} + +fn reserve_port(port: u16) -> Option { + let listener = StdTcpListener::bind((Ipv4Addr::LOCALHOST, port)).ok()?; + let actual_port = listener.local_addr().ok()?.port(); + Some(PortReservation::new(actual_port, Some(listener))) +} + +#[async_trait] +impl Deployer for ComposeDeployer +where + Caps: RequiresNodeControl + Send + Sync, +{ + type Error = ComposeRunnerError; + + async fn deploy(&self, scenario: &Scenario) -> Result { + ensure_docker_available().await?; + let descriptors = scenario.topology().clone(); + ensure_supported_topology(&descriptors)?; + + info!( + validators = 
descriptors.validators().len(), + executors = descriptors.executors().len(), + "starting compose deployment" + ); + + let prometheus_env = env::var(PROMETHEUS_PORT_ENV) + .ok() + .and_then(|raw| raw.parse::().ok()); + if prometheus_env.is_some() { + info!(port = prometheus_env, "using prometheus port from env"); + } + let prometheus_port = prometheus_env + .and_then(|port| reserve_port(port)) + .or_else(|| allocate_prometheus_port()) + .unwrap_or_else(|| PortReservation::new(DEFAULT_PROMETHEUS_PORT, None)); + let mut environment = + prepare_environment(&descriptors, prometheus_port, prometheus_env.is_some()).await?; + info!( + compose_file = %environment.compose_path().display(), + project = environment.project_name(), + root = %environment.root().display(), + "compose workspace prepared" + ); + + let host_ports = self.prepare_ports(&mut environment, &descriptors).await?; + + if self.readiness_checks { + self.wait_for_readiness(&descriptors, &host_ports, &mut environment) + .await?; + } else { + info!("readiness checks disabled; giving the stack a short grace period"); + maybe_sleep_for_disabled_readiness(false).await; + } + + info!("compose stack ready; building node clients"); + let host = compose_runner_host(); + let node_clients = self + .build_node_clients(&descriptors, &host_ports, &host, &mut environment) + .await?; + let telemetry = metrics_handle_from_port(environment.prometheus_port(), &host)?; + let node_control = self.maybe_node_control::(&environment); + + let (block_feed, block_feed_guard) = self + .start_block_feed(&node_clients, &mut environment) + .await?; + let cleanup_guard: Box = Box::new(ComposeCleanupGuard::new( + environment.into_cleanup(), + block_feed_guard, + )); + let context = RunContext::new( + descriptors, + None, + node_clients, + scenario.duration(), + telemetry, + block_feed, + node_control, + ); + + Ok(Runner::new(context, Some(cleanup_guard))) + } +} + +struct ComposeCleanupGuard { + environment: RunnerCleanup, + block_feed: 
Option, +} + +impl ComposeCleanupGuard { + const fn new(environment: RunnerCleanup, block_feed: BlockFeedTask) -> Self { + Self { + environment, + block_feed: Some(block_feed), + } + } +} + +impl CleanupGuard for ComposeCleanupGuard { + fn cleanup(mut self: Box) { + if let Some(block_feed) = self.block_feed.take() { + CleanupGuard::cleanup(Box::new(block_feed)); + } + CleanupGuard::cleanup(Box::new(self.environment)); + } +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, net::Ipv4Addr}; + + use cfgsync::config::{Host, PortOverrides, create_node_configs}; + use groth16::Fr; + use nomos_core::{ + mantle::{GenesisTx as GenesisTxTrait, ledger::NoteId}, + sdp::{ProviderId, ServiceType}, + }; + use nomos_ledger::LedgerState; + use nomos_tracing_service::TracingSettings; + use testing_framework_core::{ + scenario::ScenarioBuilder, + topology::{GeneratedNodeConfig, GeneratedTopology, NodeRole as TopologyNodeRole}, + }; + use zksign::PublicKey; + + #[test] + fn cfgsync_prebuilt_configs_preserve_genesis() { + let scenario = ScenarioBuilder::topology() + .validators(1) + .executors(1) + .apply() + .build(); + let topology = scenario.topology().clone(); + let hosts = hosts_from_topology(&topology); + let tracing_settings = tracing_settings(&topology); + + let configs = create_node_configs( + &topology.config().consensus_params, + &topology.config().da_params, + &tracing_settings, + &topology.config().wallet_config, + Some(topology.nodes().map(|node| node.id).collect()), + Some(topology.nodes().map(|node| node.da_port).collect()), + Some(topology.nodes().map(|node| node.blend_port).collect()), + hosts, + ); + let configs_by_identifier: HashMap<_, _> = configs + .into_iter() + .map(|(host, config)| (host.identifier, config)) + .collect(); + + for node in topology.nodes() { + let identifier = identifier_for(node.role(), node.index()); + let cfgsync_config = configs_by_identifier + .get(&identifier) + .unwrap_or_else(|| panic!("missing cfgsync config for 
{identifier}")); + let expected_genesis = &node.general.consensus_config.genesis_tx; + let actual_genesis = &cfgsync_config.consensus_config.genesis_tx; + if std::env::var("PRINT_GENESIS").is_ok() { + println!( + "[fingerprint {identifier}] expected={:?}", + declaration_fingerprint(expected_genesis) + ); + println!( + "[fingerprint {identifier}] actual={:?}", + declaration_fingerprint(actual_genesis) + ); + } + assert_eq!( + expected_genesis.mantle_tx().ledger_tx, + actual_genesis.mantle_tx().ledger_tx, + "ledger tx mismatch for {identifier}" + ); + assert_eq!( + declaration_fingerprint(expected_genesis), + declaration_fingerprint(actual_genesis), + "declaration entries mismatch for {identifier}" + ); + } + } + + #[test] + fn cfgsync_genesis_proofs_verify_against_ledger() { + let scenario = ScenarioBuilder::topology() + .validators(1) + .executors(1) + .apply() + .build(); + let topology = scenario.topology().clone(); + let hosts = hosts_from_topology(&topology); + let tracing_settings = tracing_settings(&topology); + + let configs = create_node_configs( + &topology.config().consensus_params, + &topology.config().da_params, + &tracing_settings, + &topology.config().wallet_config, + Some(topology.nodes().map(|node| node.id).collect()), + Some(topology.nodes().map(|node| node.da_port).collect()), + Some(topology.nodes().map(|node| node.blend_port).collect()), + hosts, + ); + let configs_by_identifier: HashMap<_, _> = configs + .into_iter() + .map(|(host, config)| (host.identifier, config)) + .collect(); + + for node in topology.nodes() { + let identifier = identifier_for(node.role(), node.index()); + let cfgsync_config = configs_by_identifier + .get(&identifier) + .unwrap_or_else(|| panic!("missing cfgsync config for {identifier}")); + LedgerState::from_genesis_tx::<()>( + cfgsync_config.consensus_config.genesis_tx.clone(), + &cfgsync_config.consensus_config.ledger_config, + Fr::from(0u64), + ) + .unwrap_or_else(|err| panic!("ledger rejected genesis for {identifier}: 
{err:?}")); + } + } + + #[test] + fn cfgsync_docker_overrides_produce_valid_genesis() { + let scenario = ScenarioBuilder::topology() + .validators(1) + .executors(1) + .apply() + .build(); + let topology = scenario.topology().clone(); + let tracing_settings = tracing_settings(&topology); + let hosts = docker_style_hosts(&topology); + + let configs = create_node_configs( + &topology.config().consensus_params, + &topology.config().da_params, + &tracing_settings, + &topology.config().wallet_config, + Some(topology.nodes().map(|node| node.id).collect()), + Some(topology.nodes().map(|node| node.da_port).collect()), + Some(topology.nodes().map(|node| node.blend_port).collect()), + hosts, + ); + + for (host, config) in configs { + let genesis = &config.consensus_config.genesis_tx; + LedgerState::from_genesis_tx::<()>( + genesis.clone(), + &config.consensus_config.ledger_config, + Fr::from(0u64), + ) + .unwrap_or_else(|err| { + panic!("ledger rejected genesis for {}: {err:?}", host.identifier) + }); + } + } + + fn hosts_from_topology(topology: &GeneratedTopology) -> Vec { + topology.nodes().map(host_from_node).collect() + } + + fn docker_style_hosts(topology: &GeneratedTopology) -> Vec { + topology + .nodes() + .map(|node| docker_host(node, 10 + node.index() as u8)) + .collect() + } + + fn host_from_node(node: &GeneratedNodeConfig) -> Host { + let identifier = identifier_for(node.role(), node.index()); + let ip = Ipv4Addr::LOCALHOST; + let mut host = make_host(node.role(), ip, identifier); + host.network_port = node.network_port(); + host.da_network_port = node.da_port; + host.blend_port = node.blend_port; + host + } + + fn docker_host(node: &GeneratedNodeConfig, octet: u8) -> Host { + let identifier = identifier_for(node.role(), node.index()); + let ip = Ipv4Addr::new(172, 23, 0, octet); + let mut host = make_host(node.role(), ip, identifier); + host.network_port = node.network_port().saturating_add(1000); + host.da_network_port = node.da_port.saturating_add(1000); + 
host.blend_port = node.blend_port.saturating_add(1000); + host + } + + fn tracing_settings(topology: &GeneratedTopology) -> TracingSettings { + topology + .validators() + .first() + .or_else(|| topology.executors().first()) + .expect("topology must contain at least one node") + .general + .tracing_config + .tracing_settings + .clone() + } + + fn identifier_for(role: TopologyNodeRole, index: usize) -> String { + match role { + TopologyNodeRole::Validator => format!("validator-{index}"), + TopologyNodeRole::Executor => format!("executor-{index}"), + } + } + + fn make_host(role: TopologyNodeRole, ip: Ipv4Addr, identifier: String) -> Host { + let ports = PortOverrides { + network_port: None, + da_network_port: None, + blend_port: None, + api_port: None, + testing_http_port: None, + }; + match role { + TopologyNodeRole::Validator => Host::validator_from_ip(ip, identifier, ports), + TopologyNodeRole::Executor => Host::executor_from_ip(ip, identifier, ports), + } + } + + fn declaration_fingerprint(genesis: &G) -> Vec<(ServiceType, ProviderId, NoteId, PublicKey)> + where + G: GenesisTxTrait, + { + genesis + .sdp_declarations() + .map(|(op, _)| (op.service_type, op.provider_id, op.locked_note_id, op.zk_id)) + .collect() + } +} diff --git a/testing-framework/runners/compose/src/docker.rs b/testing-framework/runners/compose/src/docker.rs new file mode 100644 index 0000000..1e3cca4 --- /dev/null +++ b/testing-framework/runners/compose/src/docker.rs @@ -0,0 +1,198 @@ +use std::{ + env, + process::{Command as StdCommand, Stdio}, + time::Duration, +}; + +use tokio::{process::Command, time::timeout}; +use tracing::warn; + +use crate::{ + compose::{ComposeCommandError, repository_root}, + errors::ComposeRunnerError, +}; + +const IMAGE_BUILD_TIMEOUT: Duration = Duration::from_secs(600); +const DOCKER_INFO_TIMEOUT: Duration = Duration::from_secs(15); +const IMAGE_INSPECT_TIMEOUT: Duration = Duration::from_secs(60); + +/// Checks that `docker info` succeeds within a timeout. 
+pub async fn ensure_docker_available() -> Result<(), ComposeRunnerError> { + let mut command = Command::new("docker"); + command + .arg("info") + .stdout(Stdio::null()) + .stderr(Stdio::null()); + + let available = timeout( + testing_framework_core::adjust_timeout(DOCKER_INFO_TIMEOUT), + command.status(), + ) + .await + .ok() + .and_then(Result::ok) + .map(|status| status.success()) + .unwrap_or(false); + + if available { + Ok(()) + } else { + Err(ComposeRunnerError::DockerUnavailable) + } +} + +/// Ensure the configured compose image exists, building a local one if needed. +pub async fn ensure_compose_image() -> Result<(), ComposeRunnerError> { + let (image, platform) = crate::compose::resolve_image(); + ensure_image_present(&image, platform.as_deref()).await +} + +/// Verify an image exists locally, optionally building it for the default tag. +pub async fn ensure_image_present( + image: &str, + platform: Option<&str>, +) -> Result<(), ComposeRunnerError> { + if docker_image_exists(image).await? { + return Ok(()); + } + + if image != "nomos-testnet:local" { + return Err(ComposeRunnerError::MissingImage { + image: image.to_owned(), + }); + } + + build_local_image(image, platform).await +} + +/// Returns true when `docker image inspect` succeeds for the image. 
+pub async fn docker_image_exists(image: &str) -> Result { + let mut cmd = Command::new("docker"); + cmd.arg("image") + .arg("inspect") + .arg(image) + .stdout(Stdio::null()) + .stderr(Stdio::null()); + + match timeout( + testing_framework_core::adjust_timeout(IMAGE_INSPECT_TIMEOUT), + cmd.status(), + ) + .await + { + Ok(Ok(status)) => Ok(status.success()), + Ok(Err(source)) => Err(ComposeRunnerError::Compose(ComposeCommandError::Spawn { + command: format!("docker image inspect {image}"), + source, + })), + Err(_) => Err(ComposeRunnerError::Compose(ComposeCommandError::Timeout { + command: format!("docker image inspect {image}"), + timeout: testing_framework_core::adjust_timeout(IMAGE_INSPECT_TIMEOUT), + })), + } +} + +/// Build the local testnet image with optional platform override. +pub async fn build_local_image( + image: &str, + platform: Option<&str>, +) -> Result<(), ComposeRunnerError> { + let repo_root = + repository_root().map_err(|source| ComposeRunnerError::ImageBuild { source })?; + let dockerfile = repo_root.join("testing-framework/runners/docker/runner.Dockerfile"); + + tracing::info!(image, "building compose runner docker image"); + + let mut cmd = Command::new("docker"); + cmd.arg("build"); + + if let Some(build_platform) = select_build_platform(platform)? 
{ + cmd.arg("--platform").arg(&build_platform); + } + + let circuits_platform = env::var("COMPOSE_CIRCUITS_PLATFORM") + .ok() + .filter(|value| !value.is_empty()) + .unwrap_or_else(|| String::from("linux-x86_64")); + + cmd.arg("--build-arg") + .arg(format!("NOMOS_CIRCUITS_PLATFORM={circuits_platform}")); + + if let Some(value) = env::var("CIRCUITS_OVERRIDE") + .ok() + .filter(|val| !val.is_empty()) + { + cmd.arg("--build-arg") + .arg(format!("CIRCUITS_OVERRIDE={value}")); + } + + cmd.arg("-t") + .arg(image) + .arg("-f") + .arg(&dockerfile) + .arg(&repo_root); + + run_docker_command(cmd, "docker build compose image", IMAGE_BUILD_TIMEOUT).await +} + +/// Run a docker command with a timeout, mapping errors into runner errors. +pub async fn run_docker_command( + mut command: Command, + description: &str, + timeout_duration: Duration, +) -> Result<(), ComposeRunnerError> { + match timeout(timeout_duration, command.status()).await { + Ok(Ok(status)) if status.success() => Ok(()), + Ok(Ok(status)) => Err(ComposeRunnerError::Compose(ComposeCommandError::Failed { + command: description.to_owned(), + status, + })), + Ok(Err(source)) => Err(ComposeRunnerError::Compose(ComposeCommandError::Spawn { + command: description.to_owned(), + source, + })), + Err(_) => Err(ComposeRunnerError::Compose(ComposeCommandError::Timeout { + command: description.to_owned(), + timeout: timeout_duration, + })), + } +} + +fn detect_docker_platform() -> Result, ComposeRunnerError> { + let output = StdCommand::new("docker") + .arg("info") + .arg("-f") + .arg("{{.Architecture}}") + .output() + .map_err(|source| ComposeRunnerError::ImageBuild { + source: source.into(), + })?; + + if !output.status.success() { + return Ok(None); + } + + let arch = String::from_utf8_lossy(&output.stdout).trim().to_owned(); + if arch.is_empty() { + return Ok(None); + } + + Ok(Some(format!("linux/{arch}"))) +} + +/// Choose the build platform from user override or docker host architecture. 
+pub fn select_build_platform( + requested: Option<&str>, +) -> Result, ComposeRunnerError> { + if let Some(value) = requested { + return Ok(Some(value.to_owned())); + } + + detect_docker_platform()?.map_or_else( + || { + warn!("docker host architecture unavailable; letting docker choose default platform"); + Ok(None) + }, + |host_platform| Ok(Some(host_platform)), + ) +} diff --git a/testing-framework/runners/compose/src/environment.rs b/testing-framework/runners/compose/src/environment.rs new file mode 100644 index 0000000..fe4b363 --- /dev/null +++ b/testing-framework/runners/compose/src/environment.rs @@ -0,0 +1,431 @@ +use std::{ + net::{Ipv4Addr, TcpListener as StdTcpListener}, + path::{Path, PathBuf}, + time::Duration, +}; + +use anyhow::anyhow; +use testing_framework_core::{adjust_timeout, scenario::CleanupGuard, topology::GeneratedTopology}; +use tokio::process::Command; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use crate::{ + cfgsync::{CfgsyncServerHandle, update_cfgsync_config}, + cleanup::RunnerCleanup, + compose::{ + ComposeDescriptor, compose_up, dump_compose_logs, resolve_image, write_compose_file, + }, + deployer::DEFAULT_PROMETHEUS_PORT, + docker::{ensure_compose_image, run_docker_command}, + errors::{ComposeRunnerError, ConfigError, WorkspaceError}, + workspace::ComposeWorkspace, +}; + +const CFGSYNC_START_TIMEOUT: Duration = Duration::from_secs(180); +const STACK_BRINGUP_MAX_ATTEMPTS: usize = 3; + +/// Paths and flags describing the prepared compose workspace. +pub struct WorkspaceState { + pub workspace: ComposeWorkspace, + pub root: PathBuf, + pub cfgsync_path: PathBuf, + pub use_kzg: bool, +} + +/// Holds paths and handles for a running docker-compose stack. +pub struct StackEnvironment { + compose_path: PathBuf, + project_name: String, + root: PathBuf, + workspace: Option, + cfgsync_handle: Option, + prometheus_port: u16, +} + +impl StackEnvironment { + /// Builds an environment from the prepared workspace and compose artifacts. 
+ pub fn from_workspace( + state: WorkspaceState, + compose_path: PathBuf, + project_name: String, + cfgsync_handle: Option, + prometheus_port: u16, + ) -> Self { + let WorkspaceState { + workspace, root, .. + } = state; + + Self { + compose_path, + project_name, + root, + workspace: Some(workspace), + cfgsync_handle, + prometheus_port, + } + } + + pub fn compose_path(&self) -> &Path { + &self.compose_path + } + + /// Host port exposed by Prometheus. + pub const fn prometheus_port(&self) -> u16 { + self.prometheus_port + } + + /// Docker compose project name. + pub fn project_name(&self) -> &str { + &self.project_name + } + + /// Root directory that contains generated assets. + pub fn root(&self) -> &Path { + &self.root + } + + /// Convert into a cleanup guard while keeping the environment borrowed. + pub fn take_cleanup(&mut self) -> RunnerCleanup { + RunnerCleanup::new( + self.compose_path.clone(), + self.project_name.clone(), + self.root.clone(), + self.workspace + .take() + .expect("workspace must be available while cleaning up"), + self.cfgsync_handle.take(), + ) + } + + /// Convert into a cleanup guard, consuming the environment. + pub fn into_cleanup(self) -> RunnerCleanup { + RunnerCleanup::new( + self.compose_path, + self.project_name, + self.root, + self.workspace + .expect("workspace must be available while cleaning up"), + self.cfgsync_handle, + ) + } + + /// Dump compose logs and trigger cleanup after a failure. + pub async fn fail(&mut self, reason: &str) { + use tracing::error; + + error!( + reason = reason, + "compose stack failure; dumping docker logs" + ); + dump_compose_logs(self.compose_path(), self.project_name(), self.root()).await; + Box::new(self.take_cleanup()).cleanup(); + } +} + +/// Represents a claimed port, optionally guarded by an open socket. +pub struct PortReservation { + port: u16, + _guard: Option, +} + +impl PortReservation { + /// Holds a port and an optional socket guard to keep it reserved. 
+ pub const fn new(port: u16, guard: Option) -> Self { + Self { + port, + _guard: guard, + } + } + + /// The reserved port number. + pub const fn port(&self) -> u16 { + self.port + } +} + +/// Verifies the topology has at least one validator so compose can start. +pub fn ensure_supported_topology( + descriptors: &GeneratedTopology, +) -> Result<(), ComposeRunnerError> { + let validators = descriptors.validators().len(); + if validators == 0 { + return Err(ComposeRunnerError::MissingValidator { + validators, + executors: descriptors.executors().len(), + }); + } + Ok(()) +} + +/// Create a temporary workspace with copied testnet assets and derived paths. +pub fn prepare_workspace_state() -> Result { + let workspace = ComposeWorkspace::create().map_err(WorkspaceError::new)?; + let root = workspace.root_path().to_path_buf(); + let cfgsync_path = workspace.stack_dir().join("cfgsync.yaml"); + let use_kzg = workspace.root_path().join("kzgrs_test_params").exists(); + + Ok(WorkspaceState { + workspace, + root, + cfgsync_path, + use_kzg, + }) +} + +/// Log wrapper for `prepare_workspace_state`. +pub fn prepare_workspace_logged() -> Result { + info!("preparing compose workspace"); + prepare_workspace_state().map_err(Into::into) +} + +/// Render cfgsync config based on the topology and chosen port, logging +/// progress. +pub fn update_cfgsync_logged( + workspace: &WorkspaceState, + descriptors: &GeneratedTopology, + cfgsync_port: u16, +) -> Result<(), ComposeRunnerError> { + info!(cfgsync_port, "updating cfgsync configuration"); + configure_cfgsync(workspace, descriptors, cfgsync_port).map_err(Into::into) +} + +/// Start the cfgsync server container using the generated config. 
+pub async fn start_cfgsync_stage(
+    workspace: &WorkspaceState,
+    cfgsync_port: u16,
+) -> Result<CfgsyncServerHandle, ComposeRunnerError> {
+    // Field shorthand, consistent with the other `info!(cfgsync_port, ...)` call sites.
+    info!(cfgsync_port, "launching cfgsync server");
+    let handle = launch_cfgsync(&workspace.cfgsync_path, cfgsync_port).await?;
+    Ok(handle)
+}
+
+/// Update cfgsync YAML on disk with topology-derived values.
+pub fn configure_cfgsync(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+    cfgsync_port: u16,
+) -> Result<(), ConfigError> {
+    update_cfgsync_config(
+        &workspace.cfgsync_path,
+        descriptors,
+        workspace.use_kzg,
+        cfgsync_port,
+    )
+    .map_err(|source| ConfigError::Cfgsync {
+        path: workspace.cfgsync_path.clone(),
+        source,
+    })
+}
+
+/// Bind an ephemeral port for cfgsync, returning the chosen value.
+///
+/// NOTE(review): the listener is dropped on return, so another process could
+/// grab the port before cfgsync binds it; acceptable for test runs.
+pub fn allocate_cfgsync_port() -> Result<u16, ConfigError> {
+    let listener =
+        StdTcpListener::bind((Ipv4Addr::UNSPECIFIED, 0)).map_err(|source| ConfigError::Port {
+            source: source.into(),
+        })?;
+
+    let port = listener
+        .local_addr()
+        .map_err(|source| ConfigError::Port {
+            source: source.into(),
+        })?
+        .port();
+    Ok(port)
+}
+
+/// Launch cfgsync in a detached docker container on the provided port.
+pub async fn launch_cfgsync(
+    cfgsync_path: &Path,
+    port: u16,
+) -> Result<CfgsyncServerHandle, ConfigError> {
+    let testnet_dir = cfgsync_path
+        .parent()
+        .ok_or_else(|| ConfigError::CfgsyncStart {
+            port,
+            source: anyhow!("cfgsync path {cfgsync_path:?} has no parent directory"),
+        })?;
+    let (image, _) = resolve_image();
+    // Unique name so parallel scenario runs never collide on the container.
+    let container_name = format!("nomos-cfgsync-{}", Uuid::new_v4());
+
+    let mut command = Command::new("docker");
+    command
+        .arg("run")
+        .arg("-d")
+        .arg("--name")
+        .arg(&container_name)
+        .arg("--entrypoint")
+        .arg("cfgsync-server")
+        .arg("-p")
+        .arg(format!("{port}:{port}"))
+        .arg("-v")
+        .arg(format!(
+            "{}:/etc/nomos:ro",
+            // Canonicalize for docker bind mounts; fall back to the raw path on failure.
+            testnet_dir
+                .canonicalize()
+                .unwrap_or_else(|_| testnet_dir.to_path_buf())
+                .display()
+        ))
+        .arg(&image)
+        .arg("/etc/nomos/cfgsync.yaml");
+
+    run_docker_command(
+        command,
+        "docker run cfgsync server",
+        adjust_timeout(CFGSYNC_START_TIMEOUT),
+    )
+    .await
+    .map_err(|source| ConfigError::CfgsyncStart {
+        port,
+        source: anyhow!(source),
+    })?;
+
+    Ok(CfgsyncServerHandle::Container {
+        name: container_name,
+        stopped: false,
+    })
+}
+
+/// Render compose file and associated assets for the current topology.
+pub fn write_compose_artifacts(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+    cfgsync_port: u16,
+    prometheus_port: u16,
+) -> Result<PathBuf, ConfigError> {
+    let descriptor = ComposeDescriptor::builder(descriptors)
+        .with_kzg_mount(workspace.use_kzg)
+        .with_cfgsync_port(cfgsync_port)
+        .with_prometheus_port(prometheus_port)
+        .build()
+        .map_err(|source| ConfigError::Descriptor { source })?;
+
+    let compose_path = workspace.root.join("compose.generated.yml");
+    write_compose_file(&descriptor, &compose_path)
+        .map_err(|source| ConfigError::Template { source })?;
+    Ok(compose_path)
+}
+
+/// Log and wrap `write_compose_artifacts` errors for the runner.
+pub fn render_compose_logged(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+    cfgsync_port: u16,
+    prometheus_port: u16,
+) -> Result<PathBuf, ComposeRunnerError> {
+    info!(
+        cfgsync_port,
+        prometheus_port, "rendering compose file with ports"
+    );
+    write_compose_artifacts(workspace, descriptors, cfgsync_port, prometheus_port)
+        .map_err(Into::into)
+}
+
+/// Bring up docker compose; shut down cfgsync if start-up fails.
+pub async fn bring_up_stack(
+    compose_path: &Path,
+    project_name: &str,
+    workspace_root: &Path,
+    cfgsync_handle: &mut CfgsyncServerHandle,
+) -> Result<(), ComposeRunnerError> {
+    if let Err(err) = compose_up(compose_path, project_name, workspace_root).await {
+        // Do not leak the detached cfgsync container when compose fails.
+        cfgsync_handle.shutdown();
+        return Err(ComposeRunnerError::Compose(err));
+    }
+    Ok(())
+}
+
+/// Log compose bring-up with context.
+pub async fn bring_up_stack_logged(
+    compose_path: &Path,
+    project_name: &str,
+    workspace_root: &Path,
+    cfgsync_handle: &mut CfgsyncServerHandle,
+) -> Result<(), ComposeRunnerError> {
+    info!(project = %project_name, "bringing up docker compose stack");
+    bring_up_stack(compose_path, project_name, workspace_root, cfgsync_handle).await
+}
+
+/// Prepare workspace, cfgsync, compose artifacts, and launch the stack.
+pub async fn prepare_environment(
+    descriptors: &GeneratedTopology,
+    mut prometheus_port: PortReservation,
+    prometheus_port_locked: bool,
+) -> Result<StackEnvironment, ComposeRunnerError> {
+    let workspace = prepare_workspace_logged()?;
+    let cfgsync_port = allocate_cfgsync_port()?;
+    update_cfgsync_logged(&workspace, descriptors, cfgsync_port)?;
+    ensure_compose_image().await?;
+
+    // A caller-pinned prometheus port cannot be re-rolled, so only try once.
+    let attempts = if prometheus_port_locked {
+        1
+    } else {
+        STACK_BRINGUP_MAX_ATTEMPTS
+    };
+    let mut last_err = None;
+
+    for _ in 0..attempts {
+        let prometheus_port_value = prometheus_port.port();
+        let compose_path =
+            render_compose_logged(&workspace, descriptors, cfgsync_port, prometheus_port_value)?;
+
+        let project_name = format!("nomos-compose-{}", Uuid::new_v4());
+        let mut cfgsync_handle = start_cfgsync_stage(&workspace, cfgsync_port).await?;
+
+        // Release our reservation so docker compose itself can bind the port.
+        drop(prometheus_port);
+        match bring_up_stack_logged(
+            &compose_path,
+            &project_name,
+            &workspace.root,
+            &mut cfgsync_handle,
+        )
+        .await
+        {
+            Ok(()) => {
+                info!(
+                    project = %project_name,
+                    compose_file = %compose_path.display(),
+                    cfgsync_port,
+                    prometheus_port = prometheus_port_value,
+                    "compose stack is up"
+                );
+                return Ok(StackEnvironment::from_workspace(
+                    workspace,
+                    compose_path,
+                    project_name,
+                    Some(cfgsync_handle),
+                    prometheus_port_value,
+                ));
+            }
+            Err(err) => {
+                cfgsync_handle.shutdown();
+                if prometheus_port_locked {
+                    last_err = Some(err);
+                    break;
+                }
+                // Log via the binding instead of `last_err.as_ref().unwrap()`.
+                warn!(
+                    error = %err,
+                    "compose bring-up failed; retrying with a new prometheus port"
+                );
+                last_err = Some(err);
+                prometheus_port = allocate_prometheus_port()
+                    .unwrap_or_else(|| PortReservation::new(DEFAULT_PROMETHEUS_PORT, None));
+                debug!(
+                    next_prometheus_port = prometheus_port.port(),
+                    "retrying compose bring-up"
+                );
+            }
+        }
+    }
+
+    Err(last_err.expect("prepare_environment should return or fail with error"))
+}
+
+/// Prefer the default prometheus port; fall back to any free ephemeral port.
+fn allocate_prometheus_port() -> Option<PortReservation> {
+    reserve_prometheus_port(DEFAULT_PROMETHEUS_PORT).or_else(|| reserve_prometheus_port(0))
+}
+
+fn 
reserve_prometheus_port(port: u16) -> Option { + let listener = StdTcpListener::bind((Ipv4Addr::LOCALHOST, port)).ok()?; + let actual_port = listener.local_addr().ok()?.port(); + Some(PortReservation::new(actual_port, Some(listener))) +} diff --git a/testing-framework/runners/compose/src/errors.rs b/testing-framework/runners/compose/src/errors.rs new file mode 100644 index 0000000..dcbcf87 --- /dev/null +++ b/testing-framework/runners/compose/src/errors.rs @@ -0,0 +1,139 @@ +use std::path::PathBuf; + +use testing_framework_core::{ + scenario::{ + MetricsError, + http_probe::{HttpReadinessError, NodeRole}, + }, + topology::ReadinessError, +}; +use url::ParseError; + +use crate::compose::{ComposeCommandError, DescriptorBuildError, TemplateError}; + +#[derive(Debug, thiserror::Error)] +/// Top-level compose runner errors. +pub enum ComposeRunnerError { + #[error( + "compose runner requires at least one validator (validators={validators}, executors={executors})" + )] + MissingValidator { validators: usize, executors: usize }, + #[error("docker does not appear to be available on this host")] + DockerUnavailable, + #[error("failed to resolve host port for {service} container port {container_port}: {source}")] + PortDiscovery { + service: String, + container_port: u16, + #[source] + source: anyhow::Error, + }, + #[error(transparent)] + Workspace(#[from] WorkspaceError), + #[error(transparent)] + Config(#[from] ConfigError), + #[error(transparent)] + Compose(#[from] ComposeCommandError), + #[error(transparent)] + Readiness(#[from] StackReadinessError), + #[error(transparent)] + NodeClients(#[from] NodeClientError), + #[error(transparent)] + Telemetry(#[from] MetricsError), + #[error("block feed requires at least one validator client")] + BlockFeedMissing, + #[error("failed to start block feed: {source}")] + BlockFeed { + #[source] + source: anyhow::Error, + }, + #[error( + "docker image '{image}' is not available; set NOMOS_TESTNET_IMAGE or build the image manually" + )] + 
MissingImage { image: String }, + #[error("failed to prepare docker image: {source}")] + ImageBuild { + #[source] + source: anyhow::Error, + }, +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to prepare compose workspace: {source}")] +/// Wraps workspace preparation failures. +pub struct WorkspaceError { + #[source] + source: anyhow::Error, +} + +impl WorkspaceError { + pub const fn new(source: anyhow::Error) -> Self { + Self { source } + } +} + +#[derive(Debug, thiserror::Error)] +/// Configuration-related failures while preparing compose runs. +pub enum ConfigError { + #[error("failed to update cfgsync configuration at {path}: {source}")] + Cfgsync { + path: PathBuf, + #[source] + source: anyhow::Error, + }, + #[error("failed to allocate cfgsync port: {source}")] + Port { + #[source] + source: anyhow::Error, + }, + #[error("failed to start cfgsync server on port {port}: {source}")] + CfgsyncStart { + port: u16, + #[source] + source: anyhow::Error, + }, + #[error("failed to build compose descriptor: {source}")] + Descriptor { + #[source] + source: DescriptorBuildError, + }, + #[error("failed to render compose template: {source}")] + Template { + #[source] + source: TemplateError, + }, +} + +#[derive(Debug, thiserror::Error)] +/// Readiness probe failures surfaced to callers. +pub enum StackReadinessError { + #[error(transparent)] + Http(#[from] HttpReadinessError), + #[error("failed to build readiness URL for {role} port {port}: {source}", role = role.label())] + Endpoint { + role: NodeRole, + port: u16, + #[source] + source: ParseError, + }, + #[error("remote readiness probe failed: {source}")] + Remote { + #[source] + source: ReadinessError, + }, +} + +#[derive(Debug, thiserror::Error)] +/// Node client construction failures. 
+pub enum NodeClientError { + #[error( + "failed to build {endpoint} client URL for {role} port {port}: {source}", + role = role.label() + )] + Endpoint { + role: NodeRole, + endpoint: &'static str, + port: u16, + #[source] + source: ParseError, + }, +} diff --git a/testing-framework/runners/compose/src/lib.rs b/testing-framework/runners/compose/src/lib.rs new file mode 100644 index 0000000..ac7befa --- /dev/null +++ b/testing-framework/runners/compose/src/lib.rs @@ -0,0 +1,23 @@ +mod block_feed; +mod cfgsync; +mod cleanup; +mod compose; +mod control; +mod deployer; +mod docker; +mod environment; +mod errors; +mod ports; +mod readiness; +mod wait; +mod workspace; + +/// The Docker Compose runner entry point. +pub use deployer::ComposeDeployer; +/// Port binding reservation used while wiring Prometheus. +pub use environment::PortReservation; +/// Error types surfaced by the compose runner. +pub use errors::{ + ComposeRunnerError, ConfigError, NodeClientError, StackReadinessError, WorkspaceError, +}; +pub use workspace::ComposeWorkspace; diff --git a/testing-framework/runners/compose/src/ports.rs b/testing-framework/runners/compose/src/ports.rs new file mode 100644 index 0000000..be07373 --- /dev/null +++ b/testing-framework/runners/compose/src/ports.rs @@ -0,0 +1,159 @@ +use std::time::Duration; + +use anyhow::{Context as _, anyhow}; +use reqwest::Url; +use testing_framework_core::{ + adjust_timeout, + scenario::http_probe::NodeRole as HttpNodeRole, + topology::{GeneratedTopology, NodeRole as TopologyNodeRole}, +}; +use tokio::{process::Command, time::timeout}; +use url::ParseError; + +use crate::{ + compose::{HostPortMapping, NodeHostPorts}, + environment::StackEnvironment, + errors::{ComposeRunnerError, StackReadinessError}, +}; + +/// Resolve host ports for all nodes from docker compose. 
+pub async fn discover_host_ports( + environment: &StackEnvironment, + descriptors: &GeneratedTopology, +) -> Result { + let mut validators = Vec::new(); + for node in descriptors.validators() { + let service = node_identifier(TopologyNodeRole::Validator, node.index()); + let api = resolve_service_port(environment, &service, node.api_port()).await?; + let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?; + validators.push(NodeHostPorts { api, testing }); + } + + let mut executors = Vec::new(); + for node in descriptors.executors() { + let service = node_identifier(TopologyNodeRole::Executor, node.index()); + let api = resolve_service_port(environment, &service, node.api_port()).await?; + let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?; + executors.push(NodeHostPorts { api, testing }); + } + + Ok(HostPortMapping { + validators, + executors, + }) +} + +async fn resolve_service_port( + environment: &StackEnvironment, + service: &str, + container_port: u16, +) -> Result { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(environment.compose_path()) + .arg("-p") + .arg(environment.project_name()) + .arg("port") + .arg(service) + .arg(container_port.to_string()) + .current_dir(environment.root()); + + let output = timeout(adjust_timeout(Duration::from_secs(30)), cmd.output()) + .await + .map_err(|_| ComposeRunnerError::PortDiscovery { + service: service.to_owned(), + container_port, + source: anyhow!("docker compose port timed out"), + })? 
+ .with_context(|| format!("running docker compose port {service} {container_port}")) + .map_err(|source| ComposeRunnerError::PortDiscovery { + service: service.to_owned(), + container_port, + source, + })?; + + if !output.status.success() { + return Err(ComposeRunnerError::PortDiscovery { + service: service.to_owned(), + container_port, + source: anyhow!("docker compose port exited with {}", output.status), + }); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + for line in stdout.lines() { + let line = line.trim(); + if line.is_empty() { + continue; + } + if let Some(port_str) = line.rsplit(':').next() + && let Ok(port) = port_str.trim().parse::() + { + return Ok(port); + } + } + + Err(ComposeRunnerError::PortDiscovery { + service: service.to_owned(), + container_port, + source: anyhow!("unable to parse docker compose port output: {stdout}"), + }) +} + +/// Wait for remote readiness using mapped host ports. +pub async fn ensure_remote_readiness_with_ports( + descriptors: &GeneratedTopology, + mapping: &HostPortMapping, +) -> Result<(), StackReadinessError> { + let validator_urls = mapping + .validators + .iter() + .map(|ports| readiness_url(HttpNodeRole::Validator, ports.api)) + .collect::, _>>()?; + let executor_urls = mapping + .executors + .iter() + .map(|ports| readiness_url(HttpNodeRole::Executor, ports.api)) + .collect::, _>>()?; + + let validator_membership_urls = mapping + .validators + .iter() + .map(|ports| readiness_url(HttpNodeRole::Validator, ports.testing)) + .collect::, _>>()?; + let executor_membership_urls = mapping + .executors + .iter() + .map(|ports| readiness_url(HttpNodeRole::Executor, ports.testing)) + .collect::, _>>()?; + + descriptors + .wait_remote_readiness( + &validator_urls, + &executor_urls, + Some(&validator_membership_urls), + Some(&executor_membership_urls), + ) + .await + .map_err(|source| StackReadinessError::Remote { source }) +} + +fn readiness_url(role: HttpNodeRole, port: u16) -> Result { + 
localhost_url(port).map_err(|source| StackReadinessError::Endpoint { role, port, source }) +} + +fn localhost_url(port: u16) -> Result { + Url::parse(&format!("http://{}:{port}/", compose_runner_host())) +} + +fn node_identifier(role: TopologyNodeRole, index: usize) -> String { + match role { + TopologyNodeRole::Validator => format!("validator-{index}"), + TopologyNodeRole::Executor => format!("executor-{index}"), + } +} + +pub(crate) fn compose_runner_host() -> String { + std::env::var("COMPOSE_RUNNER_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()) +} diff --git a/testing-framework/runners/compose/src/readiness.rs b/testing-framework/runners/compose/src/readiness.rs new file mode 100644 index 0000000..e57da2c --- /dev/null +++ b/testing-framework/runners/compose/src/readiness.rs @@ -0,0 +1,107 @@ +use std::time::Duration; + +use reqwest::Url; +use testing_framework_core::{ + nodes::ApiClient, + scenario::{Metrics, MetricsError, NodeClients, http_probe::NodeRole as HttpNodeRole}, + topology::{GeneratedTopology, NodeRole as TopologyNodeRole}, +}; +use tokio::time::sleep; + +use crate::{ + compose::{HostPortMapping, NodeHostPorts}, + errors::{NodeClientError, StackReadinessError}, + wait::{wait_for_executors, wait_for_validators}, +}; + +const DISABLED_READINESS_SLEEP: Duration = Duration::from_secs(5); + +/// Build a metrics client from host/port, validating the URL. +pub fn metrics_handle_from_port(port: u16, host: &str) -> Result { + let url = Url::parse(&format!("http://{host}:{port}/")) + .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}")))?; + Metrics::from_prometheus(url) +} + +/// Wait until all validators respond on their API ports. +pub async fn ensure_validators_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> { + if ports.is_empty() { + return Ok(()); + } + + wait_for_validators(ports).await.map_err(Into::into) +} + +/// Wait until all executors respond on their API ports. 
+pub async fn ensure_executors_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> { + if ports.is_empty() { + return Ok(()); + } + + wait_for_executors(ports).await.map_err(Into::into) +} + +/// Allow a brief pause when readiness probes are disabled. +pub async fn maybe_sleep_for_disabled_readiness(readiness_enabled: bool) { + if !readiness_enabled { + sleep(DISABLED_READINESS_SLEEP).await; + } +} + +/// Construct API clients using the mapped host ports. +pub fn build_node_clients_with_ports( + descriptors: &GeneratedTopology, + mapping: &HostPortMapping, + host: &str, +) -> Result { + let validators = descriptors + .validators() + .iter() + .zip(mapping.validators.iter()) + .map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports, host)) + .collect::, _>>()?; + let executors = descriptors + .executors() + .iter() + .zip(mapping.executors.iter()) + .map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports, host)) + .collect::, _>>()?; + + Ok(NodeClients::new(validators, executors)) +} + +fn api_client_from_host_ports( + role: HttpNodeRole, + ports: &NodeHostPorts, + host: &str, +) -> Result { + let base_url = localhost_url(ports.api, host).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "api", + port: ports.api, + source, + })?; + + let testing_url = + Some( + localhost_url(ports.testing, host).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "testing", + port: ports.testing, + source, + })?, + ); + + Ok(ApiClient::from_urls(base_url, testing_url)) +} + +fn to_http_role(role: TopologyNodeRole) -> testing_framework_core::scenario::http_probe::NodeRole { + match role { + TopologyNodeRole::Validator => HttpNodeRole::Validator, + TopologyNodeRole::Executor => HttpNodeRole::Executor, + } +} + +fn localhost_url(port: u16, host: &str) -> Result { + Url::parse(&format!("http://{host}:{port}/")) +} diff --git a/testing-framework/runners/compose/src/wait.rs 
b/testing-framework/runners/compose/src/wait.rs new file mode 100644 index 0000000..17240af --- /dev/null +++ b/testing-framework/runners/compose/src/wait.rs @@ -0,0 +1,33 @@ +use std::{env, time::Duration}; + +use testing_framework_core::{ + adjust_timeout, + scenario::http_probe::{self, HttpReadinessError, NodeRole}, +}; + +const DEFAULT_WAIT: Duration = Duration::from_secs(180); +const POLL_INTERVAL: Duration = Duration::from_millis(250); + +pub async fn wait_for_validators(ports: &[u16]) -> Result<(), HttpReadinessError> { + wait_for_ports(ports, NodeRole::Validator).await +} + +pub async fn wait_for_executors(ports: &[u16]) -> Result<(), HttpReadinessError> { + wait_for_ports(ports, NodeRole::Executor).await +} + +async fn wait_for_ports(ports: &[u16], role: NodeRole) -> Result<(), HttpReadinessError> { + let host = compose_runner_host(); + http_probe::wait_for_http_ports_with_host( + ports, + role, + &host, + adjust_timeout(DEFAULT_WAIT), + POLL_INTERVAL, + ) + .await +} + +fn compose_runner_host() -> String { + env::var("COMPOSE_RUNNER_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()) +} diff --git a/testing-framework/runners/compose/src/workspace.rs b/testing-framework/runners/compose/src/workspace.rs new file mode 100644 index 0000000..bd7a3e0 --- /dev/null +++ b/testing-framework/runners/compose/src/workspace.rs @@ -0,0 +1,132 @@ +use std::{ + env, fs, + path::{Path, PathBuf}, +}; + +use anyhow::{Context as _, Result}; +use tempfile::TempDir; + +/// Copy the repository stack assets into a scenario-specific temp dir. +#[derive(Debug)] +pub struct ComposeWorkspace { + root: TempDir, +} + +impl ComposeWorkspace { + /// Clone the stack assets into a temporary directory. 
+ pub fn create() -> Result { + let repo_root = env::var("CARGO_WORKSPACE_DIR") + .map(PathBuf::from) + .or_else(|_| { + Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .and_then(Path::parent) + .map(Path::to_path_buf) + .context("resolving workspace root from manifest dir") + }) + .context("locating repository root")?; + let temp = tempfile::Builder::new() + .prefix("nomos-testnet-") + .tempdir() + .context("creating testnet temp dir")?; + let stack_source = stack_assets_root(&repo_root); + if !stack_source.exists() { + anyhow::bail!( + "stack assets directory not found at {}", + stack_source.display() + ); + } + copy_dir_recursive(&stack_source, &temp.path().join("stack"))?; + let scripts_source = stack_scripts_root(&repo_root); + if scripts_source.exists() { + copy_dir_recursive(&scripts_source, &temp.path().join("stack/scripts"))?; + } + + // Ensure Prometheus config is a file (Docker bind mount fails if a directory + // exists). + let prometheus_src = stack_source.join("monitoring/prometheus.yml"); + let prometheus_dst = temp.path().join("stack/monitoring/prometheus.yml"); + if prometheus_dst.exists() && prometheus_dst.is_dir() { + fs::remove_dir_all(&prometheus_dst) + .with_context(|| format!("removing bogus dir {}", prometheus_dst.display()))?; + } + if !prometheus_dst.exists() { + fs::copy(&prometheus_src, &prometheus_dst).with_context(|| { + format!( + "copying prometheus.yml {} -> {}", + prometheus_src.display(), + prometheus_dst.display() + ) + })?; + } + + let kzg_source = repo_root.join("testing-framework/assets/stack/kzgrs_test_params"); + if kzg_source.exists() { + let target = temp.path().join("kzgrs_test_params"); + if kzg_source.is_dir() { + copy_dir_recursive(&kzg_source, &target)?; + } else { + fs::copy(&kzg_source, &target).with_context(|| { + format!("copying {} -> {}", kzg_source.display(), target.display()) + })?; + } + } + + Ok(Self { root: temp }) + } + + #[must_use] + /// Root of the temporary workspace on 
disk. + pub fn root_path(&self) -> &Path { + self.root.path() + } + + #[must_use] + /// Path to the copied assets directory. + pub fn stack_dir(&self) -> PathBuf { + self.root.path().join("stack") + } + + #[must_use] + /// Consume the workspace and return the underlying temp directory. + pub fn into_inner(self) -> TempDir { + self.root + } +} + +fn stack_assets_root(repo_root: &Path) -> PathBuf { + let new_layout = repo_root.join("testing-framework/assets/stack"); + if new_layout.exists() { + new_layout + } else { + repo_root.join("testnet") + } +} + +fn stack_scripts_root(repo_root: &Path) -> PathBuf { + let new_layout = repo_root.join("testing-framework/assets/stack/scripts"); + if new_layout.exists() { + new_layout + } else { + repo_root.join("testnet/scripts") + } +} + +fn copy_dir_recursive(source: &Path, target: &Path) -> Result<()> { + fs::create_dir_all(target) + .with_context(|| format!("creating target dir {}", target.display()))?; + for entry in fs::read_dir(source).with_context(|| format!("reading {}", source.display()))? 
{ + let entry = entry?; + let file_type = entry.file_type()?; + let dest = target.join(entry.file_name()); + if file_type.is_dir() { + copy_dir_recursive(&entry.path(), &dest)?; + } else if !file_type.is_dir() { + fs::copy(entry.path(), &dest).with_context(|| { + format!("copying {} -> {}", entry.path().display(), dest.display()) + })?; + } + } + Ok(()) +} diff --git a/testing-framework/runners/docker/.dockerignore b/testing-framework/runners/docker/.dockerignore new file mode 100644 index 0000000..d4ad8ac --- /dev/null +++ b/testing-framework/runners/docker/.dockerignore @@ -0,0 +1,9 @@ +# Build context trim for runner image +.git +**/target +.tmp +tests/workflows/.tmp* +book +scripts/build-rapidsnark.sh~ +rust-project-all-in-one.txt +**/*.log diff --git a/testing-framework/runners/docker/runner.Dockerfile b/testing-framework/runners/docker/runner.Dockerfile new file mode 100644 index 0000000..2c15649 --- /dev/null +++ b/testing-framework/runners/docker/runner.Dockerfile @@ -0,0 +1,74 @@ +# syntax=docker/dockerfile:1 + +ARG VERSION=v0.3.1 +ARG NOMOS_CIRCUITS_PLATFORM=linux-x86_64 + +# =========================== +# BUILD IMAGE +# =========================== + +FROM rust:1.91.0-slim-bookworm AS builder + +ARG VERSION +ARG NOMOS_CIRCUITS_PLATFORM +ARG TARGETARCH + +LABEL maintainer="logos devs" \ + source="https://github.com/logos-co/nomos-node" \ + description="Nomos testing framework build image" + +WORKDIR /nomos +COPY . . 
+ +RUN apt-get update && apt-get install -yq \ + git gcc g++ clang libssl-dev pkg-config ca-certificates curl wget \ + build-essential cmake libgmp-dev libsodium-dev nasm m4 && \ + rm -rf /var/lib/apt/lists/* + +ENV NOMOS_CIRCUITS_PLATFORM=${NOMOS_CIRCUITS_PLATFORM} + +RUN chmod +x scripts/setup-nomos-circuits.sh && \ + scripts/setup-nomos-circuits.sh "$VERSION" "/opt/circuits" + +RUN if [ "${TARGETARCH:-amd64}" = "arm64" ]; then \ + chmod +x scripts/build-rapidsnark.sh && \ + scripts/build-rapidsnark.sh "/opt/circuits"; \ + fi + +ENV NOMOS_CIRCUITS=/opt/circuits + +# Use debug builds to keep the linker memory footprint low; we only need +# binaries for integration testing, not optimized releases. +RUN cargo build --all-features --workspace && \ + cargo build -p nomos-node -p nomos-executor + +# =========================== +# NODE IMAGE +# =========================== + +FROM debian:bookworm-slim + +ARG VERSION + +LABEL maintainer="logos devs" \ + source="https://github.com/logos-co/nomos-node" \ + description="Nomos testing framework runtime image" + +RUN apt-get update && apt-get install -yq \ + libstdc++6 \ + libssl3 \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /opt/circuits /opt/circuits + +COPY --from=builder /nomos/target/debug/nomos-node /usr/bin/nomos-node +COPY --from=builder /nomos/target/debug/nomos-executor /usr/bin/nomos-executor +COPY --from=builder /nomos/target/debug/cfgsync-server /usr/bin/cfgsync-server +COPY --from=builder /nomos/target/debug/cfgsync-client /usr/bin/cfgsync-client + +ENV NOMOS_CIRCUITS=/opt/circuits + +EXPOSE 3000 8080 9000 60000 + +ENTRYPOINT ["/usr/bin/nomos-node"] diff --git a/testing-framework/runners/k8s/Cargo.toml b/testing-framework/runners/k8s/Cargo.toml new file mode 100644 index 0000000..985bb64 --- /dev/null +++ b/testing-framework/runners/k8s/Cargo.toml @@ -0,0 +1,29 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = 
true +license.workspace = true +name = "testing-framework-runner-k8s" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +anyhow = "1" +async-trait = { workspace = true } +k8s-openapi = { version = "0.20", features = ["latest"] } +kube = { version = "0.87", default-features = false, features = ["client", "runtime", "rustls-tls"] } +reqwest = { workspace = true, features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "process", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } +url = { version = "2" } +uuid = { version = "1", features = ["v4"] } diff --git a/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml b/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml new file mode 100644 index 0000000..1785e7e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: nomos-runner +description: Helm chart for Nomos integration test runner assets +type: application +version: 0.1.0 +appVersion: "0.1.0" diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl b/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl new file mode 100644 index 0000000..1665098 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl @@ -0,0 +1,41 @@ +{{- define "nomos-runner.chart" -}} +{{- .Chart.Name -}} +{{- end -}} + +{{- define "nomos-runner.fullname" -}} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nomos-runner.labels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{- define "nomos-runner.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{- define "nomos-runner.validatorLabels" -}} +{{- $root := index . "root" -}} +{{- $index := index . "index" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }} +app.kubernetes.io/instance: {{ $root.Release.Name }} +nomos/logical-role: validator +nomos/validator-index: "{{ $index }}" +{{- end -}} + +{{- define "nomos-runner.executorLabels" -}} +{{- $root := index . "root" -}} +{{- $index := index . "index" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }} +app.kubernetes.io/instance: {{ $root.Release.Name }} +nomos/logical-role: executor +nomos/executor-index: "{{ $index }}" +{{- end -}} + +{{- define "nomos-runner.prometheusLabels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +nomos/logical-role: prometheus +{{- end -}} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml new file mode 100644 index 0000000..bc497dd --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" . }}-cfgsync + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.selectorLabels" . | nindent 6 }} + nomos/component: cfgsync + template: + metadata: + labels: + {{- include "nomos-runner.selectorLabels" . 
| nindent 8 }} + nomos/component: cfgsync + spec: + containers: + - name: cfgsync + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_cfgsync.sh"] + ports: + - name: http + containerPort: {{ .Values.cfgsync.port }} + env: + - name: RUST_LOG + value: debug + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" . }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml new file mode 100644 index 0000000..db09c16 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" . }}-cfgsync + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "nomos-runner.selectorLabels" . | nindent 4 }} + nomos/component: cfgsync + ports: + - name: http + port: {{ .Values.cfgsync.port }} + targetPort: http diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml new file mode 100644 index 0000000..1803e4e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "nomos-runner.fullname" . }}-assets + labels: + {{- include "nomos-runner.labels" . 
| nindent 4 }} +data: + cfgsync.yaml: | +{{- if .Values.cfgsync.config }} +{{ .Values.cfgsync.config | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_cfgsync.sh: | +{{- if .Values.scripts.runCfgsyncSh }} +{{ .Values.scripts.runCfgsyncSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_nomos_node.sh: | +{{- if .Values.scripts.runNomosNodeSh }} +{{ .Values.scripts.runNomosNodeSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_nomos_executor.sh: | +{{- if .Values.scripts.runNomosExecutorSh }} +{{ .Values.scripts.runNomosExecutorSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml new file mode 100644 index 0000000..0aa1f42 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml @@ -0,0 +1,63 @@ +{{- $root := . 
-}} +{{- $nodes := default (list) .Values.executors.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" $root }}-executor-{{ $i }} + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 8 }} + spec: + containers: + - name: executor + image: {{ $root.Values.image }} + imagePullPolicy: {{ $root.Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_nomos_executor.sh"] + ports: + - name: http + containerPort: {{ default 18080 $node.apiPort }} + - name: testing-http + containerPort: {{ default 18081 $node.testingHttpPort }} + env: + - name: CFG_SERVER_ADDR + value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }} + {{- range $key, $value := $node.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + - name: kzg-params + mountPath: /kzgrs_test_params + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" $root }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh + - key: run_nomos_executor.sh + path: scripts/run_nomos_executor.sh + - key: run_nomos_node.sh + path: scripts/run_nomos_node.sh + - name: kzg-params + persistentVolumeClaim: + claimName: {{ include "nomos-runner.fullname" $root }}-kzg + readOnly: true +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml new file mode 100644 index 
0000000..279a976 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml @@ -0,0 +1,22 @@ +{{- $root := . -}} +{{- $nodes := default (list) .Values.executors.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" $root }}-executor-{{ $i }} + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + type: NodePort + selector: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} + ports: + - name: http + port: {{ default 18080 $node.apiPort }} + targetPort: http + - name: testing-http + port: {{ default 18081 $node.testingHttpPort }} + targetPort: testing-http +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml new file mode 100644 index 0000000..7eaa16a --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 4 }} +data: + prometheus.yml: | +{{- if .Values.prometheus.config }} +{{ .Values.prometheus.config | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml new file mode 100644 index 0000000..4cda1c1 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml @@ -0,0 +1,38 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . 
| nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.prometheusLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 8 }} + spec: + containers: + - name: prometheus + image: {{ .Values.prometheus.image }} + imagePullPolicy: {{ .Values.prometheus.imagePullPolicy | default "IfNotPresent" }} + args: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.retention.time={{ .Values.prometheus.retention }} + - --web.enable-otlp-receiver + - --enable-feature=otlp-write-receiver + ports: + - containerPort: 9090 + name: http + volumeMounts: + - name: prometheus-config + mountPath: /etc/prometheus + volumes: + - name: prometheus-config + configMap: + name: prometheus +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml new file mode 100644 index 0000000..c0d90e2 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 4 }} +spec: + type: {{ .Values.prometheus.service.type | default "NodePort" }} + selector: + {{- include "nomos-runner.prometheusLabels" . 
| nindent 4 }} + ports: + - name: http + port: 9090 + targetPort: http + {{- if and (eq (default "NodePort" .Values.prometheus.service.type) "NodePort") .Values.prometheus.service.nodePort }} + nodePort: {{ .Values.prometheus.service.nodePort }} + {{- end }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml new file mode 100644 index 0000000..3af3b2e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ include "nomos-runner.fullname" . }}-kzg + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + capacity: + storage: {{ .Values.kzg.storageSize }} + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Delete + storageClassName: manual + hostPath: + path: {{ .Values.kzg.hostPath }} + type: {{ .Values.kzg.hostPathType }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml new file mode 100644 index 0000000..52248fe --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "nomos-runner.fullname" . }}-kzg + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + accessModes: + - ReadOnlyMany + storageClassName: manual + volumeName: {{ include "nomos-runner.fullname" . 
}}-kzg + resources: + requests: + storage: {{ .Values.kzg.storageSize }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml new file mode 100644 index 0000000..d807365 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml @@ -0,0 +1,61 @@ +{{- $root := . -}} +{{- $nodes := default (list) .Values.validators.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 8 }} + spec: + containers: + - name: validator + image: {{ $root.Values.image }} + imagePullPolicy: {{ $root.Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_nomos_node.sh"] + ports: + - name: http + containerPort: {{ default 18080 $node.apiPort }} + - name: testing-http + containerPort: {{ default 18081 $node.testingHttpPort }} + env: + - name: CFG_SERVER_ADDR + value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }} + {{- range $key, $value := $node.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + - name: kzg-params + mountPath: /kzgrs_test_params + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" $root }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh + - key: run_nomos_node.sh + 
path: scripts/run_nomos_node.sh + - name: kzg-params + persistentVolumeClaim: + claimName: {{ include "nomos-runner.fullname" $root }}-kzg + readOnly: true +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml new file mode 100644 index 0000000..ff94e2e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml @@ -0,0 +1,22 @@ +{{- $root := . -}} +{{- $nodes := default (list) .Values.validators.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + type: NodePort + selector: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} + ports: + - name: http + port: {{ default 18080 $node.apiPort }} + targetPort: http + - name: testing-http + port: {{ default 18081 $node.testingHttpPort }} + targetPort: testing-http +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/values.yaml b/testing-framework/runners/k8s/helm/nomos-runner/values.yaml new file mode 100644 index 0000000..bc72438 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/values.yaml @@ -0,0 +1,38 @@ +image: "nomos-testnet:local" +imagePullPolicy: IfNotPresent + +cfgsync: + port: 4400 + config: "" + +scripts: + runCfgsyncSh: "" + runNomosNodeSh: "" + runNomosExecutorSh: "" + +validators: + count: 1 + nodes: [] + +executors: + count: 1 + nodes: [] + +kzg: + hostPath: "/var/lib/nomos/kzgrs_test_params" + hostPathType: "Directory" + storageSize: "1Gi" + +prometheus: + enabled: true + image: "prom/prometheus:v3.0.1" + imagePullPolicy: IfNotPresent + retention: "7d" + service: + type: NodePort + nodePort: null + config: | + global: + 
evaluation_interval: 15s + external_labels: + monitor: "NomosRunner" diff --git a/testing-framework/runners/k8s/src/assets.rs b/testing-framework/runners/k8s/src/assets.rs new file mode 100644 index 0000000..77e03fe --- /dev/null +++ b/testing-framework/runners/k8s/src/assets.rs @@ -0,0 +1,316 @@ +use std::{ + collections::BTreeMap, + env, fs, io, + path::{Path, PathBuf}, +}; + +use anyhow::{Context as _, Result as AnyResult}; +use serde::Serialize; +use tempfile::TempDir; +use testing_framework_core::{ + scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, render_cfgsync_yaml}, + topology::GeneratedTopology, +}; +use thiserror::Error; + +/// Paths and image metadata required to deploy the Helm chart. +pub struct RunnerAssets { + pub image: String, + pub kzg_path: PathBuf, + pub chart_path: PathBuf, + pub cfgsync_file: PathBuf, + pub run_cfgsync_script: PathBuf, + pub run_nomos_node_script: PathBuf, + pub run_nomos_executor_script: PathBuf, + pub values_file: PathBuf, + _tempdir: TempDir, +} + +pub const CFGSYNC_PORT: u16 = 4400; + +#[derive(Debug, Error)] +/// Failures preparing Helm assets and rendered cfgsync configuration. 
+pub enum AssetsError { + #[error("failed to locate workspace root: {source}")] + WorkspaceRoot { + #[source] + source: anyhow::Error, + }, + #[error("failed to render cfgsync configuration: {source}")] + Cfgsync { + #[source] + source: anyhow::Error, + }, + #[error("missing required script at {path}")] + MissingScript { path: PathBuf }, + #[error("missing KZG parameters at {path}; build them with `make kzgrs_test_params`")] + MissingKzg { path: PathBuf }, + #[error("missing Helm chart at {path}; ensure the repository is up-to-date")] + MissingChart { path: PathBuf }, + #[error("failed to create temporary directory for rendered assets: {source}")] + TempDir { + #[source] + source: io::Error, + }, + #[error("failed to write asset at {path}: {source}")] + Io { + path: PathBuf, + #[source] + source: io::Error, + }, + #[error("failed to render Helm values: {source}")] + Values { + #[source] + source: serde_yaml::Error, + }, +} + +/// Render cfgsync config, Helm values, and locate scripts/KZG assets for a +/// topology. 
+pub fn prepare_assets(topology: &GeneratedTopology) -> Result<RunnerAssets, AssetsError> {
+    // Resolve the repo root first: every other asset path is derived from it.
+    let root = workspace_root().map_err(|source| AssetsError::WorkspaceRoot { source })?;
+    let cfgsync_yaml = render_cfgsync_config(&root, topology)?;
+
+    // Rendered files live in a tempdir owned by `RunnerAssets` (`_tempdir`),
+    // so they are removed automatically when the assets are dropped.
+    let tempdir = tempfile::Builder::new()
+        .prefix("nomos-helm-")
+        .tempdir()
+        .map_err(|source| AssetsError::TempDir { source })?;
+
+    let cfgsync_file = write_temp_file(tempdir.path(), "cfgsync.yaml", cfgsync_yaml)?;
+    let scripts = validate_scripts(&root)?;
+    let kzg_path = validate_kzg_params(&root)?;
+    let chart_path = helm_chart_path()?;
+    let values_yaml = render_values_yaml(topology)?;
+    let values_file = write_temp_file(tempdir.path(), "values.yaml", values_yaml)?;
+    // Image is overridable for CI; defaults to the locally-built tag.
+    let image =
+        env::var("NOMOS_TESTNET_IMAGE").unwrap_or_else(|_| String::from("nomos-testnet:local"));
+
+    Ok(RunnerAssets {
+        image,
+        kzg_path,
+        chart_path,
+        cfgsync_file,
+        run_cfgsync_script: scripts.run_cfgsync,
+        run_nomos_node_script: scripts.run_node,
+        run_nomos_executor_script: scripts.run_executor,
+        values_file,
+        _tempdir: tempdir,
+    })
+}
+
+// k8s pods can take much longer to schedule than local processes, so the
+// cfgsync timeout is raised to at least this many seconds.
+const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300;
+
+/// Load the cfgsync template, apply topology overrides, and render it to YAML.
+// NOTE(review): Ok type reconstructed as `String` from usage (the result is
+// written via `write_temp_file`, which takes `impl AsRef<[u8]>`) — confirm
+// against `render_cfgsync_yaml`'s signature.
+fn render_cfgsync_config(
+    root: &Path,
+    topology: &GeneratedTopology,
+) -> Result<String, AssetsError> {
+    let cfgsync_template_path = stack_assets_root(root).join("cfgsync.yaml");
+    let mut cfg = load_cfgsync_template(&cfgsync_template_path)
+        .map_err(|source| AssetsError::Cfgsync { source })?;
+    apply_topology_overrides(&mut cfg, topology, true);
+    // Never lower a template-provided timeout; only raise it to the k8s floor.
+    cfg.timeout = cfg.timeout.max(CFGSYNC_K8S_TIMEOUT_SECS);
+    render_cfgsync_yaml(&cfg).map_err(|source| AssetsError::Cfgsync { source })
+}
+
+/// Absolute paths of the three runtime entrypoint scripts shipped with the
+/// stack assets.
+struct ScriptPaths {
+    run_cfgsync: PathBuf,
+    run_node: PathBuf,
+    run_executor: PathBuf,
+}
+
+/// Check that all required entrypoint scripts exist on disk and return their
+/// paths.
+fn validate_scripts(root: &Path) -> Result<ScriptPaths, AssetsError> {
+    let scripts_dir = stack_scripts_root(root);
+    let run_cfgsync = scripts_dir.join("run_cfgsync.sh");
+    let run_node = scripts_dir.join("run_nomos_node.sh");
+    let run_executor = scripts_dir.join("run_nomos_executor.sh");
+
+    for path in [&run_cfgsync, &run_node, &run_executor] {
+        if !path.exists() {
+            return Err(AssetsError::MissingScript { path: path.clone() });
+        }
+    }
+
+    Ok(ScriptPaths {
+        run_cfgsync,
+        run_node,
+        run_executor,
+    })
+}
+
+/// Ensure the pre-generated KZG parameters exist; they are built out-of-band
+/// (`make kzgrs_test_params`) and only validated here.
+fn validate_kzg_params(root: &Path) -> Result<PathBuf, AssetsError> {
+    let path = root.join("testing-framework/assets/stack/kzgrs_test_params");
+    if path.exists() {
+        Ok(path)
+    } else {
+        Err(AssetsError::MissingKzg { path })
+    }
+}
+
+/// Locate the bundled Helm chart relative to this crate's manifest directory.
+fn helm_chart_path() -> Result<PathBuf, AssetsError> {
+    let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("helm/nomos-runner");
+    if path.exists() {
+        Ok(path)
+    } else {
+        Err(AssetsError::MissingChart { path })
+    }
+}
+
+/// Serialize the per-topology Helm values to a YAML string.
+fn render_values_yaml(topology: &GeneratedTopology) -> Result<String, AssetsError> {
+    let values = build_values(topology);
+    serde_yaml::to_string(&values).map_err(|source| AssetsError::Values { source })
+}
+
+/// Write `contents` under `dir/name`, returning the full path on success.
+fn write_temp_file(
+    dir: &Path,
+    name: &str,
+    contents: impl AsRef<[u8]>,
+) -> Result<PathBuf, AssetsError> {
+    let path = dir.join(name);
+    fs::write(&path, contents).map_err(|source| AssetsError::Io {
+        path: path.clone(),
+        source,
+    })?;
+    Ok(path)
+}
+
+/// Locate the workspace root, honoring `CARGO_WORKSPACE_DIR` overrides.
+pub fn workspace_root() -> AnyResult<PathBuf> {
+    // Explicit override wins; used when the crate is built outside the repo.
+    if let Ok(var) = env::var("CARGO_WORKSPACE_DIR") {
+        return Ok(PathBuf::from(var));
+    }
+    // Fall back to walking up three levels from this crate's manifest dir
+    // (runners/k8s -> runners -> testing-framework -> workspace root).
+    let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
+    manifest_dir
+        .parent()
+        .and_then(Path::parent)
+        .and_then(Path::parent)
+        .map(Path::to_path_buf)
+        .context("resolving workspace root from manifest dir")
+}
+
+/// Prefer the new `testing-framework/assets/stack` layout, falling back to
+/// the legacy `testnet/` directory.
+fn stack_assets_root(root: &Path) -> PathBuf {
+    let new_layout = root.join("testing-framework/assets/stack");
+    if new_layout.exists() {
+        new_layout
+    } else {
+        root.join("testnet")
+    }
+}
+
+/// Script directory counterpart of [`stack_assets_root`], with the same
+/// new-layout-first fallback.
+fn stack_scripts_root(root: &Path) -> PathBuf {
+    let new_layout = root.join("testing-framework/assets/stack/scripts");
+    if new_layout.exists() {
+        new_layout
+    } else {
+        root.join("testnet/scripts")
+    }
+}
+
+/// Top-level Helm `values.yaml` shape consumed by the nomos-runner chart.
+#[derive(Serialize)]
+struct HelmValues {
+    validators: NodeGroup,
+    executors: NodeGroup,
+}
+
+/// A homogeneous group of nodes (validators or executors).
+#[derive(Serialize)]
+struct NodeGroup {
+    count: usize,
+    nodes: Vec<NodeValues>,
+}
+
+/// Per-node Helm values; field names are camelCased to match the chart.
+#[derive(Serialize)]
+struct NodeValues {
+    #[serde(rename = "apiPort")]
+    api_port: u16,
+    #[serde(rename = "testingHttpPort")]
+    testing_http_port: u16,
+    env: BTreeMap<String, String>,
+}
+
+/// Translate a generated topology into Helm values, one env map per node.
+fn build_values(topology: &GeneratedTopology) -> HelmValues {
+    let pol_mode = pol_proof_mode();
+    let validators = topology
+        .validators()
+        .iter()
+        .enumerate()
+        .map(|(index, validator)| {
+            let mut env = BTreeMap::new();
+            env.insert("POL_PROOF_DEV_MODE".into(), pol_mode.clone());
+            env.insert(
+                "CFG_NETWORK_PORT".into(),
+                validator.network_port().to_string(),
+            );
+            env.insert("CFG_DA_PORT".into(), validator.da_port.to_string());
+            env.insert("CFG_BLEND_PORT".into(), validator.blend_port.to_string());
+            env.insert(
+                "CFG_API_PORT".into(),
+                validator.general.api_config.address.port().to_string(),
+            );
+            env.insert(
+                "CFG_TESTING_HTTP_PORT".into(),
+                validator
+                    .general
+                    .api_config
+                    .testing_http_address
+                    .port()
+                    .to_string(),
+            );
+            env.insert("CFG_HOST_KIND".into(), "validator".into());
+            env.insert("CFG_HOST_IDENTIFIER".into(),
format!("validator-{index}")); + + NodeValues { + api_port: validator.general.api_config.address.port(), + testing_http_port: validator.general.api_config.testing_http_address.port(), + env, + } + }) + .collect(); + + let executors = topology + .executors() + .iter() + .enumerate() + .map(|(index, executor)| { + let mut env = BTreeMap::new(); + env.insert("POL_PROOF_DEV_MODE".into(), pol_mode.clone()); + env.insert( + "CFG_NETWORK_PORT".into(), + executor.network_port().to_string(), + ); + env.insert("CFG_DA_PORT".into(), executor.da_port.to_string()); + env.insert("CFG_BLEND_PORT".into(), executor.blend_port.to_string()); + env.insert( + "CFG_API_PORT".into(), + executor.general.api_config.address.port().to_string(), + ); + env.insert( + "CFG_TESTING_HTTP_PORT".into(), + executor + .general + .api_config + .testing_http_address + .port() + .to_string(), + ); + env.insert("CFG_HOST_KIND".into(), "executor".into()); + env.insert("CFG_HOST_IDENTIFIER".into(), format!("executor-{index}")); + + NodeValues { + api_port: executor.general.api_config.address.port(), + testing_http_port: executor.general.api_config.testing_http_address.port(), + env, + } + }) + .collect(); + + HelmValues { + validators: NodeGroup { + count: topology.validators().len(), + nodes: validators, + }, + executors: NodeGroup { + count: topology.executors().len(), + nodes: executors, + }, + } +} + +fn pol_proof_mode() -> String { + env::var("POL_PROOF_DEV_MODE").unwrap_or_else(|_| "true".to_string()) +} diff --git a/testing-framework/runners/k8s/src/block_feed.rs b/testing-framework/runners/k8s/src/block_feed.rs new file mode 100644 index 0000000..a5a07f9 --- /dev/null +++ b/testing-framework/runners/k8s/src/block_feed.rs @@ -0,0 +1,18 @@ +use testing_framework_core::scenario::{BlockFeed, BlockFeedTask, NodeClients, spawn_block_feed}; +use tracing::info; + +use crate::deployer::K8sRunnerError; + +pub async fn spawn_block_feed_with( + node_clients: &NodeClients, +) -> Result<(BlockFeed, 
BlockFeedTask), K8sRunnerError> { + let block_source_client = node_clients + .any_client() + .cloned() + .ok_or(K8sRunnerError::BlockFeedMissing)?; + + info!("starting block feed"); + spawn_block_feed(block_source_client) + .await + .map_err(|source| K8sRunnerError::BlockFeed { source }) +} diff --git a/testing-framework/runners/k8s/src/cleanup.rs b/testing-framework/runners/k8s/src/cleanup.rs new file mode 100644 index 0000000..41c7c60 --- /dev/null +++ b/testing-framework/runners/k8s/src/cleanup.rs @@ -0,0 +1,232 @@ +use std::thread; + +use k8s_openapi::api::core::v1::Namespace; +use kube::{Api, Client, api::DeleteParams}; +use testing_framework_core::scenario::CleanupGuard; +use tokio::{ + process::Command, + time::{Duration, sleep}, +}; +use tracing::warn; + +use crate::helm::uninstall_release; + +/// Tears down Helm release and namespace after a run unless preservation is +/// set. +pub struct RunnerCleanup { + client: Client, + namespace: String, + release: String, + preserve: bool, +} + +impl RunnerCleanup { + /// Build a cleanup guard; `preserve` skips deletion when true. 
+    pub fn new(client: Client, namespace: String, release: String, preserve: bool) -> Self {
+        // Empty identifiers would make cleanup target the wrong resources;
+        // catch that in debug builds.
+        debug_assert!(
+            !namespace.is_empty() && !release.is_empty(),
+            "k8s cleanup requires namespace and release"
+        );
+        Self {
+            client,
+            namespace,
+            release,
+            preserve,
+        }
+    }
+
+    /// Run the actual teardown, or just announce preservation when requested.
+    async fn cleanup_async(&self) {
+        if self.preserve {
+            print_preserve_notice(&self.release, &self.namespace);
+            return;
+        }
+
+        uninstall_release_and_namespace(&self.client, &self.release, &self.namespace).await;
+    }
+
+    /// Attempt cleanup on a freshly-created runtime with a 120s cap.
+    /// Returns `false` when the caller should fall back to a background
+    /// thread (runtime creation failed or the timeout elapsed).
+    fn blocking_cleanup_success(&self) -> bool {
+        match tokio::runtime::Runtime::new() {
+            Ok(rt) => match rt.block_on(async {
+                tokio::time::timeout(Duration::from_secs(120), self.cleanup_async()).await
+            }) {
+                Ok(()) => true,
+                Err(err) => {
+                    warn!(
+                        "[k8s-runner] cleanup timed out after 120s: {err}; falling back to background thread"
+                    );
+                    false
+                }
+            },
+            Err(err) => {
+                warn!(
+                    "[k8s-runner] unable to create cleanup runtime: {err}; falling back to background thread"
+                );
+                false
+            }
+        }
+    }
+
+    /// Run cleanup on a dedicated OS thread (needed when we are already
+    /// inside a tokio runtime and cannot block it). The thread is joined, so
+    /// this still blocks the caller until cleanup finishes.
+    fn spawn_cleanup_thread(self: Box<Self>) {
+        match thread::Builder::new()
+            .name("k8s-runner-cleanup".into())
+            .spawn(move || run_background_cleanup(self))
+        {
+            Ok(handle) => {
+                if let Err(err) = handle.join() {
+                    warn!("[k8s-runner] cleanup thread panicked: {err:?}");
+                }
+            }
+            Err(err) => warn!("[k8s-runner] failed to spawn cleanup thread: {err}"),
+        }
+    }
+}
+
+fn print_preserve_notice(release: &str, namespace: &str) {
+    println!("[k8s-runner] preserving Helm release `{release}` in namespace `{namespace}`");
+}
+
+/// Best-effort teardown: Helm uninstall failures are reported but do not
+/// stop the namespace deletion that follows.
+async fn uninstall_release_and_namespace(client: &Client, release: &str, namespace: &str) {
+    if let Err(err) = uninstall_release(release, namespace).await {
+        println!("[k8s-runner] helm uninstall {release} failed: {err}");
+    }
+
+    println!("[k8s-runner] deleting namespace `{namespace}` via k8s API",);
+    delete_namespace(client, namespace).await;
+    println!("[k8s-runner] delete request for namespace `{namespace}` finished",);
+}
+
+fn run_background_cleanup(cleanup:
+Box<RunnerCleanup>) {
+    // Runs on a plain OS thread, so a private runtime is required here too.
+    match tokio::runtime::Runtime::new() {
+        Ok(rt) => {
+            if let Err(err) = rt.block_on(async {
+                tokio::time::timeout(Duration::from_secs(120), cleanup.cleanup_async()).await
+            }) {
+                warn!("[k8s-runner] background cleanup timed out: {err}");
+            }
+        }
+        Err(err) => warn!("[k8s-runner] unable to create cleanup runtime: {err}"),
+    }
+}
+
+/// Delete a namespace, trying the kubernetes API first and falling back to
+/// the `kubectl` CLI; in either success case, wait for actual termination.
+async fn delete_namespace(client: &Client, namespace: &str) {
+    let namespaces: Api<Namespace> = Api::all(client.clone());
+
+    if delete_namespace_via_api(&namespaces, namespace).await {
+        wait_for_namespace_termination(&namespaces, namespace).await;
+        return;
+    }
+
+    if delete_namespace_via_cli(namespace).await {
+        wait_for_namespace_termination(&namespaces, namespace).await;
+    } else {
+        warn!("[k8s-runner] unable to delete namespace `{namespace}` using kubectl fallback");
+    }
+}
+
+/// Issue the delete through the kubernetes API with a 10s cap; returns
+/// whether the request was accepted (timeouts/errors trigger the CLI
+/// fallback in the caller).
+async fn delete_namespace_via_api(namespaces: &Api<Namespace>, namespace: &str) -> bool {
+    println!("[k8s-runner] invoking kubernetes API to delete namespace `{namespace}`");
+    match tokio::time::timeout(
+        Duration::from_secs(10),
+        namespaces.delete(namespace, &DeleteParams::default()),
+    )
+    .await
+    {
+        Ok(Ok(_)) => {
+            println!(
+                "[k8s-runner] delete request accepted for namespace `{namespace}`; waiting for termination"
+            );
+            true
+        }
+        Ok(Err(err)) => {
+            println!("[k8s-runner] failed to delete namespace `{namespace}` via API: {err}");
+            warn!("[k8s-runner] api delete failed for namespace {namespace}: {err}");
+            false
+        }
+        Err(_) => {
+            println!(
+                "[k8s-runner] kubernetes API timed out deleting namespace `{namespace}`; falling back to kubectl"
+            );
+            false
+        }
+    }
+}
+
+/// Fallback deletion through the `kubectl` binary; returns whether the
+/// command ran and exited successfully.
+async fn delete_namespace_via_cli(namespace: &str) -> bool {
+    println!("[k8s-runner] invoking `kubectl delete namespace {namespace}` fallback");
+    let output = Command::new("kubectl")
+        .arg("delete")
+        .arg("namespace")
+        .arg(namespace)
+        .arg("--wait=true")
+        .output()
+        .await;
+
+    match output {
+        Ok(result) if result.status.success() => {
+            println!("[k8s-runner] `kubectl delete
namespace {namespace}` completed successfully"); + true + } + Ok(result) => { + println!( + "[k8s-runner] `kubectl delete namespace {namespace}` failed: {}\n{}", + String::from_utf8_lossy(&result.stderr), + String::from_utf8_lossy(&result.stdout) + ); + false + } + Err(err) => { + println!("[k8s-runner] failed to spawn kubectl for namespace `{namespace}`: {err}"); + false + } + } +} + +async fn wait_for_namespace_termination(namespaces: &Api, namespace: &str) { + for attempt in 0..60 { + if namespace_deleted(namespaces, namespace, attempt).await { + return; + } + sleep(Duration::from_secs(1)).await; + } + + warn!( + "[k8s-runner] namespace `{}` still present after waiting for deletion", + namespace + ); +} + +async fn namespace_deleted(namespaces: &Api, namespace: &str, attempt: u32) -> bool { + match namespaces.get_opt(namespace).await { + Ok(Some(ns)) => { + if attempt == 0 { + let phase = ns + .status + .as_ref() + .and_then(|status| status.phase.clone()) + .unwrap_or_else(|| "Unknown".into()); + println!( + "[k8s-runner] waiting for namespace `{}` to terminate (phase={phase:?})", + namespace + ); + } + false + } + Ok(None) => { + println!("[k8s-runner] namespace `{namespace}` deleted"); + true + } + Err(err) => { + warn!("[k8s-runner] namespace `{namespace}` poll failed: {err}"); + true + } + } +} + +impl CleanupGuard for RunnerCleanup { + fn cleanup(self: Box) { + if tokio::runtime::Handle::try_current().is_err() && self.blocking_cleanup_success() { + return; + } + self.spawn_cleanup_thread(); + } +} diff --git a/testing-framework/runners/k8s/src/cluster.rs b/testing-framework/runners/k8s/src/cluster.rs new file mode 100644 index 0000000..317c5b6 --- /dev/null +++ b/testing-framework/runners/k8s/src/cluster.rs @@ -0,0 +1,334 @@ +use std::env; + +use kube::Client; +use reqwest::Url; +use testing_framework_core::{ + nodes::ApiClient, + scenario::{CleanupGuard, Metrics, MetricsError, NodeClients, http_probe::NodeRole}, + topology::{GeneratedTopology, 
ReadinessError}, +}; +use tracing::info; +use url::ParseError; +use uuid::Uuid; + +use crate::{ + assets::RunnerAssets, + cleanup::RunnerCleanup, + host::node_host, + logs::dump_namespace_logs, + wait::{ClusterPorts, ClusterReady, NodeConfigPorts, wait_for_cluster_ready}, +}; + +#[derive(Default)] +pub struct PortSpecs { + pub validators: Vec, + pub executors: Vec, +} + +/// Holds k8s namespace, Helm release, port forwards, and cleanup guard. +pub struct ClusterEnvironment { + client: Client, + namespace: String, + release: String, + cleanup: Option, + validator_api_ports: Vec, + validator_testing_ports: Vec, + executor_api_ports: Vec, + executor_testing_ports: Vec, + prometheus_port: u16, + port_forwards: Vec, +} + +impl ClusterEnvironment { + pub fn new( + client: Client, + namespace: String, + release: String, + cleanup: RunnerCleanup, + ports: &ClusterPorts, + port_forwards: Vec, + ) -> Self { + Self { + client, + namespace, + release, + cleanup: Some(cleanup), + validator_api_ports: ports.validators.iter().map(|ports| ports.api).collect(), + validator_testing_ports: ports.validators.iter().map(|ports| ports.testing).collect(), + executor_api_ports: ports.executors.iter().map(|ports| ports.api).collect(), + executor_testing_ports: ports.executors.iter().map(|ports| ports.testing).collect(), + prometheus_port: ports.prometheus, + port_forwards, + } + } + + pub async fn fail(&mut self, reason: &str) { + tracing::error!( + reason = reason, + namespace = %self.namespace, + release = %self.release, + "k8s stack failure; collecting diagnostics" + ); + dump_namespace_logs(&self.client, &self.namespace).await; + kill_port_forwards(&mut self.port_forwards); + if let Some(guard) = self.cleanup.take() { + Box::new(guard).cleanup(); + } + } + + pub fn into_cleanup(self) -> (RunnerCleanup, Vec) { + ( + self.cleanup.expect("cleanup guard should be available"), + self.port_forwards, + ) + } + + pub fn prometheus_port(&self) -> u16 { + self.prometheus_port + } + + pub fn 
validator_ports(&self) -> (&[u16], &[u16]) { + (&self.validator_api_ports, &self.validator_testing_ports) + } + + pub fn executor_ports(&self) -> (&[u16], &[u16]) { + (&self.executor_api_ports, &self.executor_testing_ports) + } +} + +#[derive(Debug, thiserror::Error)] +/// Failures while building node clients against forwarded ports. +pub enum NodeClientError { + #[error( + "failed to build {endpoint} client URL for {role} port {port}: {source}", + role = role.label() + )] + Endpoint { + role: NodeRole, + endpoint: &'static str, + port: u16, + #[source] + source: ParseError, + }, +} + +#[derive(Debug, thiserror::Error)] +/// Readiness check failures for the remote cluster endpoints. +pub enum RemoteReadinessError { + #[error( + "failed to build readiness URL for {role} port {port}: {source}", + role = role.label() + )] + Endpoint { + role: NodeRole, + port: u16, + #[source] + source: ParseError, + }, + #[error("remote readiness probe failed: {source}")] + Remote { + #[source] + source: ReadinessError, + }, +} + +pub fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs { + let validators = descriptors + .validators() + .iter() + .map(|node| NodeConfigPorts { + api: node.general.api_config.address.port(), + testing: node.general.api_config.testing_http_address.port(), + }) + .collect(); + let executors = descriptors + .executors() + .iter() + .map(|node| NodeConfigPorts { + api: node.general.api_config.address.port(), + testing: node.general.api_config.testing_http_address.port(), + }) + .collect(); + + PortSpecs { + validators, + executors, + } +} + +pub fn build_node_clients(cluster: &ClusterEnvironment) -> Result { + let validators = cluster + .validator_api_ports + .iter() + .copied() + .zip(cluster.validator_testing_ports.iter().copied()) + .map(|(api_port, testing_port)| { + api_client_from_ports(NodeRole::Validator, api_port, testing_port) + }) + .collect::, _>>()?; + let executors = cluster + .executor_api_ports + .iter() + .copied() + 
.zip(cluster.executor_testing_ports.iter().copied())
        .map(|(api_port, testing_port)| {
            api_client_from_ports(NodeRole::Executor, api_port, testing_port)
        })
        // Reconstructed turbofish: collect the fallible client builds,
        // short-circuiting on the first `NodeClientError`.
        .collect::<Result<Vec<_>, _>>()?;

    Ok(NodeClients::new(validators, executors))
}

/// Build a Prometheus-backed metrics handle for the forwarded `port`.
///
/// # Errors
/// Returns `MetricsError` when the host URL cannot be built or the handle
/// cannot be constructed.
pub fn metrics_handle_from_port(port: u16) -> Result<Metrics, MetricsError> {
    let url = cluster_host_url(port)
        .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}")))?;
    Metrics::from_prometheus(url)
}

/// Probe every validator/executor API and membership (testing) endpoint until
/// the remote cluster reports ready.
///
/// # Errors
/// Fails when a readiness URL cannot be built or the remote probe itself
/// reports failure.
pub async fn ensure_cluster_readiness(
    descriptors: &GeneratedTopology,
    cluster: &ClusterEnvironment,
) -> Result<(), RemoteReadinessError> {
    info!("waiting for remote readiness (API + membership)");
    let (validator_api, validator_testing) = cluster.validator_ports();
    let (executor_api, executor_testing) = cluster.executor_ports();

    let validator_urls = readiness_urls(validator_api, NodeRole::Validator)?;
    let executor_urls = readiness_urls(executor_api, NodeRole::Executor)?;
    let validator_membership_urls = readiness_urls(validator_testing, NodeRole::Validator)?;
    let executor_membership_urls = readiness_urls(executor_testing, NodeRole::Executor)?;

    descriptors
        .wait_remote_readiness(
            &validator_urls,
            &executor_urls,
            Some(&validator_membership_urls),
            Some(&executor_membership_urls),
        )
        .await
        .map_err(|source| RemoteReadinessError::Remote { source })
}

/// Generate a unique (namespace, release) pair for one run. Both values are
/// intentionally identical (`nomos-k8s-<uuid>`), keeping Helm release and
/// namespace names in lockstep.
pub fn cluster_identifiers() -> (String, String) {
    let run_id = Uuid::new_v4().simple().to_string();
    let namespace = format!("nomos-k8s-{run_id}");
    (namespace.clone(), namespace)
}

/// Install the Helm release for the requested topology and return the cleanup
/// guard that will uninstall it (honouring `K8S_RUNNER_PRESERVE`).
///
/// # Errors
/// NOTE(review): return type reconstructed from a garbled source — the only
/// fallible call here is `helm::install_release`, which yields `HelmError`;
/// confirm against the deployer's `?` conversions.
pub async fn install_stack(
    client: &Client,
    assets: &RunnerAssets,
    namespace: &str,
    release: &str,
    validators: usize,
    executors: usize,
) -> Result<RunnerCleanup, HelmError> {
    tracing::info!(
        release = %release,
        namespace = %namespace,
        "installing helm release"
    );
    crate::helm::install_release(assets, release, namespace, validators, executors).await?;
    tracing::info!(release = %release, "helm install succeeded");

    let preserve = 
env::var("K8S_RUNNER_PRESERVE").is_ok(); + Ok(RunnerCleanup::new( + client.clone(), + namespace.to_owned(), + release.to_owned(), + preserve, + )) +} + +pub async fn wait_for_ports_or_cleanup( + client: &Client, + namespace: &str, + release: &str, + specs: &PortSpecs, + cleanup_guard: &mut Option, +) -> Result { + info!( + validators = specs.validators.len(), + executors = specs.executors.len(), + %namespace, + %release, + "waiting for cluster port-forwards" + ); + match wait_for_cluster_ready( + client, + namespace, + release, + &specs.validators, + &specs.executors, + ) + .await + { + Ok(ports) => { + info!( + prometheus_port = ports.ports.prometheus, + validator_ports = ?ports.ports.validators, + executor_ports = ?ports.ports.executors, + "cluster port-forwards established" + ); + Ok(ports) + } + Err(err) => { + cleanup_pending(client, namespace, cleanup_guard).await; + Err(err.into()) + } + } +} + +pub fn kill_port_forwards(handles: &mut Vec) { + for handle in handles.iter_mut() { + let _ = handle.kill(); + let _ = handle.wait(); + } + handles.clear(); +} + +async fn cleanup_pending(client: &Client, namespace: &str, guard: &mut Option) { + crate::logs::dump_namespace_logs(client, namespace).await; + if let Some(guard) = guard.take() { + Box::new(guard).cleanup(); + } +} + +fn readiness_urls(ports: &[u16], role: NodeRole) -> Result, RemoteReadinessError> { + ports + .iter() + .copied() + .map(|port| readiness_url(role, port)) + .collect() +} + +fn readiness_url(role: NodeRole, port: u16) -> Result { + cluster_host_url(port).map_err(|source| RemoteReadinessError::Endpoint { role, port, source }) +} + +fn cluster_host_url(port: u16) -> Result { + Url::parse(&format!("http://{}:{port}/", node_host())) +} + +fn api_client_from_ports( + role: NodeRole, + api_port: u16, + testing_port: u16, +) -> Result { + let base_endpoint = cluster_host_url(api_port).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "api", + port: api_port, + source, + })?; + let 
testing_endpoint = + Some( + cluster_host_url(testing_port).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "testing", + port: testing_port, + source, + })?, + ); + Ok(ApiClient::from_urls(base_endpoint, testing_endpoint)) +} diff --git a/testing-framework/runners/k8s/src/deployer.rs b/testing-framework/runners/k8s/src/deployer.rs new file mode 100644 index 0000000..93e5658 --- /dev/null +++ b/testing-framework/runners/k8s/src/deployer.rs @@ -0,0 +1,258 @@ +use anyhow::Error; +use async_trait::async_trait; +use kube::Client; +use testing_framework_core::{ + scenario::{BlockFeedTask, CleanupGuard, Deployer, MetricsError, RunContext, Runner, Scenario}, + topology::GeneratedTopology, +}; +use tracing::{error, info}; + +use crate::{ + assets::{AssetsError, prepare_assets}, + block_feed::spawn_block_feed_with, + cleanup::RunnerCleanup, + cluster::{ + ClusterEnvironment, NodeClientError, PortSpecs, RemoteReadinessError, build_node_clients, + cluster_identifiers, collect_port_specs, ensure_cluster_readiness, install_stack, + kill_port_forwards, metrics_handle_from_port, wait_for_ports_or_cleanup, + }, + helm::HelmError, + wait::ClusterWaitError, +}; + +/// Deploys a scenario into Kubernetes using Helm charts and port-forwards. +#[derive(Clone, Copy)] +pub struct K8sDeployer { + readiness_checks: bool, +} + +impl Default for K8sDeployer { + fn default() -> Self { + Self::new() + } +} + +impl K8sDeployer { + #[must_use] + /// Create a k8s deployer with readiness checks enabled. + pub const fn new() -> Self { + Self { + readiness_checks: true, + } + } + + #[must_use] + /// Enable/disable readiness probes before handing control to workloads. + pub const fn with_readiness(mut self, enabled: bool) -> Self { + self.readiness_checks = enabled; + self + } +} + +#[derive(Debug, thiserror::Error)] +/// High-level runner failures returned to the scenario harness. 
+pub enum K8sRunnerError { + #[error( + "kubernetes runner requires at least one validator and one executor (validators={validators}, executors={executors})" + )] + UnsupportedTopology { validators: usize, executors: usize }, + #[error("failed to initialise kubernetes client: {source}")] + ClientInit { + #[source] + source: kube::Error, + }, + #[error(transparent)] + Assets(#[from] AssetsError), + #[error(transparent)] + Helm(#[from] HelmError), + #[error(transparent)] + Cluster(#[from] Box), + #[error(transparent)] + Readiness(#[from] RemoteReadinessError), + #[error(transparent)] + NodeClients(#[from] NodeClientError), + #[error(transparent)] + Telemetry(#[from] MetricsError), + #[error("k8s runner requires at least one node client to follow blocks")] + BlockFeedMissing, + #[error("failed to initialize block feed: {source}")] + BlockFeed { + #[source] + source: Error, + }, +} + +#[async_trait] +impl Deployer for K8sDeployer { + type Error = K8sRunnerError; + + async fn deploy(&self, scenario: &Scenario) -> Result { + let descriptors = scenario.topology().clone(); + ensure_supported_topology(&descriptors)?; + + let client = Client::try_default() + .await + .map_err(|source| K8sRunnerError::ClientInit { source })?; + info!( + validators = descriptors.validators().len(), + executors = descriptors.executors().len(), + "starting k8s deployment" + ); + + let port_specs = collect_port_specs(&descriptors); + let mut cluster = + Some(setup_cluster(&client, &port_specs, &descriptors, self.readiness_checks).await?); + + info!("building node clients"); + let node_clients = match build_node_clients( + cluster + .as_ref() + .expect("cluster must be available while building clients"), + ) { + Ok(clients) => clients, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to construct node api clients").await; + } + return Err(err.into()); + } + }; + + let telemetry = match metrics_handle_from_port( + cluster + .as_ref() + .expect("cluster must be available 
for telemetry") + .prometheus_port(), + ) { + Ok(handle) => handle, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to configure prometheus metrics handle") + .await; + } + return Err(err.into()); + } + }; + let (block_feed, block_feed_guard) = match spawn_block_feed_with(&node_clients).await { + Ok(pair) => pair, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to initialize block feed").await; + } + return Err(err); + } + }; + let (cleanup, port_forwards) = cluster + .take() + .expect("cluster should still be available") + .into_cleanup(); + let cleanup_guard: Box = Box::new(K8sCleanupGuard::new( + cleanup, + block_feed_guard, + port_forwards, + )); + let context = RunContext::new( + descriptors, + None, + node_clients, + scenario.duration(), + telemetry, + block_feed, + None, + ); + Ok(Runner::new(context, Some(cleanup_guard))) + } +} + +impl From for K8sRunnerError { + fn from(value: ClusterWaitError) -> Self { + Self::Cluster(Box::new(value)) + } +} + +fn ensure_supported_topology(descriptors: &GeneratedTopology) -> Result<(), K8sRunnerError> { + let validators = descriptors.validators().len(); + let executors = descriptors.executors().len(); + if validators == 0 || executors == 0 { + return Err(K8sRunnerError::UnsupportedTopology { + validators, + executors, + }); + } + Ok(()) +} + +async fn setup_cluster( + client: &Client, + specs: &PortSpecs, + descriptors: &GeneratedTopology, + readiness_checks: bool, +) -> Result { + let assets = prepare_assets(descriptors)?; + let validators = descriptors.validators().len(); + let executors = descriptors.executors().len(); + + let (namespace, release) = cluster_identifiers(); + info!(%namespace, %release, validators, executors, "preparing k8s assets and namespace"); + + let mut cleanup_guard = + Some(install_stack(client, &assets, &namespace, &release, validators, executors).await?); + + info!("waiting for helm-managed services to become ready"); + let cluster_ready = 
+ wait_for_ports_or_cleanup(client, &namespace, &release, specs, &mut cleanup_guard).await?; + + info!( + prometheus_port = cluster_ready.ports.prometheus, + "discovered prometheus endpoint" + ); + + let environment = ClusterEnvironment::new( + client.clone(), + namespace, + release, + cleanup_guard + .take() + .expect("cleanup guard must exist after successful cluster startup"), + &cluster_ready.ports, + cluster_ready.port_forwards, + ); + + if readiness_checks { + info!("probing cluster readiness"); + ensure_cluster_readiness(descriptors, &environment).await?; + info!("cluster readiness probes passed"); + } + + Ok(environment) +} + +struct K8sCleanupGuard { + cleanup: RunnerCleanup, + block_feed: Option, + port_forwards: Vec, +} + +impl K8sCleanupGuard { + const fn new( + cleanup: RunnerCleanup, + block_feed: BlockFeedTask, + port_forwards: Vec, + ) -> Self { + Self { + cleanup, + block_feed: Some(block_feed), + port_forwards, + } + } +} + +impl CleanupGuard for K8sCleanupGuard { + fn cleanup(mut self: Box) { + if let Some(block_feed) = self.block_feed.take() { + CleanupGuard::cleanup(Box::new(block_feed)); + } + kill_port_forwards(&mut self.port_forwards); + CleanupGuard::cleanup(Box::new(self.cleanup)); + } +} diff --git a/testing-framework/runners/k8s/src/helm.rs b/testing-framework/runners/k8s/src/helm.rs new file mode 100644 index 0000000..31dfd11 --- /dev/null +++ b/testing-framework/runners/k8s/src/helm.rs @@ -0,0 +1,144 @@ +use std::{io, process::Stdio}; + +use thiserror::Error; +use tokio::process::Command; + +use crate::assets::{CFGSYNC_PORT, RunnerAssets, workspace_root}; + +/// Errors returned from Helm invocations. 
+#[derive(Debug, Error)] +pub enum HelmError { + #[error("failed to spawn {command}: {source}")] + Spawn { + command: String, + #[source] + source: io::Error, + }, + #[error("{command} exited with status {status:?}\nstderr:\n{stderr}\nstdout:\n{stdout}")] + Failed { + command: String, + status: Option, + stdout: String, + stderr: String, + }, +} + +/// Install the Helm release for the provided topology counts. +pub async fn install_release( + assets: &RunnerAssets, + release: &str, + namespace: &str, + validators: usize, + executors: usize, +) -> Result<(), HelmError> { + let host_path_type = if assets.kzg_path.is_dir() { + "Directory" + } else { + "File" + }; + + let mut cmd = Command::new("helm"); + cmd.arg("install") + .arg(release) + .arg(&assets.chart_path) + .arg("--namespace") + .arg(namespace) + .arg("--create-namespace") + .arg("--wait") + .arg("--timeout") + .arg("5m") + .arg("--set") + .arg(format!("image={}", assets.image)) + .arg("--set") + .arg(format!("validators.count={validators}")) + .arg("--set") + .arg(format!("executors.count={executors}")) + .arg("--set") + .arg(format!("cfgsync.port={CFGSYNC_PORT}")) + .arg("--set") + .arg(format!("kzg.hostPath={}", assets.kzg_path.display())) + .arg("--set") + .arg(format!("kzg.hostPathType={host_path_type}")) + .arg("-f") + .arg(&assets.values_file) + .arg("--set-file") + .arg(format!("cfgsync.config={}", assets.cfgsync_file.display())) + .arg("--set-file") + .arg(format!( + "scripts.runCfgsyncSh={}", + assets.run_cfgsync_script.display() + )) + .arg("--set-file") + .arg(format!( + "scripts.runNomosNodeSh={}", + assets.run_nomos_node_script.display() + )) + .arg("--set-file") + .arg(format!( + "scripts.runNomosExecutorSh={}", + assets.run_nomos_executor_script.display() + )) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + if let Ok(root) = workspace_root() { + cmd.current_dir(root); + } + + let command = format!("helm install {release}"); + let output = run_helm_command(cmd, &command).await?; + + 
if std::env::var("K8S_RUNNER_DEBUG").is_ok() { + println!( + "[k8s-runner] {command} stdout:\n{}", + String::from_utf8_lossy(&output.stdout) + ); + println!( + "[k8s-runner] {command} stderr:\n{}", + String::from_utf8_lossy(&output.stderr) + ); + } + + Ok(()) +} + +/// Uninstall the release and namespace resources. +pub async fn uninstall_release(release: &str, namespace: &str) -> Result<(), HelmError> { + let mut cmd = Command::new("helm"); + cmd.arg("uninstall") + .arg(release) + .arg("--namespace") + .arg(namespace) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + println!("[k8s-runner] issuing `helm uninstall {release}` in namespace `{namespace}`"); + + run_helm_command(cmd, &format!("helm uninstall {release}")).await?; + println!( + "[k8s-runner] helm uninstall {release} completed successfully (namespace `{namespace}`)" + ); + Ok(()) +} + +async fn run_helm_command( + mut cmd: Command, + command: &str, +) -> Result { + cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + let output = cmd.output().await.map_err(|source| HelmError::Spawn { + command: command.to_owned(), + source, + })?; + + if output.status.success() { + Ok(output) + } else { + Err(HelmError::Failed { + command: command.to_owned(), + status: output.status.code(), + stdout: String::from_utf8_lossy(&output.stdout).into_owned(), + stderr: String::from_utf8_lossy(&output.stderr).into_owned(), + }) + } +} diff --git a/testing-framework/runners/k8s/src/host.rs b/testing-framework/runners/k8s/src/host.rs new file mode 100644 index 0000000..6bafd52 --- /dev/null +++ b/testing-framework/runners/k8s/src/host.rs @@ -0,0 +1,20 @@ +use std::env; + +const NODE_HOST_ENV: &str = "K8S_RUNNER_NODE_HOST"; +const KUBE_SERVICE_HOST_ENV: &str = "KUBERNETES_SERVICE_HOST"; + +/// Returns the hostname or IP used to reach `NodePorts` exposed by the cluster. +/// Prefers `K8S_RUNNER_NODE_HOST`, then the standard `KUBERNETES_SERVICE_HOST` +/// (e.g. 
`kubernetes.docker.internal` on Docker Desktop), and finally falls +/// back to `127.0.0.1`. +pub fn node_host() -> String { + if let Ok(host) = env::var(NODE_HOST_ENV) { + return host; + } + if let Ok(host) = env::var(KUBE_SERVICE_HOST_ENV) + && !host.is_empty() + { + return host; + } + "127.0.0.1".to_owned() +} diff --git a/testing-framework/runners/k8s/src/lib.rs b/testing-framework/runners/k8s/src/lib.rs new file mode 100644 index 0000000..216941d --- /dev/null +++ b/testing-framework/runners/k8s/src/lib.rs @@ -0,0 +1,11 @@ +mod assets; +mod block_feed; +mod cleanup; +mod cluster; +mod deployer; +mod helm; +mod host; +mod logs; +mod wait; + +pub use deployer::{K8sDeployer, K8sRunnerError}; diff --git a/testing-framework/runners/k8s/src/logs.rs b/testing-framework/runners/k8s/src/logs.rs new file mode 100644 index 0000000..eea910a --- /dev/null +++ b/testing-framework/runners/k8s/src/logs.rs @@ -0,0 +1,44 @@ +use k8s_openapi::api::core::v1::Pod; +use kube::{ + Api, Client, + api::{ListParams, LogParams}, +}; +use tracing::{info, warn}; + +pub async fn dump_namespace_logs(client: &Client, namespace: &str) { + let pod_names = match list_pod_names(client, namespace).await { + Ok(names) => names, + Err(err) => { + warn!("[k8s-runner] failed to list pods in namespace {namespace}: {err}"); + return; + } + }; + + for pod_name in pod_names { + stream_pod_logs(client, namespace, &pod_name).await; + } +} + +async fn list_pod_names(client: &Client, namespace: &str) -> Result, kube::Error> { + let list = Api::::namespaced(client.clone(), namespace) + .list(&ListParams::default()) + .await?; + Ok(list + .into_iter() + .filter_map(|pod| pod.metadata.name) + .collect()) +} + +async fn stream_pod_logs(client: &Client, namespace: &str, pod_name: &str) { + let pods: Api = Api::namespaced(client.clone(), namespace); + let params = LogParams { + follow: false, + tail_lines: Some(500), + ..Default::default() + }; + + match pods.logs(pod_name, ¶ms).await { + Ok(log) => 
info!("[k8s-runner] pod {pod_name} logs:\n{log}"), + Err(err) => warn!("[k8s-runner] failed to fetch logs for pod {pod_name}: {err}"), + } +} diff --git a/testing-framework/runners/k8s/src/wait.rs b/testing-framework/runners/k8s/src/wait.rs new file mode 100644 index 0000000..8cd176b --- /dev/null +++ b/testing-framework/runners/k8s/src/wait.rs @@ -0,0 +1,496 @@ +use std::{ + net::{Ipv4Addr, TcpListener, TcpStream}, + process::{Command as StdCommand, Stdio}, + thread, + time::Duration, +}; + +use k8s_openapi::api::{apps::v1::Deployment, core::v1::Service}; +use kube::{Api, Client, Error as KubeError}; +use testing_framework_core::scenario::http_probe::{self, HttpReadinessError, NodeRole}; +use thiserror::Error; +use tokio::time::sleep; + +use crate::host::node_host; + +const DEPLOYMENT_TIMEOUT: Duration = Duration::from_secs(180); +const NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(240); +const NODE_HTTP_PROBE_TIMEOUT: Duration = Duration::from_secs(30); +const HTTP_POLL_INTERVAL: Duration = Duration::from_secs(1); +const PROMETHEUS_HTTP_PORT: u16 = 9090; +const PROMETHEUS_HTTP_TIMEOUT: Duration = Duration::from_secs(240); +const PROMETHEUS_HTTP_PROBE_TIMEOUT: Duration = Duration::from_secs(30); +const PROMETHEUS_SERVICE_NAME: &str = "prometheus"; + +/// Container and host-side HTTP ports for a node in the Helm chart values. +#[derive(Clone, Copy, Debug)] +pub struct NodeConfigPorts { + pub api: u16, + pub testing: u16, +} + +/// Host-facing NodePorts for a node. +#[derive(Clone, Copy, Debug)] +pub struct NodePortAllocation { + pub api: u16, + pub testing: u16, +} + +/// All port assignments for the cluster plus Prometheus. +#[derive(Debug)] +pub struct ClusterPorts { + pub validators: Vec, + pub executors: Vec, + pub prometheus: u16, +} + +/// Success result from waiting for the cluster: host ports and forward handles. 
+#[derive(Debug)] +pub struct ClusterReady { + pub ports: ClusterPorts, + pub port_forwards: Vec, +} + +#[derive(Debug, Error)] +/// Failures while waiting for Kubernetes deployments or endpoints. +pub enum ClusterWaitError { + #[error("deployment {name} in namespace {namespace} did not become ready within {timeout:?}")] + DeploymentTimeout { + name: String, + namespace: String, + timeout: Duration, + }, + #[error("failed to fetch deployment {name}: {source}")] + DeploymentFetch { + name: String, + #[source] + source: KubeError, + }, + #[error("failed to fetch service {service}: {source}")] + ServiceFetch { + service: String, + #[source] + source: KubeError, + }, + #[error("service {service} did not allocate a node port for {port}")] + NodePortUnavailable { service: String, port: u16 }, + #[error("cluster must have at least one validator")] + MissingValidator, + #[error( + "timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", + role = role.label() + )] + NodeHttpTimeout { + role: NodeRole, + port: u16, + timeout: Duration, + }, + #[error("timeout waiting for prometheus readiness on NodePort {port}")] + PrometheusTimeout { port: u16 }, + #[error("failed to start port-forward for service {service} port {port}: {source}")] + PortForward { + service: String, + port: u16, + #[source] + source: anyhow::Error, + }, +} + +pub async fn wait_for_deployment_ready( + client: &Client, + namespace: &str, + name: &str, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + let mut elapsed = Duration::ZERO; + let interval = Duration::from_secs(2); + + while elapsed <= timeout { + match Api::::namespaced(client.clone(), namespace) + .get(name) + .await + { + Ok(deployment) => { + let desired = deployment + .spec + .as_ref() + .and_then(|spec| spec.replicas) + .unwrap_or(1); + let ready = deployment + .status + .as_ref() + .and_then(|status| status.ready_replicas) + .unwrap_or(0); + if ready >= desired { + return Ok(()); + } + } + Err(err) => { + return 
Err(ClusterWaitError::DeploymentFetch {
                    name: name.to_owned(),
                    source: err,
                });
            }
        }

        sleep(interval).await;
        elapsed += interval;
    }

    Err(ClusterWaitError::DeploymentTimeout {
        name: name.to_owned(),
        namespace: namespace.to_owned(),
        timeout,
    })
}

/// Poll `service_name` (up to ~120s) until its spec exposes a NodePort for
/// `service_port`, returning that host-facing port.
///
/// # Errors
/// Fails when the service cannot be fetched, or when no NodePort is
/// allocated within the polling window.
pub async fn find_node_port(
    client: &Client,
    namespace: &str,
    service_name: &str,
    service_port: u16,
) -> Result<u16, ClusterWaitError> {
    let interval = Duration::from_secs(1);
    for _ in 0..120 {
        // NOTE(review): turbofish reconstructed from a garbled source —
        // namespaced `Service` lookup matches the `spec.ports` usage below.
        match Api::<Service>::namespaced(client.clone(), namespace)
            .get(service_name)
            .await
        {
            Ok(service) => {
                // `service` is owned and unused afterwards, so move the spec
                // out instead of cloning it.
                if let Some(spec) = service.spec
                    && let Some(ports) = spec.ports
                {
                    for port in ports {
                        if port.port == i32::from(service_port)
                            && let Some(node_port) = port.node_port
                        {
                            // Kubernetes NodePorts live in 30000-32767, so the
                            // i32 -> u16 narrowing cannot truncate in practice.
                            return Ok(node_port as u16);
                        }
                    }
                }
            }
            Err(err) => {
                return Err(ClusterWaitError::ServiceFetch {
                    service: service_name.to_owned(),
                    source: err,
                });
            }
        }
        sleep(interval).await;
    }

    Err(ClusterWaitError::NodePortUnavailable {
        service: service_name.to_owned(),
        port: service_port,
    })
}

/// Wait for all node deployments, resolve their NodePorts (falling back to
/// `kubectl port-forward` when NodePorts are unreachable from the host), and
/// return the resulting port map plus any forward handles to keep alive.
///
/// # Errors
/// Fails when the topology has no validator, or when any deployment, port
/// lookup, or HTTP readiness probe does not succeed.
pub async fn wait_for_cluster_ready(
    client: &Client,
    namespace: &str,
    release: &str,
    validator_ports: &[NodeConfigPorts],
    executor_ports: &[NodeConfigPorts],
) -> Result<ClusterReady, ClusterWaitError> {
    if validator_ports.is_empty() {
        return Err(ClusterWaitError::MissingValidator);
    }

    let mut validator_allocations = Vec::with_capacity(validator_ports.len());

    for (index, ports) in validator_ports.iter().enumerate() {
        let name = format!("{release}-validator-{index}");
        wait_for_deployment_ready(client, namespace, &name, DEPLOYMENT_TIMEOUT).await?;
        let api_port = find_node_port(client, namespace, &name, ports.api).await?;
        let testing_port = find_node_port(client, namespace, &name, ports.testing).await?;
        validator_allocations.push(NodePortAllocation {
            api: api_port,
            testing: testing_port,
        });
    }

    let mut port_forwards = Vec::new();

    let validator_api_ports: Vec<u16> = validator_allocations
        .iter()
+ .map(|ports| ports.api) + .collect(); + if wait_for_node_http_nodeport( + &validator_api_ports, + NodeRole::Validator, + NODE_HTTP_PROBE_TIMEOUT, + ) + .await + .is_err() + { + // Fall back to port-forwarding when NodePorts are unreachable from the host. + validator_allocations.clear(); + port_forwards = port_forward_group( + namespace, + release, + "validator", + validator_ports, + &mut validator_allocations, + )?; + let validator_api_ports: Vec = validator_allocations + .iter() + .map(|ports| ports.api) + .collect(); + if let Err(err) = + wait_for_node_http_port_forward(&validator_api_ports, NodeRole::Validator).await + { + kill_port_forwards(&mut port_forwards); + return Err(err); + } + } + + let mut executor_allocations = Vec::with_capacity(executor_ports.len()); + for (index, ports) in executor_ports.iter().enumerate() { + let name = format!("{release}-executor-{index}"); + wait_for_deployment_ready(client, namespace, &name, DEPLOYMENT_TIMEOUT).await?; + let api_port = find_node_port(client, namespace, &name, ports.api).await?; + let testing_port = find_node_port(client, namespace, &name, ports.testing).await?; + executor_allocations.push(NodePortAllocation { + api: api_port, + testing: testing_port, + }); + } + + let executor_api_ports: Vec = executor_allocations.iter().map(|ports| ports.api).collect(); + if !executor_allocations.is_empty() + && wait_for_node_http_nodeport( + &executor_api_ports, + NodeRole::Executor, + NODE_HTTP_PROBE_TIMEOUT, + ) + .await + .is_err() + { + executor_allocations.clear(); + match port_forward_group( + namespace, + release, + "executor", + executor_ports, + &mut executor_allocations, + ) { + Ok(forwards) => port_forwards.extend(forwards), + Err(err) => { + kill_port_forwards(&mut port_forwards); + return Err(err); + } + } + let executor_api_ports: Vec = + executor_allocations.iter().map(|ports| ports.api).collect(); + if let Err(err) = + wait_for_node_http_port_forward(&executor_api_ports, NodeRole::Executor).await + { + 
kill_port_forwards(&mut port_forwards); + return Err(err); + } + } + + let mut prometheus_port = find_node_port( + client, + namespace, + PROMETHEUS_SERVICE_NAME, + PROMETHEUS_HTTP_PORT, + ) + .await?; + if wait_for_prometheus_http_nodeport(prometheus_port, PROMETHEUS_HTTP_PROBE_TIMEOUT) + .await + .is_err() + { + let (local_port, forward) = + port_forward_service(namespace, PROMETHEUS_SERVICE_NAME, PROMETHEUS_HTTP_PORT) + .map_err(|err| { + kill_port_forwards(&mut port_forwards); + err + })?; + prometheus_port = local_port; + port_forwards.push(forward); + if let Err(err) = + wait_for_prometheus_http_port_forward(prometheus_port, PROMETHEUS_HTTP_TIMEOUT).await + { + kill_port_forwards(&mut port_forwards); + return Err(err); + } + } + + Ok(ClusterReady { + ports: ClusterPorts { + validators: validator_allocations, + executors: executor_allocations, + prometheus: prometheus_port, + }, + port_forwards, + }) +} + +async fn wait_for_node_http_nodeport( + ports: &[u16], + role: NodeRole, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + let host = node_host(); + wait_for_node_http_on_host(ports, role, &host, timeout).await +} + +async fn wait_for_node_http_port_forward( + ports: &[u16], + role: NodeRole, +) -> Result<(), ClusterWaitError> { + wait_for_node_http_on_host(ports, role, "127.0.0.1", NODE_HTTP_TIMEOUT).await +} + +async fn wait_for_node_http_on_host( + ports: &[u16], + role: NodeRole, + host: &str, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + http_probe::wait_for_http_ports_with_host(ports, role, host, timeout, HTTP_POLL_INTERVAL) + .await + .map_err(map_http_error) +} + +const fn map_http_error(error: HttpReadinessError) -> ClusterWaitError { + ClusterWaitError::NodeHttpTimeout { + role: error.role(), + port: error.port(), + timeout: error.timeout(), + } +} + +pub async fn wait_for_prometheus_http_nodeport( + port: u16, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + let host = node_host(); + 
wait_for_prometheus_http(&host, port, timeout).await +} + +pub async fn wait_for_prometheus_http_port_forward( + port: u16, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + wait_for_prometheus_http("127.0.0.1", port, timeout).await +} + +pub async fn wait_for_prometheus_http( + host: &str, + port: u16, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + let client = reqwest::Client::new(); + let url = format!("http://{host}:{port}/-/ready"); + + for _ in 0..timeout.as_secs() { + if let Ok(resp) = client.get(&url).send().await + && resp.status().is_success() + { + return Ok(()); + } + sleep(Duration::from_secs(1)).await; + } + + Err(ClusterWaitError::PrometheusTimeout { port }) +} + +fn port_forward_group( + namespace: &str, + release: &str, + kind: &str, + ports: &[NodeConfigPorts], + allocations: &mut Vec, +) -> Result, ClusterWaitError> { + let mut forwards = Vec::new(); + for (index, ports) in ports.iter().enumerate() { + let service = format!("{release}-{kind}-{index}"); + let (api_port, api_forward) = match port_forward_service(namespace, &service, ports.api) { + Ok(forward) => forward, + Err(err) => { + kill_port_forwards(&mut forwards); + return Err(err); + } + }; + let (testing_port, testing_forward) = + match port_forward_service(namespace, &service, ports.testing) { + Ok(forward) => forward, + Err(err) => { + kill_port_forwards(&mut forwards); + return Err(err); + } + }; + allocations.push(NodePortAllocation { + api: api_port, + testing: testing_port, + }); + forwards.push(api_forward); + forwards.push(testing_forward); + } + Ok(forwards) +} + +fn port_forward_service( + namespace: &str, + service: &str, + remote_port: u16, +) -> Result<(u16, std::process::Child), ClusterWaitError> { + let local_port = allocate_local_port().map_err(|source| ClusterWaitError::PortForward { + service: service.to_owned(), + port: remote_port, + source, + })?; + + let mut child = StdCommand::new("kubectl") + .arg("port-forward") + .arg("-n") + 
.arg(namespace) + .arg(format!("svc/{service}")) + .arg(format!("{local_port}:{remote_port}")) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .map_err(|source| ClusterWaitError::PortForward { + service: service.to_owned(), + port: remote_port, + source: source.into(), + })?; + + for _ in 0..20 { + if let Ok(Some(status)) = child.try_wait() { + return Err(ClusterWaitError::PortForward { + service: service.to_owned(), + port: remote_port, + source: anyhow::anyhow!("kubectl exited with {status}"), + }); + } + if TcpStream::connect((Ipv4Addr::LOCALHOST, local_port)).is_ok() { + return Ok((local_port, child)); + } + thread::sleep(Duration::from_millis(250)); + } + + let _ = child.kill(); + Err(ClusterWaitError::PortForward { + service: service.to_owned(), + port: remote_port, + source: anyhow::anyhow!("port-forward did not become ready"), + }) +} + +fn allocate_local_port() -> anyhow::Result { + let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0))?; + let port = listener.local_addr()?.port(); + drop(listener); + Ok(port) +} + +fn kill_port_forwards(handles: &mut Vec) { + for handle in handles.iter_mut() { + let _ = handle.kill(); + let _ = handle.wait(); + } + handles.clear(); +} diff --git a/testing-framework/runners/local/Cargo.toml b/testing-framework/runners/local/Cargo.toml new file mode 100644 index 0000000..e29a929 --- /dev/null +++ b/testing-framework/runners/local/Cargo.toml @@ -0,0 +1,19 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-runner-local" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +async-trait = "0.1" +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } +tracing = { workspace = true } diff --git a/testing-framework/runners/local/src/lib.rs b/testing-framework/runners/local/src/lib.rs new file 
mode 100644 index 0000000..c304ecb --- /dev/null +++ b/testing-framework/runners/local/src/lib.rs @@ -0,0 +1,3 @@ +mod runner; + +pub use runner::{LocalDeployer, LocalDeployerError}; diff --git a/testing-framework/runners/local/src/runner.rs b/testing-framework/runners/local/src/runner.rs new file mode 100644 index 0000000..4f5114f --- /dev/null +++ b/testing-framework/runners/local/src/runner.rs @@ -0,0 +1,149 @@ +use async_trait::async_trait; +use testing_framework_core::{ + scenario::{ + BlockFeed, BlockFeedTask, Deployer, DynError, Metrics, NodeClients, RunContext, Runner, + Scenario, ScenarioError, spawn_block_feed, + }, + topology::{ReadinessError, Topology}, +}; +use thiserror::Error; +use tracing::info; + +/// Spawns validators and executors as local processes, reusing the existing +/// integration harness. +#[derive(Clone)] +pub struct LocalDeployer { + membership_check: bool, +} + +/// Errors surfaced by the local deployer while driving a scenario. +#[derive(Debug, Error)] +pub enum LocalDeployerError { + #[error("readiness probe failed: {source}")] + ReadinessFailed { + #[source] + source: ReadinessError, + }, + #[error("workload failed: {source}")] + WorkloadFailed { + #[source] + source: DynError, + }, + #[error("expectations failed: {source}")] + ExpectationsFailed { + #[source] + source: DynError, + }, +} + +impl From for LocalDeployerError { + fn from(value: ScenarioError) -> Self { + match value { + ScenarioError::Workload(source) => Self::WorkloadFailed { source }, + ScenarioError::ExpectationCapture(source) | ScenarioError::Expectations(source) => { + Self::ExpectationsFailed { source } + } + } + } +} + +#[async_trait] +impl Deployer<()> for LocalDeployer { + type Error = LocalDeployerError; + + async fn deploy(&self, scenario: &Scenario<()>) -> Result { + info!( + validators = scenario.topology().validators().len(), + executors = scenario.topology().executors().len(), + membership_checks = self.membership_check, + "starting local deployment" + 
); + let topology = Self::prepare_topology(scenario, self.membership_check).await?; + let node_clients = NodeClients::from_topology(scenario.topology(), &topology); + + let (block_feed, block_feed_guard) = spawn_block_feed_with(&node_clients).await?; + + let context = RunContext::new( + scenario.topology().clone(), + Some(topology), + node_clients, + scenario.duration(), + Metrics::empty(), + block_feed, + None, + ); + + Ok(Runner::new(context, Some(Box::new(block_feed_guard)))) + } +} + +impl LocalDeployer { + #[must_use] + /// Construct with membership readiness checks enabled. + pub fn new() -> Self { + Self::default() + } + + #[must_use] + /// Enable or disable membership readiness probes. + pub const fn with_membership_check(mut self, enabled: bool) -> Self { + self.membership_check = enabled; + self + } + + async fn prepare_topology( + scenario: &Scenario<()>, + membership_check: bool, + ) -> Result { + let descriptors = scenario.topology(); + info!("spawning local validators/executors"); + let topology = descriptors.clone().spawn_local().await; + + let skip_membership = !membership_check; + if let Err(source) = wait_for_readiness(&topology, skip_membership).await { + return Err(LocalDeployerError::ReadinessFailed { source }); + } + + info!("local nodes are ready"); + Ok(topology) + } +} + +impl Default for LocalDeployer { + fn default() -> Self { + Self { + membership_check: true, + } + } +} + +async fn wait_for_readiness( + topology: &Topology, + skip_membership: bool, +) -> Result<(), ReadinessError> { + info!("waiting for local network readiness"); + topology.wait_network_ready().await?; + if !skip_membership { + info!("waiting for membership readiness"); + topology.wait_membership_ready().await?; + } + info!("waiting for DA balancer readiness"); + topology.wait_da_balancer_ready().await +} + +async fn spawn_block_feed_with( + node_clients: &NodeClients, +) -> Result<(BlockFeed, BlockFeedTask), LocalDeployerError> { + let block_source_client = 
node_clients.random_validator().cloned().ok_or_else(|| { + LocalDeployerError::WorkloadFailed { + source: "block feed requires at least one validator".into(), + } + })?; + + info!("starting block feed"); + spawn_block_feed(block_source_client) + .await + .map_err(|source| LocalDeployerError::WorkloadFailed { + source: source.into(), + }) +} diff --git a/testing-framework/tools/cfgsync/Cargo.toml b/testing-framework/tools/cfgsync/Cargo.toml new file mode 100644 index 0000000..554d8f4 --- /dev/null +++ b/testing-framework/tools/cfgsync/Cargo.toml @@ -0,0 +1,38 @@ +[package] +categories = { workspace = true } +description = { workspace = true } +edition = { workspace = true } +keywords = { workspace = true } +license = { workspace = true } +name = "cfgsync" +readme = { workspace = true } +repository = { workspace = true } +version = { workspace = true } + +[lints] +workspace = true + +[dependencies] +axum = { default-features = false, features = ["http1", "http2", "json", "tokio"], version = "0.7.5" } +clap = { default-features = false, version = "4" } +groth16 = { workspace = true } +hex = { workspace = true } +key-management-system = { workspace = true } +nomos-core = { workspace = true } +nomos-da-network-core = { workspace = true } +nomos-executor = { workspace = true } +nomos-libp2p = { workspace = true } +nomos-node = { workspace = true } +nomos-tracing-service = { workspace = true } +nomos-utils = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +serde = { default-features = false, version = "1" } +serde_json = { default-features = false, version = "1.0" } +serde_path_to_error = "0.1" +serde_with = { workspace = true } +serde_yaml = "0.9" +subnetworks-assignations = { workspace = true } +testing-framework-config = { workspace = true } +tokio = { default-features = false, features = ["macros", "net", "rt-multi-thread"], version = "1" } +tracing = { workspace = true } diff --git 
a/testing-framework/tools/cfgsync/src/bin/cfgsync-client.rs b/testing-framework/tools/cfgsync/src/bin/cfgsync-client.rs new file mode 100644 index 0000000..67a7da7 --- /dev/null +++ b/testing-framework/tools/cfgsync/src/bin/cfgsync-client.rs @@ -0,0 +1,151 @@ +use std::{ + collections::{HashMap, HashSet}, + env, fs, + net::Ipv4Addr, + process, + str::FromStr, +}; + +use cfgsync::{ + client::{FetchedConfig, get_config}, + server::ClientIp, +}; +use nomos_executor::config::Config as ExecutorConfig; +use nomos_libp2p::PeerId; +use nomos_node::Config as ValidatorConfig; +use serde::{Serialize, de::DeserializeOwned}; +use subnetworks_assignations::{MembershipCreator, MembershipHandler, SubnetworkId}; + +fn parse_ip(ip_str: &str) -> Ipv4Addr { + ip_str.parse().unwrap_or_else(|_| { + eprintln!("Invalid IP format, defaulting to 127.0.0.1"); + Ipv4Addr::LOCALHOST + }) +} + +fn parse_assignations(raw: &serde_json::Value) -> Option>> { + let assignations = raw + .pointer("/da_network/membership/assignations")? + .as_object()?; + let mut result = HashMap::new(); + + for (subnetwork, peers) in assignations { + let subnetwork_id = SubnetworkId::from_str(subnetwork).ok()?; + let mut members = HashSet::new(); + + for peer in peers.as_array()? 
{ + if let Some(peer) = peer.as_str().and_then(|p| PeerId::from_str(p).ok()) { + members.insert(peer); + } + } + + result.insert(subnetwork_id, members); + } + + Some(result) +} + +fn apply_da_assignations< + Membership: MembershipCreator + MembershipHandler, +>( + membership: &Membership, + assignations: HashMap>, +) -> Membership { + let session_id = membership.session_id(); + membership.init(session_id, assignations) +} + +async fn pull_to_file( + payload: ClientIp, + url: &str, + config_file: &str, + apply_membership: F, +) -> Result<(), String> +where + Config: Serialize + DeserializeOwned, + F: FnOnce(&mut Config, HashMap>), +{ + let FetchedConfig { mut config, raw } = get_config::(payload, url).await?; + + if let Some(assignations) = parse_assignations(&raw) { + apply_membership(&mut config, assignations); + } + + let yaml = serde_yaml::to_string(&config) + .map_err(|err| format!("Failed to serialize config to YAML: {err}"))?; + + fs::write(config_file, yaml).map_err(|err| format!("Failed to write config to file: {err}"))?; + + println!("Config saved to {config_file}"); + Ok(()) +} + +#[tokio::main] +async fn main() { + let config_file_path = env::var("CFG_FILE_PATH").unwrap_or_else(|_| "config.yaml".to_owned()); + let server_addr = + env::var("CFG_SERVER_ADDR").unwrap_or_else(|_| "http://127.0.0.1:4400".to_owned()); + let ip = parse_ip(&env::var("CFG_HOST_IP").unwrap_or_else(|_| "127.0.0.1".to_owned())); + let identifier = + env::var("CFG_HOST_IDENTIFIER").unwrap_or_else(|_| "unidentified-node".to_owned()); + + let host_kind = env::var("CFG_HOST_KIND").unwrap_or_else(|_| "validator".to_owned()); + + let network_port = env::var("CFG_NETWORK_PORT") + .ok() + .and_then(|v| v.parse().ok()); + let da_port = env::var("CFG_DA_PORT").ok().and_then(|v| v.parse().ok()); + let blend_port = env::var("CFG_BLEND_PORT").ok().and_then(|v| v.parse().ok()); + let api_port = env::var("CFG_API_PORT").ok().and_then(|v| v.parse().ok()); + let testing_http_port = 
env::var("CFG_TESTING_HTTP_PORT") + .ok() + .and_then(|v| v.parse().ok()); + + let payload = ClientIp { + ip, + identifier, + network_port, + da_port, + blend_port, + api_port, + testing_http_port, + }; + + let node_config_endpoint = match host_kind.as_str() { + "executor" => format!("{server_addr}/executor"), + _ => format!("{server_addr}/validator"), + }; + + let config_result = match host_kind.as_str() { + "executor" => { + pull_to_file::( + payload, + &node_config_endpoint, + &config_file_path, + |config, assignations| { + config.da_network.membership = + apply_da_assignations(&config.da_network.membership, assignations); + }, + ) + .await + } + _ => { + pull_to_file::( + payload, + &node_config_endpoint, + &config_file_path, + |config, assignations| { + config.da_network.membership = + apply_da_assignations(&config.da_network.membership, assignations); + }, + ) + .await + } + }; + + // Handle error if the config request fails + if let Err(err) = config_result { + eprintln!("Error: {err}"); + process::exit(1); + } +} diff --git a/testing-framework/tools/cfgsync/src/bin/cfgsync-server.rs b/testing-framework/tools/cfgsync/src/bin/cfgsync-server.rs new file mode 100644 index 0000000..0602e76 --- /dev/null +++ b/testing-framework/tools/cfgsync/src/bin/cfgsync-server.rs @@ -0,0 +1,29 @@ +use std::{path::PathBuf, process}; + +use cfgsync::server::{CfgSyncConfig, cfgsync_app}; +use clap::Parser; +use tokio::net::TcpListener; + +#[derive(Parser, Debug)] +#[command(about = "CfgSync")] +struct Args { + config: PathBuf, +} + +#[tokio::main] +async fn main() { + let cli = Args::parse(); + + let config = CfgSyncConfig::load_from_file(&cli.config).unwrap_or_else(|err| { + eprintln!("{err}"); + process::exit(1); + }); + + let port = config.port; + let app = cfgsync_app(config.into()); + + println!("Server running on http://0.0.0.0:{port}"); + let listener = TcpListener::bind(&format!("0.0.0.0:{port}")).await.unwrap(); + + axum::serve(listener, app).await.unwrap(); +} diff 
--git a/testing-framework/tools/cfgsync/src/client.rs b/testing-framework/tools/cfgsync/src/client.rs new file mode 100644 index 0000000..6e26c89 --- /dev/null +++ b/testing-framework/tools/cfgsync/src/client.rs @@ -0,0 +1,46 @@ +use reqwest::{Client, Response}; +use serde::de::DeserializeOwned; + +use crate::server::ClientIp; + +#[derive(Debug)] +pub struct FetchedConfig { + pub config: Config, + pub raw: serde_json::Value, +} + +async fn deserialize_response( + response: Response, +) -> Result, String> { + let body = response + .text() + .await + .map_err(|error| format!("Failed to read response body: {error}"))?; + let raw: serde_json::Value = + serde_json::from_str(&body).map_err(|error| format!("Failed to parse body: {error}"))?; + let mut json_deserializer = serde_json::Deserializer::from_str(&body); + let config = serde_path_to_error::deserialize(&mut json_deserializer) + .map_err(|error| format!("Failed to deserialize body: {error}, raw body: {body}"))?; + + Ok(FetchedConfig { config, raw }) +} + +pub async fn get_config( + payload: ClientIp, + url: &str, +) -> Result, String> { + let client = Client::new(); + + let response = client + .post(url) + .json(&payload) + .send() + .await + .map_err(|err| format!("Failed to send IP announcement: {err}"))?; + + if !response.status().is_success() { + return Err(format!("Server error: {:?}", response.status())); + } + + deserialize_response(response).await +} diff --git a/testing-framework/tools/cfgsync/src/config.rs b/testing-framework/tools/cfgsync/src/config.rs new file mode 100644 index 0000000..26269bf --- /dev/null +++ b/testing-framework/tools/cfgsync/src/config.rs @@ -0,0 +1,518 @@ +use std::{collections::HashMap, net::Ipv4Addr, str::FromStr as _}; + +use groth16::fr_to_bytes; +use hex; +use key_management_system::{ + backend::preload::PreloadKMSBackendSettings, + keys::{Ed25519Key, Key, ZkKey}, +}; +use nomos_core::{ + mantle::GenesisTx as _, + sdp::{Locator, ServiceType}, +}; +use nomos_libp2p::{Multiaddr, 
PeerId, Protocol, ed25519}; +use nomos_tracing_service::{LoggerLayer, MetricsLayer, TracingLayer, TracingSettings}; +use nomos_utils::net::get_available_udp_port; +use rand::{Rng as _, thread_rng}; +use testing_framework_config::topology::configs::{ + GeneralConfig, + api::GeneralApiConfig, + blend::{GeneralBlendConfig, create_blend_configs}, + bootstrap::{SHORT_PROLONGED_BOOTSTRAP_PERIOD, create_bootstrap_configs}, + consensus::{ + ConsensusParams, GeneralConsensusConfig, ProviderInfo, create_consensus_configs, + create_genesis_tx_with_declarations, + }, + da::{DaParams, GeneralDaConfig, create_da_configs}, + network::{NetworkParams, create_network_configs}, + time::default_time_config, + tracing::GeneralTracingConfig, + wallet::WalletConfig, +}; + +const DEFAULT_LIBP2P_NETWORK_PORT: u16 = 3000; +const DEFAULT_DA_NETWORK_PORT: u16 = 3300; +const DEFAULT_BLEND_PORT: u16 = 3400; +const DEFAULT_API_PORT: u16 = 18080; + +#[derive(Copy, Clone, Eq, PartialEq, Hash)] +pub enum HostKind { + Validator, + Executor, +} + +#[derive(Eq, PartialEq, Hash, Clone)] +pub struct Host { + pub kind: HostKind, + pub ip: Ipv4Addr, + pub identifier: String, + pub network_port: u16, + pub da_network_port: u16, + pub blend_port: u16, + pub api_port: u16, + pub testing_http_port: u16, +} + +#[derive(Clone, Copy)] +pub struct PortOverrides { + pub network_port: Option, + pub da_network_port: Option, + pub blend_port: Option, + pub api_port: Option, + pub testing_http_port: Option, +} + +impl Host { + fn from_parts(kind: HostKind, ip: Ipv4Addr, identifier: String, ports: PortOverrides) -> Self { + Self { + kind, + ip, + identifier, + network_port: ports.network_port.unwrap_or(DEFAULT_LIBP2P_NETWORK_PORT), + da_network_port: ports.da_network_port.unwrap_or(DEFAULT_DA_NETWORK_PORT), + blend_port: ports.blend_port.unwrap_or(DEFAULT_BLEND_PORT), + api_port: ports.api_port.unwrap_or(DEFAULT_API_PORT), + testing_http_port: ports.testing_http_port.unwrap_or(DEFAULT_API_PORT + 1), + } + } + + 
#[must_use] + pub fn validator_from_ip(ip: Ipv4Addr, identifier: String, ports: PortOverrides) -> Self { + Self::from_parts(HostKind::Validator, ip, identifier, ports) + } + + #[must_use] + pub fn executor_from_ip(ip: Ipv4Addr, identifier: String, ports: PortOverrides) -> Self { + Self::from_parts(HostKind::Executor, ip, identifier, ports) + } +} + +#[must_use] +pub fn create_node_configs( + consensus_params: &ConsensusParams, + da_params: &DaParams, + tracing_settings: &TracingSettings, + wallet_config: &WalletConfig, + ids: Option>, + da_ports: Option>, + blend_ports: Option>, + hosts: Vec, +) -> HashMap { + let mut hosts = hosts; + hosts.sort_by_key(|host| { + let index = host + .identifier + .rsplit('-') + .next() + .and_then(|raw| raw.parse::().ok()) + .unwrap_or(0); + let kind = match host.kind { + HostKind::Validator => 0, + HostKind::Executor => 1, + }; + (kind, index) + }); + + assert_eq!( + hosts.len(), + consensus_params.n_participants, + "host count must match consensus participants" + ); + + let ids = ids.unwrap_or_else(|| { + let mut generated = vec![[0; 32]; consensus_params.n_participants]; + for id in &mut generated { + thread_rng().fill(id); + } + generated + }); + assert_eq!( + ids.len(), + consensus_params.n_participants, + "pre-generated ids must match participant count" + ); + + let ports = da_ports.unwrap_or_else(|| { + (0..consensus_params.n_participants) + .map(|_| get_available_udp_port().unwrap()) + .collect() + }); + assert_eq!( + ports.len(), + consensus_params.n_participants, + "da port list must match participant count" + ); + + let blend_ports = blend_ports.unwrap_or_else(|| hosts.iter().map(|h| h.blend_port).collect()); + assert_eq!( + blend_ports.len(), + consensus_params.n_participants, + "blend port list must match participant count" + ); + + let mut consensus_configs = create_consensus_configs(&ids, consensus_params, wallet_config); + let bootstrap_configs = create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let 
da_configs = create_da_configs(&ids, da_params, &ports); + let network_configs = create_network_configs(&ids, &NetworkParams::default()); + let blend_configs = create_blend_configs(&ids, &blend_ports); + let api_configs = hosts + .iter() + .map(|host| GeneralApiConfig { + address: format!("0.0.0.0:{}", host.api_port).parse().unwrap(), + testing_http_address: format!("0.0.0.0:{}", host.testing_http_port) + .parse() + .unwrap(), + }) + .collect::>(); + let mut configured_hosts = HashMap::new(); + + let initial_peer_templates: Vec> = network_configs + .iter() + .map(|cfg| cfg.backend.initial_peers.clone()) + .collect(); + let original_network_ports: Vec = network_configs + .iter() + .map(|cfg| cfg.backend.inner.port) + .collect(); + let peer_ids: Vec = ids + .iter() + .map(|bytes| { + let mut key_bytes = *bytes; + let secret = + ed25519::SecretKey::try_from_bytes(&mut key_bytes).expect("valid ed25519 key"); + PeerId::from_public_key(&ed25519::Keypair::from(secret).public().into()) + }) + .collect(); + + let host_network_init_peers = rewrite_initial_peers( + &initial_peer_templates, + &original_network_ports, + &hosts, + &peer_ids, + ); + + let providers = create_providers(&hosts, &consensus_configs, &blend_configs, &da_configs); + + // Update genesis TX to contain Blend and DA providers. + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + // Set Blend and DA keys in KMS of each node config. + let kms_configs = create_kms_configs(&blend_configs, &da_configs); + + for (i, host) in hosts.into_iter().enumerate() { + let consensus_config = consensus_configs[i].clone(); + let api_config = api_configs[i].clone(); + + // DA Libp2p network config. 
+ let mut da_config = da_configs[i].clone(); + da_config.listening_address = Multiaddr::from_str(&format!( + "/ip4/0.0.0.0/udp/{}/quic-v1", + host.da_network_port, + )) + .unwrap(); + if matches!(host.kind, HostKind::Validator) { + da_config.policy_settings.min_dispersal_peers = 0; + } + + // Libp2p network config. + let mut network_config = network_configs[i].clone(); + network_config.backend.inner.host = Ipv4Addr::from_str("0.0.0.0").unwrap(); + network_config.backend.inner.port = host.network_port; + network_config.backend.initial_peers = host_network_init_peers[i].clone(); + network_config.backend.inner.nat_config = nomos_libp2p::NatSettings::Static { + external_address: Multiaddr::from_str(&format!( + "/ip4/{}/udp/{}/quic-v1", + host.ip, host.network_port + )) + .unwrap(), + }; + + // Blend network config. + let mut blend_config = blend_configs[i].clone(); + blend_config.backend_core.listening_address = + Multiaddr::from_str(&format!("/ip4/0.0.0.0/udp/{}/quic-v1", host.blend_port)).unwrap(); + + // Tracing config. 
+ let tracing_config = + update_tracing_identifier(tracing_settings.clone(), host.identifier.clone()); + + // Time config + let time_config = default_time_config(); + + configured_hosts.insert( + host.clone(), + GeneralConfig { + consensus_config, + bootstrapping_config: bootstrap_configs[i].clone(), + da_config, + network_config, + blend_config, + api_config, + tracing_config, + time_config, + kms_config: kms_configs[i].clone(), + }, + ); + } + + configured_hosts +} + +fn create_providers( + hosts: &[Host], + consensus_configs: &[GeneralConsensusConfig], + blend_configs: &[GeneralBlendConfig], + da_configs: &[GeneralDaConfig], +) -> Vec { + let mut providers: Vec<_> = da_configs + .iter() + .enumerate() + .map(|(i, da_conf)| ProviderInfo { + service_type: ServiceType::DataAvailability, + provider_sk: da_conf.signer.clone(), + zk_sk: da_conf.secret_zk_key.clone(), + locator: Locator( + Multiaddr::from_str(&format!( + "/ip4/{}/udp/{}/quic-v1", + hosts[i].ip, hosts[i].da_network_port + )) + .unwrap(), + ), + note: consensus_configs[0].da_notes[i].clone(), + }) + .collect(); + providers.extend(blend_configs.iter().enumerate().map(|(i, blend_conf)| { + ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator( + Multiaddr::from_str(&format!( + "/ip4/{}/udp/{}/quic-v1", + hosts[i].ip, hosts[i].blend_port + )) + .unwrap(), + ), + note: consensus_configs[0].blend_notes[i].clone(), + } + })); + + providers +} + +fn rewrite_initial_peers( + templates: &[Vec], + original_ports: &[u16], + hosts: &[Host], + peer_ids: &[PeerId], +) -> Vec> { + templates + .iter() + .enumerate() + .map(|(node_idx, peers)| { + peers + .iter() + .filter_map(|addr| find_matching_host(addr, original_ports)) + .filter(|&peer_idx| peer_idx != node_idx) + .map(|peer_idx| { + Multiaddr::from_str(&format!( + "/ip4/{}/udp/{}/quic-v1/p2p/{}", + hosts[peer_idx].ip, hosts[peer_idx].network_port, 
peer_ids[peer_idx] + )) + .expect("valid peer multiaddr") + }) + .collect() + }) + .collect() +} + +fn find_matching_host(addr: &Multiaddr, original_ports: &[u16]) -> Option { + extract_udp_port(addr).and_then(|port| { + original_ports + .iter() + .position(|candidate| *candidate == port) + }) +} + +fn extract_udp_port(addr: &Multiaddr) -> Option { + addr.iter().find_map(|protocol| { + if let Protocol::Udp(port) = protocol { + Some(port) + } else { + None + } + }) +} + +fn update_tracing_identifier( + settings: TracingSettings, + identifier: String, +) -> GeneralTracingConfig { + GeneralTracingConfig { + tracing_settings: TracingSettings { + logger: match settings.logger { + LoggerLayer::Loki(mut config) => { + config.host_identifier.clone_from(&identifier); + LoggerLayer::Loki(config) + } + other => other, + }, + tracing: match settings.tracing { + TracingLayer::Otlp(mut config) => { + config.service_name.clone_from(&identifier); + TracingLayer::Otlp(config) + } + other @ TracingLayer::None => other, + }, + filter: settings.filter, + metrics: match settings.metrics { + MetricsLayer::Otlp(mut config) => { + config.host_identifier = identifier; + MetricsLayer::Otlp(config) + } + other @ MetricsLayer::None => other, + }, + console: settings.console, + level: settings.level, + }, + } +} + +fn create_kms_configs( + blend_configs: &[GeneralBlendConfig], + da_configs: &[GeneralDaConfig], +) -> Vec { + da_configs + .iter() + .zip(blend_configs.iter()) + .map(|(da_conf, blend_conf)| PreloadKMSBackendSettings { + keys: [ + ( + hex::encode(blend_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(blend_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + &blend_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(blend_conf.secret_zk_key.clone())), + ), + ( + hex::encode(da_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(da_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + 
&da_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(da_conf.secret_zk_key.clone())), + ), + ] + .into(), + }) + .collect() +} + +#[cfg(test)] +mod cfgsync_tests { + use std::{net::Ipv4Addr, num::NonZero, str::FromStr as _, time::Duration}; + + use nomos_da_network_core::swarm::{ + DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig, + }; + use nomos_libp2p::{Multiaddr, Protocol}; + use nomos_tracing_service::{ + ConsoleLayer, FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings, + }; + use testing_framework_config::topology::configs::{ + consensus::ConsensusParams, da::DaParams, wallet::WalletConfig, + }; + use tracing::Level; + + use super::{Host, HostKind, create_node_configs}; + + #[test] + fn basic_ip_list() { + let hosts = (0..10) + .map(|i| Host { + kind: HostKind::Validator, + ip: Ipv4Addr::from_str(&format!("10.1.1.{i}")).unwrap(), + identifier: "node".into(), + network_port: 3000, + da_network_port: 4044, + blend_port: 5000, + api_port: 18080, + testing_http_port: 18081, + }) + .collect(); + + let configs = create_node_configs( + &ConsensusParams { + n_participants: 10, + security_param: NonZero::new(10).unwrap(), + active_slot_coeff: 0.9, + }, + &DaParams { + subnetwork_size: 2, + dispersal_factor: 1, + num_samples: 1, + num_subnets: 2, + old_blobs_check_interval: Duration::from_secs(5), + blobs_validity_duration: Duration::from_secs(u64::MAX), + global_params_path: String::new(), + policy_settings: DAConnectionPolicySettings::default(), + monitor_settings: DAConnectionMonitorSettings::default(), + balancer_interval: Duration::ZERO, + redial_cooldown: Duration::ZERO, + replication_settings: ReplicationConfig { + seen_message_cache_size: 0, + seen_message_ttl: Duration::ZERO, + }, + subnets_refresh_interval: Duration::from_secs(1), + retry_shares_limit: 1, + retry_commitments_limit: 1, + }, + &TracingSettings { + logger: LoggerLayer::None, + tracing: TracingLayer::None, + filter: 
FilterLayer::None, + metrics: MetricsLayer::None, + console: ConsoleLayer::None, + level: Level::DEBUG, + }, + &WalletConfig::default(), + None, + None, + None, + hosts, + ); + + for (host, config) in &configs { + let network_port = config.network_config.backend.inner.port; + let da_network_port = extract_port(&config.da_config.listening_address); + let blend_port = extract_port(&config.blend_config.backend_core.listening_address); + + assert_eq!(network_port, host.network_port); + assert_eq!(da_network_port, host.da_network_port); + assert_eq!(blend_port, host.blend_port); + } + } + + fn extract_port(multiaddr: &Multiaddr) -> u16 { + multiaddr + .iter() + .find_map(|protocol| match protocol { + Protocol::Udp(port) => Some(port), + _ => None, + }) + .unwrap() + } +} diff --git a/testing-framework/tools/cfgsync/src/lib.rs b/testing-framework/tools/cfgsync/src/lib.rs new file mode 100644 index 0000000..f4662d8 --- /dev/null +++ b/testing-framework/tools/cfgsync/src/lib.rs @@ -0,0 +1,4 @@ +pub mod client; +pub mod config; +pub mod repo; +pub mod server; diff --git a/testing-framework/tools/cfgsync/src/repo.rs b/testing-framework/tools/cfgsync/src/repo.rs new file mode 100644 index 0000000..3b9d4ed --- /dev/null +++ b/testing-framework/tools/cfgsync/src/repo.rs @@ -0,0 +1,141 @@ +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + time::Duration, +}; + +use nomos_tracing_service::TracingSettings; +use testing_framework_config::topology::configs::{ + GeneralConfig, consensus::ConsensusParams, da::DaParams, wallet::WalletConfig, +}; +use tokio::{sync::oneshot::Sender, time::timeout}; + +use crate::{ + config::{Host, create_node_configs}, + server::CfgSyncConfig, +}; + +pub enum RepoResponse { + Config(Box), + Timeout, +} + +pub struct ConfigRepo { + waiting_hosts: Mutex>>, + n_hosts: usize, + consensus_params: ConsensusParams, + da_params: DaParams, + tracing_settings: TracingSettings, + wallet_config: WalletConfig, + timeout_duration: Duration, + ids: Option>, + 
da_ports: Option>, + blend_ports: Option>, +} + +impl From for Arc { + fn from(config: CfgSyncConfig) -> Self { + let consensus_params = config.to_consensus_params(); + let da_params = config.to_da_params(); + let tracing_settings = config.to_tracing_settings(); + let wallet_config = config.wallet_config(); + let ids = config.ids; + let da_ports = config.da_ports; + let blend_ports = config.blend_ports; + + ConfigRepo::new( + config.n_hosts, + consensus_params, + da_params, + tracing_settings, + wallet_config, + ids, + da_ports, + blend_ports, + Duration::from_secs(config.timeout), + ) + } +} + +impl ConfigRepo { + #[must_use] + pub fn new( + n_hosts: usize, + consensus_params: ConsensusParams, + da_params: DaParams, + tracing_settings: TracingSettings, + wallet_config: WalletConfig, + ids: Option>, + da_ports: Option>, + blend_ports: Option>, + timeout_duration: Duration, + ) -> Arc { + let repo = Arc::new(Self { + waiting_hosts: Mutex::new(HashMap::new()), + n_hosts, + consensus_params, + da_params, + tracing_settings, + wallet_config, + ids, + da_ports, + blend_ports, + timeout_duration, + }); + + let repo_clone = Arc::clone(&repo); + tokio::spawn(async move { + repo_clone.run().await; + }); + + repo + } + + pub fn register(&self, host: Host, reply_tx: Sender) { + let mut waiting_hosts = self.waiting_hosts.lock().unwrap(); + waiting_hosts.insert(host, reply_tx); + } + + async fn run(&self) { + let timeout_duration = self.timeout_duration; + + if timeout(timeout_duration, self.wait_for_hosts()).await == Ok(()) { + println!("All hosts have announced their IPs"); + + let mut waiting_hosts = self.waiting_hosts.lock().unwrap(); + let hosts = waiting_hosts.keys().cloned().collect(); + + let configs = create_node_configs( + &self.consensus_params, + &self.da_params, + &self.tracing_settings, + &self.wallet_config, + self.ids.clone(), + self.da_ports.clone(), + self.blend_ports.clone(), + hosts, + ); + + for (host, sender) in waiting_hosts.drain() { + let config = 
configs.get(&host).expect("host should have a config"); + let _ = sender.send(RepoResponse::Config(Box::new(config.to_owned()))); + } + } else { + println!("Timeout: Not all hosts announced within the time limit"); + + let mut waiting_hosts = self.waiting_hosts.lock().unwrap(); + for (_, sender) in waiting_hosts.drain() { + let _ = sender.send(RepoResponse::Timeout); + } + } + } + + async fn wait_for_hosts(&self) { + loop { + if self.waiting_hosts.lock().unwrap().len() >= self.n_hosts { + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + } +} diff --git a/testing-framework/tools/cfgsync/src/server.rs b/testing-framework/tools/cfgsync/src/server.rs new file mode 100644 index 0000000..fe91dc3 --- /dev/null +++ b/testing-framework/tools/cfgsync/src/server.rs @@ -0,0 +1,297 @@ +use std::{fs, net::Ipv4Addr, num::NonZero, path::PathBuf, sync::Arc, time::Duration}; + +use axum::{Json, Router, extract::State, http::StatusCode, response::IntoResponse, routing::post}; +use nomos_da_network_core::swarm::{ + DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig, +}; +use nomos_tracing_service::TracingSettings; +use nomos_utils::bounded_duration::{MinimalBoundedDuration, SECOND}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use serde_with::serde_as; +use subnetworks_assignations::MembershipHandler; +use testing_framework_config::{ + nodes::{executor::create_executor_config, validator::create_validator_config}, + topology::configs::{consensus::ConsensusParams, da::DaParams, wallet::WalletConfig}, +}; +use tokio::sync::oneshot::channel; + +use crate::{ + config::{Host, PortOverrides}, + repo::{ConfigRepo, RepoResponse}, +}; + +#[serde_as] +#[derive(Debug, Deserialize)] +pub struct CfgSyncConfig { + pub port: u16, + pub n_hosts: usize, + pub timeout: u64, + + // ConsensusConfig related parameters + pub security_param: NonZero, + pub active_slot_coeff: f64, + pub wallet: WalletConfig, + #[serde(default)] + pub ids: Option>, + 
#[serde(default)] + pub da_ports: Option>, + #[serde(default)] + pub blend_ports: Option>, + + // DaConfig related parameters + pub subnetwork_size: usize, + pub dispersal_factor: usize, + pub num_samples: u16, + pub num_subnets: u16, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub old_blobs_check_interval: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub blobs_validity_duration: Duration, + pub global_params_path: String, + pub min_dispersal_peers: usize, + pub min_replication_peers: usize, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub monitor_failure_time_window: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub balancer_interval: Duration, + pub replication_settings: ReplicationConfig, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, + + // Tracing params + pub tracing_settings: TracingSettings, +} + +impl CfgSyncConfig { + pub fn load_from_file(file_path: &PathBuf) -> Result { + let config_content = fs::read_to_string(file_path) + .map_err(|err| format!("Failed to read config file: {err}"))?; + serde_yaml::from_str(&config_content) + .map_err(|err| format!("Failed to parse config file: {err}")) + } + + #[must_use] + pub const fn to_consensus_params(&self) -> ConsensusParams { + ConsensusParams { + n_participants: self.n_hosts, + security_param: self.security_param, + active_slot_coeff: self.active_slot_coeff, + } + } + + #[must_use] + pub fn to_da_params(&self) -> DaParams { + DaParams { + subnetwork_size: self.subnetwork_size, + dispersal_factor: self.dispersal_factor, + num_samples: self.num_samples, + num_subnets: self.num_subnets, + old_blobs_check_interval: self.old_blobs_check_interval, + blobs_validity_duration: self.blobs_validity_duration, + global_params_path: self.global_params_path.clone(), + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: self.min_dispersal_peers, + min_replication_peers: self.min_replication_peers, + 
max_dispersal_failures: 3, + max_sampling_failures: 3, + max_replication_failures: 3, + malicious_threshold: 10, + }, + monitor_settings: DAConnectionMonitorSettings { + failure_time_window: self.monitor_failure_time_window, + ..Default::default() + }, + balancer_interval: self.balancer_interval, + redial_cooldown: Duration::ZERO, + replication_settings: self.replication_settings, + subnets_refresh_interval: Duration::from_secs(30), + retry_shares_limit: self.retry_shares_limit, + retry_commitments_limit: self.retry_commitments_limit, + } + } + + #[must_use] + pub fn to_tracing_settings(&self) -> TracingSettings { + self.tracing_settings.clone() + } + + #[must_use] + pub fn wallet_config(&self) -> WalletConfig { + self.wallet.clone() + } +} + +#[derive(Serialize, Deserialize)] +pub struct ClientIp { + pub ip: Ipv4Addr, + pub identifier: String, + #[serde(default)] + pub network_port: Option, + #[serde(default)] + pub da_port: Option, + #[serde(default)] + pub blend_port: Option, + #[serde(default)] + pub api_port: Option, + #[serde(default)] + pub testing_http_port: Option, +} + +async fn validator_config( + State(config_repo): State>, + Json(payload): Json, +) -> impl IntoResponse { + let ClientIp { + ip, + identifier, + network_port, + da_port, + blend_port, + api_port, + testing_http_port, + } = payload; + let ports = PortOverrides { + network_port, + da_network_port: da_port, + blend_port, + api_port, + testing_http_port, + }; + + let (reply_tx, reply_rx) = channel(); + config_repo.register(Host::validator_from_ip(ip, identifier, ports), reply_tx); + + (reply_rx.await).map_or_else( + |_| (StatusCode::INTERNAL_SERVER_ERROR, "Error receiving config").into_response(), + |config_response| match config_response { + RepoResponse::Config(config) => { + let config = create_validator_config(*config); + let mut value = + serde_json::to_value(&config).expect("validator config should serialize"); + inject_defaults(&mut value); + override_api_ports(&mut value, &ports); + 
inject_da_assignations(&mut value, &config.da_network.membership); + override_min_session_members(&mut value); + (StatusCode::OK, Json(value)).into_response() + } + RepoResponse::Timeout => (StatusCode::REQUEST_TIMEOUT).into_response(), + }, + ) +} + +async fn executor_config( + State(config_repo): State>, + Json(payload): Json, +) -> impl IntoResponse { + let ClientIp { + ip, + identifier, + network_port, + da_port, + blend_port, + api_port, + testing_http_port, + } = payload; + let ports = PortOverrides { + network_port, + da_network_port: da_port, + blend_port, + api_port, + testing_http_port, + }; + + let (reply_tx, reply_rx) = channel(); + config_repo.register(Host::executor_from_ip(ip, identifier, ports), reply_tx); + + (reply_rx.await).map_or_else( + |_| (StatusCode::INTERNAL_SERVER_ERROR, "Error receiving config").into_response(), + |config_response| match config_response { + RepoResponse::Config(config) => { + let config = create_executor_config(*config); + let mut value = + serde_json::to_value(&config).expect("executor config should serialize"); + inject_defaults(&mut value); + override_api_ports(&mut value, &ports); + inject_da_assignations(&mut value, &config.da_network.membership); + override_min_session_members(&mut value); + (StatusCode::OK, Json(value)).into_response() + } + RepoResponse::Timeout => (StatusCode::REQUEST_TIMEOUT).into_response(), + }, + ) +} + +pub fn cfgsync_app(config_repo: Arc) -> Router { + Router::new() + .route("/validator", post(validator_config)) + .route("/executor", post(executor_config)) + .with_state(config_repo) +} + +fn override_api_ports(config: &mut serde_json::Value, ports: &PortOverrides) { + if let Some(api_port) = ports.api_port { + if let Some(address) = config.pointer_mut("/http/backend_settings/address") { + *address = json!(format!("0.0.0.0:{api_port}")); + } + } + + if let Some(testing_port) = ports.testing_http_port { + if let Some(address) = config.pointer_mut("/testing_http/backend_settings/address") { + 
*address = json!(format!("0.0.0.0:{testing_port}")); + } + } +} + +fn inject_da_assignations( + config: &mut serde_json::Value, + membership: &nomos_node::NomosDaMembership, +) { + let assignations: std::collections::HashMap> = membership + .subnetworks() + .into_iter() + .map(|(subnet_id, members)| { + ( + subnet_id.to_string(), + members.into_iter().map(|peer| peer.to_string()).collect(), + ) + }) + .collect(); + + if let Some(membership) = config.pointer_mut("/da_network/membership") { + if let Some(map) = membership.as_object_mut() { + map.insert("assignations".to_string(), serde_json::json!(assignations)); + } + } +} + +fn override_min_session_members(config: &mut serde_json::Value) { + if let Some(value) = config.pointer_mut("/da_network/min_session_members") { + *value = serde_json::json!(1); + } +} + +fn inject_defaults(config: &mut serde_json::Value) { + if let Some(cryptarchia) = config + .get_mut("cryptarchia") + .and_then(|v| v.as_object_mut()) + { + let bootstrap = cryptarchia + .entry("bootstrap") + .or_insert_with(|| serde_json::json!({})); + if let Some(bootstrap_map) = bootstrap.as_object_mut() { + bootstrap_map + .entry("ibd") + .or_insert_with(|| serde_json::json!({ "peers": [], "delay_before_new_download": { "secs": 10, "nanos": 0 } })); + } + + cryptarchia + .entry("network_adapter_settings") + .or_insert_with(|| serde_json::json!({ "topic": "/cryptarchia/proto" })); + cryptarchia.entry("sync").or_insert_with(|| { + serde_json::json!({ + "orphan": { "max_orphan_cache_size": 5 } + }) + }); + } +} diff --git a/testing-framework/workflows/Cargo.toml b/testing-framework/workflows/Cargo.toml new file mode 100644 index 0000000..3c8ec94 --- /dev/null +++ b/testing-framework/workflows/Cargo.toml @@ -0,0 +1,26 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-workflows" +readme.workspace = true +repository.workspace = true 
+version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +async-trait = "0.1" +ed25519-dalek = { version = "2.2.0", features = ["rand_core", "serde"] } +executor-http-client = { workspace = true } +nomos-core = { workspace = true } +rand = { workspace = true } +testing-framework-config = { workspace = true } +testing-framework-core = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] } +tracing = { workspace = true } +zksign = { workspace = true } diff --git a/testing-framework/workflows/src/builder/mod.rs b/testing-framework/workflows/src/builder/mod.rs new file mode 100644 index 0000000..b7a2a48 --- /dev/null +++ b/testing-framework/workflows/src/builder/mod.rs @@ -0,0 +1,296 @@ +use std::{ + num::{NonZeroU64, NonZeroUsize}, + time::Duration, +}; + +use testing_framework_core::{ + scenario::{Builder as CoreScenarioBuilder, NodeControlCapability}, + topology::configs::wallet::WalletConfig, +}; + +use crate::{ + expectations::ConsensusLiveness, + workloads::{chaos::RandomRestartWorkload, da, transaction}, +}; + +macro_rules! non_zero_rate_fn { + ($name:ident, $message:literal) => { + const fn $name(rate: u64) -> NonZeroU64 { + match NonZeroU64::new(rate) { + Some(value) => value, + None => panic!($message), + } + } + }; +} + +non_zero_rate_fn!( + transaction_rate_checked, + "transaction rate must be non-zero" +); +non_zero_rate_fn!(channel_rate_checked, "channel rate must be non-zero"); +non_zero_rate_fn!(blob_rate_checked, "blob rate must be non-zero"); + +/// Extension methods for building test scenarios with common patterns. +pub trait ScenarioBuilderExt: Sized { + /// Configure a transaction flow workload. + fn transactions(self) -> TransactionFlowBuilder; + /// Configure a data-availability workload. + fn da(self) -> DataAvailabilityFlowBuilder; + #[must_use] + /// Attach a consensus liveness expectation. 
+ fn expect_consensus_liveness(self) -> Self; + #[must_use] + /// Seed deterministic wallets with total funds split across `users`. + fn initialize_wallet(self, total_funds: u64, users: usize) -> Self; +} + +impl ScenarioBuilderExt for CoreScenarioBuilder { + fn transactions(self) -> TransactionFlowBuilder { + TransactionFlowBuilder::new(self) + } + + fn da(self) -> DataAvailabilityFlowBuilder { + DataAvailabilityFlowBuilder::new(self) + } + + fn expect_consensus_liveness(self) -> Self { + self.with_expectation(ConsensusLiveness::default()) + } + + fn initialize_wallet(self, total_funds: u64, users: usize) -> Self { + let user_count = NonZeroUsize::new(users).expect("wallet user count must be non-zero"); + let wallet = WalletConfig::uniform(total_funds, user_count); + self.with_wallet_config(wallet) + } +} + +/// Builder for transaction workloads. +pub struct TransactionFlowBuilder { + builder: CoreScenarioBuilder, + rate: NonZeroU64, + users: Option, +} + +impl TransactionFlowBuilder { + const fn default_rate() -> NonZeroU64 { + transaction_rate_checked(1) + } + + const fn new(builder: CoreScenarioBuilder) -> Self { + Self { + builder, + rate: Self::default_rate(), + users: None, + } + } + + #[must_use] + /// Set transaction submission rate per block (panics on zero). + pub const fn rate(mut self, rate: u64) -> Self { + self.rate = transaction_rate_checked(rate); + self + } + + #[must_use] + /// Set transaction submission rate per block. + pub const fn rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.rate = rate; + self + } + + #[must_use] + /// Limit how many users will submit transactions. + pub const fn users(mut self, users: usize) -> Self { + match NonZeroUsize::new(users) { + Some(value) => self.users = Some(value), + None => panic!("transaction user count must be non-zero"), + } + self + } + + #[must_use] + /// Attach the transaction workload to the scenario. 
+ pub fn apply(mut self) -> CoreScenarioBuilder { + let workload = transaction::Workload::with_rate(self.rate.get()) + .expect("transaction rate must be non-zero") + .with_user_limit(self.users); + self.builder = self.builder.with_workload(workload); + self.builder + } +} + +/// Builder for data availability workloads. +pub struct DataAvailabilityFlowBuilder { + builder: CoreScenarioBuilder, + channel_rate: NonZeroU64, + blob_rate: NonZeroU64, +} + +impl DataAvailabilityFlowBuilder { + const fn default_channel_rate() -> NonZeroU64 { + channel_rate_checked(1) + } + + const fn default_blob_rate() -> NonZeroU64 { + blob_rate_checked(1) + } + + const fn new(builder: CoreScenarioBuilder) -> Self { + Self { + builder, + channel_rate: Self::default_channel_rate(), + blob_rate: Self::default_blob_rate(), + } + } + + #[must_use] + /// Set channel publish rate per block (panics on zero). + pub const fn channel_rate(mut self, rate: u64) -> Self { + self.channel_rate = channel_rate_checked(rate); + self + } + + #[must_use] + /// Set channel publish rate per block. + pub const fn channel_rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.channel_rate = rate; + self + } + + #[must_use] + /// Set blob publish rate (per block). + pub const fn blob_rate(mut self, rate: u64) -> Self { + self.blob_rate = blob_rate_checked(rate); + self + } + + #[must_use] + /// Set blob publish rate per block. + pub const fn blob_rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.blob_rate = rate; + self + } + + #[must_use] + pub fn apply(mut self) -> CoreScenarioBuilder { + let count = (self.channel_rate.get() * self.blob_rate.get()) as usize; + let workload = da::Workload::with_channel_count(count.max(1)); + self.builder = self.builder.with_workload(workload); + self.builder + } +} + +/// Chaos helpers for scenarios that can control nodes. +pub trait ChaosBuilderExt: Sized { + /// Entry point into chaos workloads. 
+ fn chaos(self) -> ChaosBuilder; +} + +impl ChaosBuilderExt for CoreScenarioBuilder { + fn chaos(self) -> ChaosBuilder { + ChaosBuilder { builder: self } + } +} + +/// Chaos workload builder root. +/// +/// Start with `chaos()` on a scenario builder, then select a workload variant +/// such as `restart()`. +pub struct ChaosBuilder { + builder: CoreScenarioBuilder, +} + +impl ChaosBuilder { + /// Configure a random restarts chaos workload. + #[must_use] + pub fn restart(self) -> ChaosRestartBuilder { + ChaosRestartBuilder { + builder: self.builder, + min_delay: Duration::from_secs(10), + max_delay: Duration::from_secs(30), + target_cooldown: Duration::from_secs(60), + include_validators: true, + include_executors: true, + } + } +} + +pub struct ChaosRestartBuilder { + builder: CoreScenarioBuilder, + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, +} + +impl ChaosRestartBuilder { + #[must_use] + /// Set the minimum delay between restart operations. + pub fn min_delay(mut self, delay: Duration) -> Self { + assert!(!delay.is_zero(), "chaos restart min delay must be non-zero"); + self.min_delay = delay; + self + } + + #[must_use] + /// Set the maximum delay between restart operations. + pub fn max_delay(mut self, delay: Duration) -> Self { + assert!(!delay.is_zero(), "chaos restart max delay must be non-zero"); + self.max_delay = delay; + self + } + + #[must_use] + /// Cooldown to allow between restarts for a target node. + pub fn target_cooldown(mut self, cooldown: Duration) -> Self { + assert!( + !cooldown.is_zero(), + "chaos restart target cooldown must be non-zero" + ); + self.target_cooldown = cooldown; + self + } + + #[must_use] + /// Include validators in the restart target set. + pub const fn include_validators(mut self, enabled: bool) -> Self { + self.include_validators = enabled; + self + } + + #[must_use] + /// Include executors in the restart target set. 
+ pub const fn include_executors(mut self, enabled: bool) -> Self { + self.include_executors = enabled; + self + } + + #[must_use] + /// Finalize the chaos restart workload and attach it to the scenario. + pub fn apply(mut self) -> CoreScenarioBuilder { + assert!( + self.min_delay <= self.max_delay, + "chaos restart min delay must not exceed max delay" + ); + assert!( + self.target_cooldown >= self.min_delay, + "chaos restart target cooldown must be >= min delay" + ); + assert!( + self.include_validators || self.include_executors, + "chaos restart requires at least one node group" + ); + + let workload = RandomRestartWorkload::new( + self.min_delay, + self.max_delay, + self.target_cooldown, + self.include_validators, + self.include_executors, + ); + self.builder = self.builder.with_workload(workload); + self.builder + } +} diff --git a/testing-framework/workflows/src/expectations/consensus_liveness.rs b/testing-framework/workflows/src/expectations/consensus_liveness.rs new file mode 100644 index 0000000..6124649 --- /dev/null +++ b/testing-framework/workflows/src/expectations/consensus_liveness.rs @@ -0,0 +1,223 @@ +use std::time::Duration; + +use async_trait::async_trait; +use testing_framework_core::scenario::{DynError, Expectation, RunContext}; +use thiserror::Error; +use tokio::time::sleep; + +#[derive(Clone, Copy, Debug)] +/// Checks that every node reaches near the highest observed height within an +/// allowance. 
+pub struct ConsensusLiveness { + lag_allowance: u64, +} + +impl Default for ConsensusLiveness { + fn default() -> Self { + Self { + lag_allowance: LAG_ALLOWANCE, + } + } +} + +const LAG_ALLOWANCE: u64 = 2; +const MIN_PROGRESS_BLOCKS: u64 = 5; +const REQUEST_RETRIES: usize = 5; +const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(2); + +#[async_trait] +impl Expectation for ConsensusLiveness { + fn name(&self) -> &'static str { + "consensus_liveness" + } + + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { + Self::ensure_participants(ctx)?; + let target_hint = Self::target_blocks(ctx); + let check = Self::collect_results(ctx).await; + (*self).report(target_hint, check) + } +} + +const fn consensus_target_blocks(ctx: &RunContext) -> u64 { + ctx.expected_blocks() +} + +#[derive(Debug, Error)] +enum ConsensusLivenessIssue { + #[error("{node} height {height} below target {target}")] + HeightBelowTarget { + node: String, + height: u64, + target: u64, + }, + #[error("{node} consensus_info failed: {source}")] + RequestFailed { + node: String, + #[source] + source: DynError, + }, +} + +#[derive(Debug, Error)] +enum ConsensusLivenessError { + #[error("consensus liveness requires at least one validator or executor")] + MissingParticipants, + #[error("consensus liveness violated (target={target}):\n{details}")] + Violations { + target: u64, + #[source] + details: ViolationIssues, + }, +} + +#[derive(Debug, Error)] +#[error("{message}")] +struct ViolationIssues { + issues: Vec, + message: String, +} + +impl ConsensusLiveness { + const fn target_blocks(ctx: &RunContext) -> u64 { + consensus_target_blocks(ctx) + } + + fn ensure_participants(ctx: &RunContext) -> Result<(), DynError> { + if ctx.node_clients().all_clients().count() == 0 { + Err(Box::new(ConsensusLivenessError::MissingParticipants)) + } else { + Ok(()) + } + } + + async fn collect_results(ctx: &RunContext) -> LivenessCheck { + let participant_count = 
ctx.node_clients().all_clients().count().max(1); + let max_attempts = participant_count * REQUEST_RETRIES; + let mut samples = Vec::with_capacity(participant_count); + let mut issues = Vec::new(); + + for attempt in 0..max_attempts { + match Self::fetch_cluster_height(ctx).await { + Ok(height) => { + samples.push(NodeSample { + label: format!("sample-{attempt}"), + height, + }); + if samples.len() >= participant_count { + break; + } + } + Err(err) => issues.push(ConsensusLivenessIssue::RequestFailed { + node: format!("sample-{attempt}"), + source: err, + }), + } + + if samples.len() < participant_count { + sleep(REQUEST_RETRY_DELAY).await; + } + } + + LivenessCheck { samples, issues } + } + + async fn fetch_cluster_height(ctx: &RunContext) -> Result { + ctx.cluster_client() + .try_all_clients(|client| { + Box::pin(async move { + client + .consensus_info() + .await + .map(|info| info.height) + .map_err(|err| -> DynError { err.into() }) + }) + }) + .await + } + + #[must_use] + /// Adjusts how many blocks behind the leader a node may be before failing. 
+ pub const fn with_lag_allowance(mut self, lag_allowance: u64) -> Self { + self.lag_allowance = lag_allowance; + self + } + + fn report(self, target_hint: u64, mut check: LivenessCheck) -> Result<(), DynError> { + if check.samples.is_empty() { + return Err(Box::new(ConsensusLivenessError::MissingParticipants)); + } + + let max_height = check + .samples + .iter() + .map(|sample| sample.height) + .max() + .unwrap_or(0); + + let mut target = target_hint; + if target == 0 || target > max_height { + target = max_height; + } + + if max_height < MIN_PROGRESS_BLOCKS { + check + .issues + .push(ConsensusLivenessIssue::HeightBelowTarget { + node: "network".to_owned(), + height: max_height, + target: MIN_PROGRESS_BLOCKS, + }); + } + + for sample in &check.samples { + if sample.height + self.lag_allowance < target { + check + .issues + .push(ConsensusLivenessIssue::HeightBelowTarget { + node: sample.label.clone(), + height: sample.height, + target, + }); + } + } + + if check.issues.is_empty() { + tracing::info!( + target, + heights = ?check.samples.iter().map(|s| s.height).collect::>(), + "consensus liveness expectation satisfied" + ); + Ok(()) + } else { + Err(Box::new(ConsensusLivenessError::Violations { + target, + details: check.issues.into(), + })) + } + } +} + +struct NodeSample { + label: String, + height: u64, +} + +struct LivenessCheck { + samples: Vec, + issues: Vec, +} + +impl From> for ViolationIssues { + fn from(issues: Vec) -> Self { + let mut message = String::new(); + for issue in &issues { + if !message.is_empty() { + message.push('\n'); + } + message.push_str("- "); + message.push_str(&issue.to_string()); + } + Self { issues, message } + } +} diff --git a/testing-framework/workflows/src/expectations/mod.rs b/testing-framework/workflows/src/expectations/mod.rs new file mode 100644 index 0000000..e17d8ca --- /dev/null +++ b/testing-framework/workflows/src/expectations/mod.rs @@ -0,0 +1,3 @@ +mod consensus_liveness; + +pub use 
consensus_liveness::ConsensusLiveness; diff --git a/testing-framework/workflows/src/lib.rs b/testing-framework/workflows/src/lib.rs new file mode 100644 index 0000000..40fc535 --- /dev/null +++ b/testing-framework/workflows/src/lib.rs @@ -0,0 +1,8 @@ +pub mod builder; +pub mod expectations; +pub mod util; +pub mod workloads; + +pub use builder::{ChaosBuilderExt, ScenarioBuilderExt}; +pub use expectations::ConsensusLiveness; +pub use workloads::transaction::TxInclusionExpectation; diff --git a/testing-framework/workflows/src/util/mod.rs b/testing-framework/workflows/src/util/mod.rs new file mode 100644 index 0000000..d7c3294 --- /dev/null +++ b/testing-framework/workflows/src/util/mod.rs @@ -0,0 +1 @@ +pub mod tx; diff --git a/testing-framework/workflows/src/util/tx.rs b/testing-framework/workflows/src/util/tx.rs new file mode 100644 index 0000000..6d797b1 --- /dev/null +++ b/testing-framework/workflows/src/util/tx.rs @@ -0,0 +1,39 @@ +use ed25519_dalek::{Signer as _, SigningKey}; +use nomos_core::mantle::{ + MantleTx, Op, OpProof, SignedMantleTx, Transaction as _, + ledger::Tx as LedgerTx, + ops::channel::{ChannelId, MsgId, inscribe::InscriptionOp}, +}; +use zksign::SecretKey; + +/// Builds a signed inscription transaction with deterministic payload for +/// testing. 
+#[must_use] +pub fn create_inscription_transaction_with_id(id: ChannelId) -> SignedMantleTx { + let signing_key = SigningKey::from_bytes(&[0u8; 32]); + let signer = signing_key.verifying_key(); + + let inscription_op = InscriptionOp { + channel_id: id, + inscription: format!("Test channel inscription {id:?}").into_bytes(), + parent: MsgId::root(), + signer, + }; + + let mantle_tx = MantleTx { + ops: vec![Op::ChannelInscribe(inscription_op)], + ledger_tx: LedgerTx::new(vec![], vec![]), + storage_gas_price: 0, + execution_gas_price: 0, + }; + + let tx_hash = mantle_tx.hash(); + let signature = signing_key.sign(&tx_hash.as_signing_bytes()); + + SignedMantleTx::new( + mantle_tx, + vec![OpProof::Ed25519Sig(signature)], + SecretKey::multi_sign(&[], tx_hash.as_ref()).expect("zk signature generation"), + ) + .expect("valid transaction") +} diff --git a/testing-framework/workflows/src/workloads/chaos.rs b/testing-framework/workflows/src/workloads/chaos.rs new file mode 100644 index 0000000..57073d2 --- /dev/null +++ b/testing-framework/workflows/src/workloads/chaos.rs @@ -0,0 +1,166 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use rand::{Rng as _, seq::SliceRandom as _, thread_rng}; +use testing_framework_core::scenario::{DynError, RunContext, Workload}; +use tokio::time::{Instant, sleep}; +use tracing::info; + +/// Randomly restarts validators and executors during a run to introduce chaos. +pub struct RandomRestartWorkload { + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, +} + +impl RandomRestartWorkload { + /// Creates a restart workload with delay bounds and per-target cooldown. + /// + /// `min_delay`/`max_delay` bound the sleep between restart attempts, while + /// `target_cooldown` prevents repeatedly restarting the same node too + /// quickly. Validators or executors can be selectively included. 
+ #[must_use] + pub const fn new( + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, + ) -> Self { + Self { + min_delay, + max_delay, + target_cooldown, + include_validators, + include_executors, + } + } + + fn targets(&self, ctx: &RunContext) -> Vec { + let mut targets = Vec::new(); + let validator_count = ctx.descriptors().validators().len(); + if self.include_validators { + if validator_count > 1 { + for index in 0..validator_count { + targets.push(Target::Validator(index)); + } + } else if validator_count == 1 { + info!("chaos restart skipping validators: only one validator configured"); + } + } + if self.include_executors { + for index in 0..ctx.descriptors().executors().len() { + targets.push(Target::Executor(index)); + } + } + targets + } + + fn random_delay(&self) -> Duration { + if self.max_delay <= self.min_delay { + return self.min_delay; + } + let spread = self + .max_delay + .checked_sub(self.min_delay) + .unwrap_or_else(|| Duration::from_millis(1)) + .as_secs_f64(); + let offset = thread_rng().gen_range(0.0..=spread); + self.min_delay + .checked_add(Duration::from_secs_f64(offset)) + .unwrap_or(self.max_delay) + } + + fn initialize_cooldowns(&self, targets: &[Target]) -> HashMap { + let now = Instant::now(); + let ready = now.checked_sub(self.target_cooldown).unwrap_or(now); + targets + .iter() + .copied() + .map(|target| (target, ready)) + .collect() + } + + async fn pick_target( + &self, + targets: &[Target], + cooldowns: &HashMap, + ) -> Target { + loop { + let now = Instant::now(); + if let Some(next_ready) = cooldowns + .values() + .copied() + .filter(|ready| *ready > now) + .min() + { + let wait = next_ready.saturating_duration_since(now); + if !wait.is_zero() { + sleep(wait).await; + continue; + } + } + + let available: Vec = targets + .iter() + .copied() + .filter(|target| cooldowns.get(target).is_none_or(|ready| *ready <= now)) + .collect(); + + if let Some(choice) 
= available.choose(&mut thread_rng()).copied() { + return choice; + } + + return targets + .choose(&mut thread_rng()) + .copied() + .expect("chaos restart workload has targets"); + } + } +} + +#[async_trait] +impl Workload for RandomRestartWorkload { + fn name(&self) -> &'static str { + "chaos_restart" + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { + let handle = ctx + .node_control() + .ok_or_else(|| "chaos restart workload requires node control".to_owned())?; + + let targets = self.targets(ctx); + if targets.is_empty() { + return Err("chaos restart workload has no eligible targets".into()); + } + + let mut cooldowns = self.initialize_cooldowns(&targets); + + loop { + sleep(self.random_delay()).await; + let target = self.pick_target(&targets, &cooldowns).await; + + match target { + Target::Validator(index) => handle + .restart_validator(index) + .await + .map_err(|err| format!("validator restart failed: {err}"))?, + Target::Executor(index) => handle + .restart_executor(index) + .await + .map_err(|err| format!("executor restart failed: {err}"))?, + } + + cooldowns.insert(target, Instant::now() + self.target_cooldown); + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +enum Target { + Validator(usize), + Executor(usize), +} diff --git a/testing-framework/workflows/src/workloads/da/expectation.rs b/testing-framework/workflows/src/workloads/da/expectation.rs new file mode 100644 index 0000000..7af2e1d --- /dev/null +++ b/testing-framework/workflows/src/workloads/da/expectation.rs @@ -0,0 +1,178 @@ +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; + +use async_trait::async_trait; +use nomos_core::mantle::{ + AuthenticatedMantleTx as _, + ops::{Op, channel::ChannelId}, +}; +use testing_framework_core::scenario::{BlockRecord, DynError, Expectation, RunContext}; +use thiserror::Error; +use tokio::sync::broadcast; + +#[derive(Debug)] +pub struct DaWorkloadExpectation { + planned_channels: Vec, + capture_state: Option, +} + 
+#[derive(Debug)] +struct CaptureState { + planned: Arc>, + inscriptions: Arc>>, + blobs: Arc>>, +} + +const MIN_INCLUSION_RATIO: f64 = 0.8; + +#[derive(Debug, Error)] +enum DaExpectationError { + #[error("da workload expectation not started")] + NotCaptured, + #[error("missing inscriptions for {missing:?}")] + MissingInscriptions { missing: Vec }, + #[error("missing blobs for {missing:?}")] + MissingBlobs { missing: Vec }, +} + +impl DaWorkloadExpectation { + /// Validates that inscriptions and blobs landed for the planned channels. + pub const fn new(planned_channels: Vec) -> Self { + Self { + planned_channels, + capture_state: None, + } + } +} + +#[async_trait] +impl Expectation for DaWorkloadExpectation { + fn name(&self) -> &'static str { + "da_workload_inclusions" + } + + async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> { + if self.capture_state.is_some() { + return Ok(()); + } + + let planned = Arc::new( + self.planned_channels + .iter() + .copied() + .collect::>(), + ); + let inscriptions = Arc::new(Mutex::new(HashSet::new())); + let blobs = Arc::new(Mutex::new(HashSet::new())); + + let mut receiver = ctx.block_feed().subscribe(); + let planned_for_task = Arc::clone(&planned); + let inscriptions_for_task = Arc::clone(&inscriptions); + let blobs_for_task = Arc::clone(&blobs); + + tokio::spawn(async move { + loop { + match receiver.recv().await { + Ok(record) => capture_block( + record.as_ref(), + &planned_for_task, + &inscriptions_for_task, + &blobs_for_task, + ), + Err(broadcast::error::RecvError::Lagged(_)) => {} + Err(broadcast::error::RecvError::Closed) => break, + } + } + }); + + self.capture_state = Some(CaptureState { + planned, + inscriptions, + blobs, + }); + + Ok(()) + } + + async fn evaluate(&mut self, _ctx: &RunContext) -> Result<(), DynError> { + let state = self + .capture_state + .as_ref() + .ok_or(DaExpectationError::NotCaptured) + .map_err(DynError::from)?; + + let planned_total = state.planned.len(); + let 
missing_inscriptions = { + let inscriptions = state + .inscriptions + .lock() + .expect("inscription lock poisoned"); + missing_channels(&state.planned, &inscriptions) + }; + let required_inscriptions = minimum_required(planned_total, MIN_INCLUSION_RATIO); + if planned_total.saturating_sub(missing_inscriptions.len()) < required_inscriptions { + return Err(DaExpectationError::MissingInscriptions { + missing: missing_inscriptions, + } + .into()); + } + + let missing_blobs = { + let blobs = state.blobs.lock().expect("blob lock poisoned"); + missing_channels(&state.planned, &blobs) + }; + let required_blobs = minimum_required(planned_total, MIN_INCLUSION_RATIO); + if planned_total.saturating_sub(missing_blobs.len()) < required_blobs { + return Err(DaExpectationError::MissingBlobs { + missing: missing_blobs, + } + .into()); + } + + Ok(()) + } +} + +fn capture_block( + block: &BlockRecord, + planned: &HashSet, + inscriptions: &Arc>>, + blobs: &Arc>>, +) { + let mut new_inscriptions = Vec::new(); + let mut new_blobs = Vec::new(); + + for tx in block.block.transactions() { + for op in &tx.mantle_tx().ops { + match op { + Op::ChannelInscribe(inscribe) if planned.contains(&inscribe.channel_id) => { + new_inscriptions.push(inscribe.channel_id); + } + Op::ChannelBlob(blob) if planned.contains(&blob.channel) => { + new_blobs.push(blob.channel); + } + _ => {} + } + } + } + + if !new_inscriptions.is_empty() { + let mut guard = inscriptions.lock().expect("inscription lock poisoned"); + guard.extend(new_inscriptions); + } + + if !new_blobs.is_empty() { + let mut guard = blobs.lock().expect("blob lock poisoned"); + guard.extend(new_blobs); + } +} + +fn missing_channels(planned: &HashSet, observed: &HashSet) -> Vec { + planned.difference(observed).copied().collect() +} + +fn minimum_required(total: usize, ratio: f64) -> usize { + ((total as f64) * ratio).ceil() as usize +} diff --git a/testing-framework/workflows/src/workloads/da/mod.rs 
b/testing-framework/workflows/src/workloads/da/mod.rs
new file mode 100644
index 0000000..69ae438
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/da/mod.rs
@@ -0,0 +1,4 @@
mod expectation;
mod workload;

pub use workload::Workload;

diff --git a/testing-framework/workflows/src/workloads/da/workload.rs b/testing-framework/workflows/src/workloads/da/workload.rs
new file mode 100644
index 0000000..3b215f9
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/da/workload.rs
@@ -0,0 +1,208 @@
use std::{sync::Arc, time::Duration};

use async_trait::async_trait;
use ed25519_dalek::SigningKey;
use executor_http_client::ExecutorHttpClient;
use nomos_core::{
    da::BlobId,
    mantle::ops::{
        Op,
        channel::{ChannelId, MsgId},
    },
};
use rand::{Rng as _, RngCore as _, seq::SliceRandom as _, thread_rng};
use testing_framework_core::{
    nodes::ApiClient,
    scenario::{BlockRecord, DynError, Expectation, RunContext, Workload as ScenarioWorkload},
};
use tokio::{sync::broadcast, time::sleep};

use super::expectation::DaWorkloadExpectation;
use crate::{
    util::tx,
    workloads::util::{find_channel_op, submit_transaction_via_cluster},
};

// Deterministic key so blob signatures are reproducible across runs.
const TEST_KEY_BYTES: [u8; 32] = [0u8; 32];
const DEFAULT_CHANNELS: usize = 1;
const MIN_BLOB_CHUNKS: usize = 1;
const MAX_BLOB_CHUNKS: usize = 8;
const PUBLISH_RETRIES: usize = 5;
const PUBLISH_RETRY_DELAY: Duration = Duration::from_secs(2);

/// DA workload: inscribes channels and publishes blobs to them.
#[derive(Clone)]
pub struct Workload {
    planned_channels: Arc<[ChannelId]>,
}

impl Default for Workload {
    fn default() -> Self {
        Self::with_channel_count(DEFAULT_CHANNELS)
    }
}

impl Workload {
    /// Creates a workload that inscribes and publishes blobs on `count`
    /// channels.
    ///
    /// # Panics
    /// Panics when `count` is zero.
    #[must_use]
    pub fn with_channel_count(count: usize) -> Self {
        assert!(count > 0, "da workload requires positive count");
        Self {
            planned_channels: Arc::from(planned_channel_ids(count)),
        }
    }

    fn plan(&self) -> Arc<[ChannelId]> {
        Arc::clone(&self.planned_channels)
    }
}

#[async_trait]
impl ScenarioWorkload for Workload {
    fn name(&self) -> &'static str {
        "channel_workload"
    }

    fn expectations(&self) -> Vec<Box<dyn Expectation>> {
        let planned = self.plan().to_vec();
        vec![Box::new(DaWorkloadExpectation::new(planned))]
    }

    /// Runs the inscribe -> publish-blob -> confirm flow for each planned
    /// channel, sequentially.
    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        let mut receiver = ctx.block_feed().subscribe();

        for channel_id in self.plan().iter().copied() {
            run_channel_flow(ctx, &mut receiver, channel_id).await?;
        }

        Ok(())
    }
}

/// Inscribes `channel_id`, waits for the inscription to land, publishes a
/// blob against it, and waits for the blob to land.
async fn run_channel_flow(
    ctx: &RunContext,
    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
    channel_id: ChannelId,
) -> Result<(), DynError> {
    let tx = Arc::new(tx::create_inscription_transaction_with_id(channel_id));
    submit_transaction_via_cluster(ctx, Arc::clone(&tx)).await?;

    let inscription_id = wait_for_inscription(receiver, channel_id).await?;
    let blob_id = publish_blob(ctx, channel_id, inscription_id).await?;
    wait_for_blob(receiver, channel_id, blob_id).await?;
    Ok(())
}

/// Blocks until an inscription for `channel_id` appears in the block feed.
async fn wait_for_inscription(
    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
    channel_id: ChannelId,
) -> Result<MsgId, DynError> {
    wait_for_channel_op(receiver, move |op| {
        if let Op::ChannelInscribe(inscribe) = op
            && inscribe.channel_id == channel_id
        {
            Some(inscribe.id())
        } else {
            None
        }
    })
    .await
}

/// Blocks until the given blob is observed on `channel_id`.
async fn wait_for_blob(
    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
    channel_id: ChannelId,
    blob_id: BlobId,
) -> Result<MsgId, DynError> {
    wait_for_channel_op(receiver, move |op| {
        if let Op::ChannelBlob(blob_op) = op
            && blob_op.channel == channel_id
            && blob_op.blob == blob_id
        {
            Some(blob_op.id())
        } else {
            None
        }
    })
    .await
}

/// Scans blocks from the feed until `matcher` returns `Some` for one of
/// their channel operations. Errors when the feed closes.
async fn wait_for_channel_op<F>(
    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
    mut matcher: F,
) -> Result<MsgId, DynError>
where
    F: FnMut(&Op) -> Option<MsgId>,
{
    loop {
        match receiver.recv().await {
            Ok(record) => {
                if let Some(msg_id) = find_channel_op(record.block.as_ref(), &mut matcher) {
                    return Ok(msg_id);
                }
            }
            // Dropped blocks are tolerated; the op may appear in later blocks.
            Err(broadcast::error::RecvError::Lagged(_)) => {}
            Err(broadcast::error::RecvError::Closed) => {
                return Err("block feed closed while waiting for channel operations".into());
            }
        }
    }
}

/// Publishes a random payload through the executors, retrying with shuffled
/// executor order until one accepts it.
async fn publish_blob(
    ctx: &RunContext,
    channel_id: ChannelId,
    parent_msg: MsgId,
) -> Result<BlobId, DynError> {
    let executors = ctx.node_clients().executor_clients();
    if executors.is_empty() {
        return Err("da workload requires at least one executor".into());
    }

    let signer = SigningKey::from_bytes(&TEST_KEY_BYTES).verifying_key();
    let data = random_blob_payload();
    let client = ExecutorHttpClient::new(None);

    let mut candidates: Vec<&ApiClient> = executors.iter().collect();
    let mut last_err = None;
    for attempt in 1..=PUBLISH_RETRIES {
        candidates.shuffle(&mut thread_rng());
        for executor in &candidates {
            let executor_url = executor.base_url().clone();
            match client
                .publish_blob(executor_url, channel_id, parent_msg, signer, data.clone())
                .await
            {
                Ok(blob_id) => return Ok(blob_id),
                Err(err) => last_err = Some(err.into()),
            }
        }

        if attempt < PUBLISH_RETRIES {
            sleep(PUBLISH_RETRY_DELAY).await;
        }
    }

    Err(last_err.unwrap_or_else(|| "da workload could not publish blob".into()))
}

/// Random payload sized in 31-byte chunks.
fn random_blob_payload() -> Vec<u8> {
    let mut rng = thread_rng();
    let chunks = rng.gen_range(MIN_BLOB_CHUNKS..=MAX_BLOB_CHUNKS);
    let mut data = vec![0u8; 31 * chunks];
    rng.fill_bytes(&mut data);
    data
}

fn planned_channel_ids(total: usize) -> Vec<ChannelId> {
    (0..total as u64)
        .map(deterministic_channel_id)
        .collect::<Vec<_>>()
}

/// Stable channel id: fixed tag in the first 8 bytes, index in the last 8.
fn deterministic_channel_id(index: u64) -> ChannelId {
    let mut bytes = [0u8; 32];
    bytes[..8].copy_from_slice(b"chn_wrkd");
    bytes[24..].copy_from_slice(&index.to_be_bytes());
    ChannelId::from(bytes)
}

diff --git a/testing-framework/workflows/src/workloads/mod.rs b/testing-framework/workflows/src/workloads/mod.rs
new file mode 100644
index 0000000..5dce733
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/mod.rs
@@ -0,0 +1,6 @@
pub mod chaos;
pub mod da;
pub mod transaction;
pub mod util;

pub use transaction::TxInclusionExpectation;

diff --git a/testing-framework/workflows/src/workloads/transaction/expectation.rs b/testing-framework/workflows/src/workloads/transaction/expectation.rs
new file mode 100644
index 0000000..3c71164
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/transaction/expectation.rs
@@ -0,0 +1,145 @@
use std::{
    collections::HashSet,
    num::{NonZeroU64, NonZeroUsize},
    sync::{
        Arc,
        atomic::{AtomicU64, Ordering},
    },
};

use async_trait::async_trait;
use nomos_core::{header::HeaderId, mantle::AuthenticatedMantleTx as _};
use testing_framework_core::scenario::{DynError, Expectation, RunContext};
use thiserror::Error;
use tokio::sync::broadcast;
use zksign::PublicKey;

use super::workload::{limited_user_count, submission_plan};

/// Fraction of planned transactions that must be observed on-chain.
const MIN_INCLUSION_RATIO: f64 = 0.5;

/// Checks that enough of the transaction workload's submissions landed.
#[derive(Clone)]
pub struct TxInclusionExpectation {
    txs_per_block: NonZeroU64,
    user_limit: Option<NonZeroUsize>,
    capture_state: Option<CaptureState>,
}

#[derive(Clone)]
struct CaptureState {
    // Count of planned-account outputs observed in non-genesis blocks.
    observed: Arc<AtomicU64>,
    expected: u64,
}

#[derive(Debug, Error)]
enum TxExpectationError {
    #[error("transaction workload requires seeded accounts")]
    MissingAccounts,
    #[error("transaction workload planned zero transactions")]
    NoPlannedTransactions,
    #[error("transaction inclusion expectation not captured")]
    NotCaptured,
    #[error("transaction inclusion observed {observed} below required {required}")]
    InsufficientInclusions { observed: u64, required: u64 },
}

impl TxInclusionExpectation {
    /// Expectation that checks a minimum fraction of planned transactions were
    /// included.
+ pub const NAME: &'static str = "tx_inclusion_expectation"; + + /// Constructs an inclusion expectation using the same parameters as the + /// workload. + #[must_use] + pub const fn new(txs_per_block: NonZeroU64, user_limit: Option) -> Self { + Self { + txs_per_block, + user_limit, + capture_state: None, + } + } +} + +#[async_trait] +impl Expectation for TxInclusionExpectation { + fn name(&self) -> &'static str { + Self::NAME + } + + async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> { + if self.capture_state.is_some() { + return Ok(()); + } + + let wallet_accounts = ctx.descriptors().config().wallet().accounts.clone(); + if wallet_accounts.is_empty() { + return Err(TxExpectationError::MissingAccounts.into()); + } + + let available = limited_user_count(self.user_limit, wallet_accounts.len()); + let (planned, _) = submission_plan(self.txs_per_block, ctx, available)?; + if planned == 0 { + return Err(TxExpectationError::NoPlannedTransactions.into()); + } + + let wallet_pks = wallet_accounts + .into_iter() + .take(planned) + .map(|account| account.secret_key.to_public_key()) + .collect::>(); + + let observed = Arc::new(AtomicU64::new(0)); + let receiver = ctx.block_feed().subscribe(); + let tracked_accounts = Arc::new(wallet_pks); + let spawn_accounts = Arc::clone(&tracked_accounts); + let spawn_observed = Arc::clone(&observed); + + tokio::spawn(async move { + let mut receiver = receiver; + let genesis_parent = HeaderId::from([0; 32]); + loop { + match receiver.recv().await { + Ok(record) => { + if record.block.header().parent_block() == genesis_parent { + continue; + } + + for tx in record.block.transactions() { + for note in &tx.mantle_tx().ledger_tx.outputs { + if spawn_accounts.contains(¬e.pk) { + spawn_observed.fetch_add(1, Ordering::Relaxed); + break; + } + } + } + } + Err(broadcast::error::RecvError::Lagged(_)) => {} + Err(broadcast::error::RecvError::Closed) => break, + } + } + }); + + self.capture_state = Some(CaptureState { + 
observed, + expected: planned as u64, + }); + + Ok(()) + } + + async fn evaluate(&mut self, _ctx: &RunContext) -> Result<(), DynError> { + let state = self + .capture_state + .as_ref() + .ok_or(TxExpectationError::NotCaptured)?; + + let observed = state.observed.load(Ordering::Relaxed); + let required = ((state.expected as f64) * MIN_INCLUSION_RATIO).ceil() as u64; + + if observed >= required { + Ok(()) + } else { + Err(TxExpectationError::InsufficientInclusions { observed, required }.into()) + } + } +} diff --git a/testing-framework/workflows/src/workloads/transaction/mod.rs b/testing-framework/workflows/src/workloads/transaction/mod.rs new file mode 100644 index 0000000..df5c612 --- /dev/null +++ b/testing-framework/workflows/src/workloads/transaction/mod.rs @@ -0,0 +1,5 @@ +mod expectation; +mod workload; + +pub use expectation::TxInclusionExpectation; +pub use workload::Workload; diff --git a/testing-framework/workflows/src/workloads/transaction/workload.rs b/testing-framework/workflows/src/workloads/transaction/workload.rs new file mode 100644 index 0000000..f852da6 --- /dev/null +++ b/testing-framework/workflows/src/workloads/transaction/workload.rs @@ -0,0 +1,249 @@ +use std::{ + collections::{HashMap, VecDeque}, + num::{NonZeroU64, NonZeroUsize}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use nomos_core::mantle::{ + GenesisTx as _, Note, SignedMantleTx, Transaction as _, Utxo, tx_builder::MantleTxBuilder, +}; +use testing_framework_config::topology::configs::wallet::WalletAccount; +use testing_framework_core::{ + scenario::{DynError, Expectation, RunContext, RunMetrics, Workload as ScenarioWorkload}, + topology::{GeneratedNodeConfig, GeneratedTopology}, +}; +use tokio::time::sleep; +use zksign::{PublicKey, SecretKey}; + +use super::expectation::TxInclusionExpectation; +use crate::workloads::util::submit_transaction_via_cluster; + +#[derive(Clone)] +pub struct Workload { + txs_per_block: NonZeroU64, + user_limit: Option, + accounts: 
Vec, +} + +#[derive(Clone)] +struct WalletInput { + account: WalletAccount, + utxo: Utxo, +} + +#[async_trait] +impl ScenarioWorkload for Workload { + fn name(&self) -> &'static str { + "tx_workload" + } + + fn expectations(&self) -> Vec> { + vec![Box::new(TxInclusionExpectation::new( + self.txs_per_block, + self.user_limit, + ))] + } + + fn init( + &mut self, + descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + let wallet_accounts = descriptors.config().wallet().accounts.clone(); + if wallet_accounts.is_empty() { + return Err("transaction workload requires seeded accounts".into()); + } + + let reference_node = descriptors + .validators() + .first() + .or_else(|| descriptors.executors().first()) + .ok_or("transaction workload requires at least one node in the topology")?; + + let utxo_map = wallet_utxo_map(reference_node); + let mut accounts = wallet_accounts + .into_iter() + .filter_map(|account| { + utxo_map + .get(&account.public_key()) + .copied() + .map(|utxo| WalletInput { account, utxo }) + }) + .collect::>(); + + apply_user_limit(&mut accounts, self.user_limit); + + if accounts.is_empty() { + return Err( + "transaction workload could not match any accounts to genesis UTXOs".into(), + ); + } + + self.accounts = accounts; + Ok(()) + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { + Submission::new(self, ctx)?.execute().await + } +} + +impl Workload { + /// Creates a workload that targets the provided transactions per block + /// rate. + #[must_use] + pub const fn new(txs_per_block: NonZeroU64) -> Self { + Self { + txs_per_block, + user_limit: None, + accounts: Vec::new(), + } + } + + /// Creates a workload from a raw rate, returning `None` when zero is given. + #[must_use] + pub fn with_rate(txs_per_block: u64) -> Option { + NonZeroU64::new(txs_per_block).map(Self::new) + } + + /// Returns the configured transactions per block rate. 
+ #[must_use] + pub const fn txs_per_block(&self) -> NonZeroU64 { + self.txs_per_block + } + + /// Limits the number of distinct users that will submit transactions. + #[must_use] + pub const fn with_user_limit(mut self, user_limit: Option) -> Self { + self.user_limit = user_limit; + self + } +} + +impl Default for Workload { + fn default() -> Self { + Self::new(NonZeroU64::new(1).expect("non-zero")) + } +} + +struct Submission<'a> { + plan: VecDeque, + ctx: &'a RunContext, + interval: Duration, +} + +impl<'a> Submission<'a> { + fn new(workload: &Workload, ctx: &'a RunContext) -> Result { + if workload.accounts.is_empty() { + return Err("transaction workload has no available accounts".into()); + } + + let (planned, interval) = + submission_plan(workload.txs_per_block, ctx, workload.accounts.len())?; + + let plan = workload + .accounts + .iter() + .take(planned) + .cloned() + .collect::>(); + + Ok(Self { + plan, + ctx, + interval, + }) + } + + async fn execute(mut self) -> Result<(), DynError> { + while let Some(input) = self.plan.pop_front() { + submit_wallet_transaction(self.ctx, &input).await?; + + if !self.interval.is_zero() { + sleep(self.interval).await; + } + } + + Ok(()) + } +} + +async fn submit_wallet_transaction(ctx: &RunContext, input: &WalletInput) -> Result<(), DynError> { + let signed_tx = Arc::new(build_wallet_transaction(input)?); + submit_transaction_via_cluster(ctx, signed_tx).await +} + +fn build_wallet_transaction(input: &WalletInput) -> Result { + let builder = MantleTxBuilder::new() + .add_ledger_input(input.utxo) + .add_ledger_output(Note::new(input.utxo.note.value, input.account.public_key())); + + let mantle_tx = builder.build(); + let tx_hash = mantle_tx.hash(); + + let signature = SecretKey::multi_sign( + std::slice::from_ref(&input.account.secret_key), + tx_hash.as_ref(), + ) + .map_err(|err| format!("transaction workload could not sign transaction: {err}"))?; + + SignedMantleTx::new(mantle_tx, Vec::new(), signature).map_err(|err| { + 
format!("transaction workload constructed invalid transaction: {err}").into() + }) +} + +fn wallet_utxo_map(node: &GeneratedNodeConfig) -> HashMap { + let genesis_tx = node.general.consensus_config.genesis_tx.clone(); + let ledger_tx = genesis_tx.mantle_tx().ledger_tx.clone(); + let tx_hash = ledger_tx.hash(); + + ledger_tx + .outputs + .iter() + .enumerate() + .map(|(idx, note)| (note.pk, Utxo::new(tx_hash, idx, *note))) + .collect() +} + +fn apply_user_limit(items: &mut Vec, user_limit: Option) { + if let Some(limit) = user_limit { + let allowed = limit.get().min(items.len()); + items.truncate(allowed); + } +} + +pub(super) fn limited_user_count(user_limit: Option, available: usize) -> usize { + user_limit.map_or(available, |limit| limit.get().min(available)) +} + +pub(super) fn submission_plan( + txs_per_block: NonZeroU64, + ctx: &RunContext, + available_accounts: usize, +) -> Result<(usize, Duration), DynError> { + if available_accounts == 0 { + return Err("transaction workload scheduled zero transactions".into()); + } + + let run_secs = ctx.run_duration().as_secs_f64(); + let block_secs = ctx + .run_metrics() + .block_interval_hint() + .unwrap_or_else(|| ctx.run_duration()) + .as_secs_f64(); + + let expected_blocks = run_secs / block_secs; + let requested = (expected_blocks * txs_per_block.get() as f64) + .floor() + .clamp(0.0, u64::MAX as f64) as u64; + + let planned = requested.min(available_accounts as u64) as usize; + if planned == 0 { + return Err("transaction workload scheduled zero transactions".into()); + } + + let interval = Duration::from_secs_f64(run_secs / planned as f64); + Ok((planned, interval)) +} diff --git a/testing-framework/workflows/src/workloads/util.rs b/testing-framework/workflows/src/workloads/util.rs new file mode 100644 index 0000000..0a94965 --- /dev/null +++ b/testing-framework/workflows/src/workloads/util.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use nomos_core::{ + block::Block, + mantle::{ + AuthenticatedMantleTx as _, 
SignedMantleTx, + ops::{Op, channel::MsgId}, + }, +}; +use testing_framework_core::scenario::{DynError, RunContext}; + +/// Scans a block and invokes the matcher for every operation until it returns +/// `Some(...)`. Returns `None` when no matching operation is found. +pub fn find_channel_op(block: &Block, matcher: &mut F) -> Option +where + F: FnMut(&Op) -> Option, +{ + for tx in block.transactions() { + for op in &tx.mantle_tx().ops { + if let Some(msg_id) = matcher(op) { + return Some(msg_id); + } + } + } + + None +} + +/// Submits a transaction to the cluster, fanning out across clients until one +/// succeeds. +pub async fn submit_transaction_via_cluster( + ctx: &RunContext, + tx: Arc, +) -> Result<(), DynError> { + ctx.cluster_client() + .try_all_clients(|client| { + let tx = Arc::clone(&tx); + Box::pin(async move { + client + .submit_transaction(&tx) + .await + .map_err(|err| -> DynError { err.into() }) + }) + }) + .await +}