Merge dev into master

This merge brings the following updates from the dev branch:

Pull Requests:
- #10: chore: update main repo dependencies (@hansieodendaal)
  Updates for main-repo changes, including the removal of DA config-related code and executor-node dependencies

- #8: Add support to use framework without running scenario (@andrussal)
  Add ManualCluster for controlling node lifecycle and reorganize the node-control logic (see the sketch after this list)

- #7: Remove DA (@andrussal)
  Remove DA workload usage from the framework, following the upstream node changes

- #6: feat: refactor for using external cucumber (@hansieodendaal)
  Remove all references to cucumber and prepare the Docker Compose workspace for an external repo root

- #4: Individual nodes connect at runtime (@andrussal)
  Add an option to connect to arbitrary peers when starting a node

- #2: feat: add cucumber auto deployer (@hansieodendaal)
  Add an example of selecting the deployer based on an environment variable (see the sketch below the contributor list)

- #1: chore: allow older curl versions as well (@hansieodendaal)
  Support both older and newer curl versions
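
As an illustration of PR #8's ManualCluster, here is a minimal sketch of driving node lifecycle directly instead of through a scenario run. Only the type name `ManualCluster` and the `TopologyBuilder` pattern come from this merge; the import paths and the `start`/`stop_node`/`start_node`/`shutdown` methods are assumptions, not the merged API.

```rust,ignore
// Hypothetical sketch of PR #8's ManualCluster: import paths and all method
// names below are illustrative assumptions, not the merged signatures.
use testing_framework_core::topology::TopologyBuilder; // path assumed

async fn manual_lifecycle() -> anyhow::Result<()> {
    // Topology construction follows the builder pattern documented below.
    let topology = TopologyBuilder::new()
        .network_star()
        .validators(3)
        .generate();

    // Bring nodes up without running a scenario plan (assumed constructor;
    // the `ManualCluster` import is omitted since its module is not shown).
    let cluster = ManualCluster::start(topology).await?;

    // Drive node lifecycle directly (assumed methods).
    cluster.stop_node(0).await?;
    cluster.start_node(0).await?;

    cluster.shutdown().await?;
    Ok(())
}
```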

Contributors:
- @andrussal
- @hansieodendaal
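
For PR #2's auto deployer, a minimal sketch of dispatching on an environment variable to pick a runner. `LocalDeployer`, `ComposeDeployer`, and the scenario-building calls appear elsewhere in this merge; the variable name `NOMOS_DEPLOYER`, the `Default` constructors, and what you do with the returned runner are assumptions.

```rust,ignore
// Sketch under stated assumptions: the env var name and Default constructors
// are illustrative; only the deployer type names come from this merge.
use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

async fn run_auto_deployer() -> anyhow::Result<()> {
    let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
        .expect_consensus_liveness()
        .build();

    // Pick the runner from the environment (variable name assumed).
    match std::env::var("NOMOS_DEPLOYER").as_deref() {
        Ok("compose") => {
            let _runner = ComposeDeployer::default().deploy(&plan).await?;
            // ... drive the run and check expectations
        }
        _ => {
            let _runner = LocalDeployer::default().deploy(&plan).await?;
            // ... drive the run and check expectations
        }
    }
    Ok(())
}
```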
Commit metadata:
- Author: Hansie Odendaal, 2026-01-25 10:11:16 +02:00 (committed by hansieodendaal)
- Parent: 7038aaa201
- Commit: 13497ba95b
- GPG key ID: D341DA7FC6098627 (no known key found for this signature in the database)
- 168 changed files with 2311 additions and 3709 deletions


@ -242,7 +242,6 @@ jobs:
POL_PROOF_DEV_MODE: true
LOCAL_DEMO_RUN_SECS: 120
LOCAL_DEMO_VALIDATORS: 1
LOCAL_DEMO_EXECUTORS: 1
NOMOS_CIRCUITS: ${{ github.workspace }}/.tmp/nomos-circuits
NOMOS_KZGRS_PARAMS_PATH: ${{ github.workspace }}/.tmp/kzgrs_test_params
CARGO_INCREMENTAL: 0

.gitignore (vendored): 1 line changed

@ -17,6 +17,7 @@ NOMOS_RUST_SOURCES_ONLY.txt
dump.zsh
testing-framework/assets/stack/bin/
testing-framework/assets/stack/kzgrs_test_params/
null
# Local test artifacts (kept when NOMOS_TESTS_KEEP_LOGS=1)
tests/workflows/.tmp*

Cargo.lock (generated): 2241 lines changed (file diff suppressed because it is too large)


@ -40,50 +40,42 @@ testing-framework-runner-local = { default-features = false, path = "testing-f
testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" }
# Logos git dependencies (pinned to latest master)
broadcast-service = { package = "logos-blockchain-chain-broadcast-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-broadcast-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
cfgsync_tf = { default-features = false, path = "testing-framework/tools/cfgsync_tf" }
chain-leader = { package = "logos-blockchain-chain-leader-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71", features = [
chain-leader = { default-features = false, features = [
"pol-dev-mode",
] }
chain-network = { package = "logos-blockchain-chain-network-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
chain-service = { package = "logos-blockchain-chain-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
common-http-client = { package = "logos-blockchain-common-http-client", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
cryptarchia-engine = { package = "logos-blockchain-cryptarchia-engine", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
cryptarchia-sync = { package = "logos-blockchain-cryptarchia-sync", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
executor-http-client = { package = "logos-blockchain-executor-http-client", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
groth16 = { package = "logos-blockchain-groth16", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
key-management-system-service = { package = "logos-blockchain-key-management-system-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
kzgrs = { package = "logos-blockchain-kzgrs", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
kzgrs-backend = { package = "logos-blockchain-kzgrs-backend", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
logos-blockchain-executor = { package = "logos-blockchain-executor", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-api = { package = "logos-blockchain-api-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-blend-message = { package = "logos-blockchain-blend-message", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-blend-service = { package = "logos-blockchain-blend-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-cli = { package = "logos-blockchain-cli", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-core = { package = "logos-blockchain-core", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-da-dispersal = { package = "logos-blockchain-da-dispersal-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-da-network-core = { package = "logos-blockchain-da-network-core", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-da-network-service = { package = "logos-blockchain-da-network-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-da-sampling = { package = "logos-blockchain-da-sampling-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-da-verifier = { package = "logos-blockchain-da-verifier-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-http-api-common = { package = "logos-blockchain-http-api-common", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-ledger = { package = "logos-blockchain-ledger", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-libp2p = { package = "logos-blockchain-libp2p", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-network = { package = "logos-blockchain-network-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-node = { package = "logos-blockchain-node", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-sdp = { package = "logos-blockchain-sdp-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-time = { package = "logos-blockchain-time-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-tracing = { package = "logos-blockchain-tracing", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-tracing-service = { package = "logos-blockchain-tracing-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-utils = { package = "logos-blockchain-utils", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
nomos-wallet = { package = "logos-blockchain-wallet-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
poc = { package = "logos-blockchain-poc", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
pol = { package = "logos-blockchain-pol", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
subnetworks-assignations = { package = "logos-blockchain-subnetworks-assignations", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
tests = { package = "logos-blockchain-tests", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
tx-service = { package = "logos-blockchain-tx-service", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
wallet = { package = "logos-blockchain-wallet", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
zksign = { package = "logos-blockchain-zksign", default-features = false, git = "https://github.com/logos-co/nomos-node.git", rev = "97b411ed0ce269e72a6253c8cd48eea41db5ab71" }
], git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-leader-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-network-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-common-http-client", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-engine", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
kzgrs-backend = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs-backend", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cli", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-libp2p", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-network-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-node", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-sdp-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-time-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-utils", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-poc", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-pol", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tests", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tx-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-zksign", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
# External crates
async-trait = { default-features = false, version = "0.1" }
@ -94,7 +86,7 @@ overwatch = { default-features = false, git = "https://github.com/logos-c
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
rand = { default-features = false, version = "0.8" }
reqwest = { default-features = false, version = "0.12" }
serde = { default-features = true, version = "1.0", features = ["derive"] }
serde = { default-features = true, features = ["derive"], version = "1.0" }
serde_json = { default-features = false, version = "1.0" }
serde_with = { default-features = false, version = "3.14.0" }
serde_yaml = { default-features = false, version = "0.9.33" }


@ -126,7 +126,6 @@ Key environment variables for customization:
| `POL_PROOF_DEV_MODE=true` | **Required** — Disable expensive proof generation (set automatically by `scripts/run/run-examples.sh`) | (none) |
| `NOMOS_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` |
| `NOMOS_DEMO_VALIDATORS` | Number of validator nodes | Varies by example |
| `NOMOS_DEMO_EXECUTORS` | Number of executor nodes | Varies by example |
| `NOMOS_LOG_DIR` | Directory for persistent log files | (temporary) |
| `NOMOS_LOG_LEVEL` | Logging verbosity | `info` |


@ -81,7 +81,7 @@ Helper utilities:
**Compose runner** includes:
- **Prometheus** at `http://localhost:9090` (metrics scraping)
- Node metrics exposed per validator/executor
- Node metrics exposed per validator
- Access in expectations: `ctx.telemetry().prometheus().map(|p| p.base_url())`
**Logging** controlled by:


@ -17,7 +17,7 @@ use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(5)
.transactions_with(|txs| txs.rate(5).users(3))
.da_with(|da| da.channel_rate(1).blob_rate(1).headroom_percent(20))
@ -56,7 +56,7 @@ let da_workload = da::Workload::with_rate(
da::Workload::default_headroom_percent(),
);
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(5)
.with_workload(tx_workload)
.with_workload(da_workload)
@ -97,7 +97,7 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::transaction};
let tx_workload = transaction::Workload::with_rate(5)
.expect("transaction rate must be non-zero");
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(5)
.with_workload(tx_workload) // direct instantiation
.expect_consensus_liveness() // DSL


@ -142,7 +142,7 @@ The framework is consumed via **runnable example binaries** in `examples/src/bin
**Recommended:** Use the convenience script:
```bash
scripts/run/run-examples.sh -t <duration> -v <validators> -e <executors> <mode>
scripts/run/run-examples.sh -t <duration> -v <validators> <mode>
# mode: host, compose, or k8s
```
@ -169,7 +169,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| txs.rate(5).users(20))
.da_with(|da| da.channel_rate(1).blob_rate(2))
@ -180,7 +180,7 @@ pub fn scenario_plan() -> testing_framework_core::scenario::Scenario<()> {
```
**Key API Points:**
- Topology uses `.topology_with(|t| { t.validators(N).executors(M) })` closure pattern
- Topology uses `.topology_with(|t| { t.validators(N) })` closure pattern
- Workloads are configured via `_with` closures (`transactions_with`, `da_with`, `chaos_with`)
- Chaos workloads require `.enable_node_control()` and a compatible runner
@ -197,14 +197,14 @@ Three deployer implementations:
**Compose-specific features:**
- Observability is external (set `NOMOS_METRICS_QUERY_URL` / `NOMOS_METRICS_OTLP_INGEST_URL` / `NOMOS_GRAFANA_URL` as needed)
- Optional OTLP trace/metrics endpoints (`NOMOS_OTLP_ENDPOINT`, `NOMOS_OTLP_METRICS_ENDPOINT`)
- Node control for chaos testing (restart validators/executors)
- Node control for chaos testing (restart validators)
## Assets and Images
### Docker Image
Built via `scripts/build/build_test_image.sh`:
- Embeds KZG circuit parameters and binaries from `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
- Includes runner scripts: `run_nomos_node.sh`, `run_nomos_executor.sh`
- Includes runner scripts: `run_nomos_node.sh`
- Tagged as `NOMOS_TESTNET_IMAGE` (default: `logos-blockchain-testing:local`)
- **Recommended:** Use prebuilt bundle via `scripts/build/build-bundle.sh --platform linux` and set `NOMOS_BINARIES_TAR` before building image
@ -217,7 +217,7 @@ KZG parameters required for DA workloads:
### Compose Stack
Templates and configs in `testing-framework/runners/compose/assets/`:
- `docker-compose.yml.tera` — Stack template (validators, executors)
- `docker-compose.yml.tera` — Stack template (validators)
- Cfgsync config: `testing-framework/assets/stack/cfgsync.yaml`
- Monitoring assets (not deployed by the framework): `testing-framework/assets/stack/monitoring/`
@ -235,7 +235,7 @@ Templates and configs in `testing-framework/runners/compose/assets/`:
- **Compose runner:** Default logs to container stdout/stderr (`docker logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory).
- **K8s runner:** Logs to pod stdout/stderr (`kubectl logs`). To write per-node files, set `tracing_settings.logger: !File` in `testing-framework/assets/stack/cfgsync.yaml` (and mount a writable directory).
**File naming:** Per-node files use prefix `nomos-node-{index}` or `nomos-executor-{index}` (may include timestamps).
**File naming:** Per-node files use prefix `nomos-node-{index}` (may include timestamps).
## Observability


@ -37,14 +37,12 @@ use testing_framework_workflows::ScenarioBuilderExt;
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star() // Star network (one gateway + nodes)
.validators(3) // 3 validator nodes
.executors(1) // 1 executor node
})
```
**What goes in topology?**
- Node counts (validators, executors)
- Node counts (validators)
- Network shape (`network_star()` is currently the only built-in layout)
- Role split (validators vs. executors)
**What does NOT go in topology?**
- Traffic rates (that's workloads)
@ -139,7 +137,6 @@ async fn hello_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.executors(1)
})
.wallets(20)
.transactions_with(|tx| tx.rate(10).users(5))
@ -207,7 +204,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
#[tokio::test]
async fn test_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(3).executors(1)
t.network_star().validators(3)
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
@ -222,7 +219,7 @@ async fn test_consensus_liveness() -> Result<()> {
#[tokio::test]
async fn test_transaction_inclusion() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(2).executors(1)
t.network_star().validators(2)
})
.wallets(10)
.transactions_with(|tx| tx.rate(5).users(5))
@ -248,13 +245,13 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn minimal_topology() -> ScenarioBuilder {
ScenarioBuilder::topology_with(|t| {
t.network_star().validators(2).executors(1)
t.network_star().validators(2)
})
}
pub fn production_like_topology() -> ScenarioBuilder {
ScenarioBuilder::topology_with(|t| {
t.network_star().validators(7).executors(3)
t.network_star().validators(7)
})
}
@ -296,11 +293,10 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
async fn test_liveness_with_topology(validators: usize, executors: usize) -> Result<()> {
async fn test_liveness_with_topology(validators: usize) -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(validators)
.executors(executors)
})
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
@ -335,9 +331,8 @@ async fn liveness_large() -> Result<()> {
### Topology
**Do include:**
- Node counts (`.validators(3)`, `.executors(1)`)
- Node counts (`.validators(3)`)
- Network shape (`.network_star()`)
- Role split (validators vs. executors)
**Don't include:**
- Traffic rates (workload concern)
@ -372,7 +367,7 @@ async fn liveness_large() -> Result<()> {
## Best Practices
1. **Keep scenarios focused**: One scenario = one behavior under test
2. **Start small**: 2-3 validators, 1 executor, 30-60 seconds
2. **Start small**: 2-3 validators, 30-60 seconds
3. **Use descriptive names**: `test_consensus_survives_validator_restart` not `test_1`
4. **Extract common patterns**: Shared topology builders, helper functions
5. **Document intent**: Add comments explaining what you're testing and why


@ -38,7 +38,6 @@ pub fn standard_da_topology() -> GeneratedTopology {
TopologyBuilder::new()
.network_star()
.validators(3)
.executors(2)
.generate()
}
```
@ -200,7 +199,6 @@ docker stats # monitor resource usage
**Minimal viable topology**
- Consensus: 3 validators (minimum for Byzantine fault tolerance)
- DA: 2+ executors (test dispersal and sampling)
- Network: Star topology (simplest for debugging)
**Workload rate selection**


@ -9,7 +9,7 @@ recovery. The built-in restart workload lives in
## How it works
- Requires `NodeControlCapability` (`enable_node_control()` in the scenario
builder) and a runner that provides a `NodeControlHandle`.
- Randomly selects nodes (validators, executors) to restart based on your
- Randomly selects nodes (validators) to restart based on your
include/exclude flags.
- Respects min/max delay between restarts and a target cooldown to avoid
flapping the same node too frequently.
@ -29,14 +29,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe
pub fn random_restart_plan() -> testing_framework_core::scenario::Scenario<
testing_framework_core::scenario::NodeControlCapability,
> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.enable_node_control()
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min delay
Duration::from_secs(75), // max delay
Duration::from_secs(120), // target cooldown
true, // include validators
true, // include executors
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(150))
@ -53,6 +52,6 @@ pub fn random_restart_plan() -> testing_framework_core::scenario::Scenario<
## Best practices
- Keep delays/cooldowns realistic; avoid back-to-back restarts that would never
happen in production.
- Limit chaos scope: toggle validators vs executors based on what you want to
- Limit chaos scope: toggle validators based on what you want to
test.
- Combine with observability: monitor metrics/logs to explain failures.


@ -79,7 +79,6 @@ jobs:
with:
path: |
../nomos-node/target/release/nomos-node
../nomos-node/target/release/nomos-executor
key: ${{ runner.os }}-nomos-${{ hashFiles('../nomos-node/**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-nomos-


@ -23,7 +23,6 @@ pub fn topology() -> Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology (all connect to seed node)
.validators(3) // Number of validator nodes
.executors(2) // Number of executor nodes
})
}
```
@ -35,7 +34,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn wallets_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50) // Seed 50 funded wallet accounts
.build()
}
@ -48,7 +47,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn transactions_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@ -65,7 +64,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn da_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50)
.da_with(|da| {
da.channel_rate(1) // number of DA channels to run
@ -85,7 +84,7 @@ use testing_framework_core::scenario::{NodeControlCapability, ScenarioBuilder};
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub fn chaos_plan() -> testing_framework_core::scenario::Scenario<NodeControlCapability> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.enable_node_control() // Enable node control capability
.chaos_with(|c| {
c.restart() // Random restart chaos
@ -105,7 +104,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn expectations_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.expect_consensus_liveness() // Assert blocks are produced continuously
.build()
}
@ -120,7 +119,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn run_duration_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
.build()
}
@ -133,7 +132,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn build_plan() -> testing_framework_core::scenario::Scenario<()> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0)).build() // Construct the final Scenario
ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario
}
```
@ -165,7 +164,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn execution() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.expect_consensus_liveness()
.build();
@ -188,7 +187,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block


@ -32,21 +32,18 @@ Control which runner to use and the test topology:
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (all runners) |
| `NOMOS_DEMO_EXECUTORS` | 1 | Number of executors (all runners) |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (all runners) |
| `LOCAL_DEMO_VALIDATORS` | — | Legacy: Number of validators (host runner only) |
| `LOCAL_DEMO_EXECUTORS` | — | Legacy: Number of executors (host runner only) |
| `LOCAL_DEMO_RUN_SECS` | — | Legacy: Run duration (host runner only) |
| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "validators×executors" (e.g., `3x2`) |
| `COMPOSE_NODE_PAIRS` | — | Compose-specific topology format: "validators" (e.g., `3`) |
**Example:**
```bash
# Run with 5 validators, 2 executors, for 120 seconds
# Run with 5 validators, for 120 seconds
NOMOS_DEMO_VALIDATORS=5 \
NOMOS_DEMO_EXECUTORS=2 \
NOMOS_DEMO_RUN_SECS=120 \
scripts/run/run-examples.sh -t 120 -v 5 -e 2 host
scripts/run/run-examples.sh -t 120 -v 5 host
```
---
@ -58,14 +55,12 @@ Required for host runner when not using helper scripts:
| Variable | Required | Default | Effect |
|----------|----------|---------|--------|
| `NOMOS_NODE_BIN` | Yes (host) | — | Path to `nomos-node` binary |
| `NOMOS_EXECUTOR_BIN` | Yes (host) | — | Path to `nomos-executor` binary |
| `NOMOS_NODE_PATH` | No | — | Path to nomos-node git checkout (dev workflow) |
**Example:**
```bash
export NOMOS_NODE_BIN=/path/to/nomos-node/target/release/nomos-node
export NOMOS_EXECUTOR_BIN=/path/to/nomos-node/target/release/nomos-executor
```
---
@ -309,7 +304,7 @@ scripts/run/run-examples.sh -t 120 -v 5 -e 2 compose
## Node Configuration (Advanced)
Node-level configuration passed through to nomos-node/nomos-executor:
Node-level configuration passed through to nomos-node:
| Variable | Default | Effect |
|----------|---------|--------|


@ -13,9 +13,9 @@ Realistic advanced scenarios demonstrating framework capabilities for production
| Example | Topology | Workloads | Deployer | Key Feature |
|---------|----------|-----------|----------|-------------|
| Load Progression | 3 validators + 2 executors | Increasing tx rate | Compose | Dynamic load testing |
| Sustained Load | 4 validators + 2 executors | High tx + DA rate | Compose | Stress testing |
| Aggressive Chaos | 4 validators + 2 executors | Frequent restarts + traffic | Compose | Resilience validation |
| Load Progression | 3 validators | Increasing tx rate | Compose | Dynamic load testing |
| Sustained Load | 4 validators | High tx + DA rate | Compose | Stress testing |
| Aggressive Chaos | 4 validators | Frequent restarts + traffic | Compose | Resilience validation |
## Load Progression Test
@ -34,7 +34,7 @@ pub async fn load_progression_test() -> Result<()> {
println!("Testing with rate: {}", rate);
let mut plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| txs.rate(rate).users(20))
.expect_consensus_liveness()
@ -65,7 +65,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn sustained_load_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.wallets(100)
.transactions_with(|txs| txs.rate(15).users(50))
.da_with(|da| da.channel_rate(2).blob_rate(3))
@ -96,7 +96,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn aggressive_chaos_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.enable_node_control()
.wallets(50)
.transactions_with(|txs| txs.rate(10).users(20))


@ -13,7 +13,7 @@ and expectations.
- `compose_runner.rs` — Docker Compose (requires image built)
- `k8s_runner.rs` — Kubernetes (requires cluster access and image loaded)
**Recommended:** Use `scripts/run/run-examples.sh -t <duration> -v <validators> -e <executors> <mode>` where mode is `host`, `compose`, or `k8s`.
**Recommended:** Use `scripts/run/run-examples.sh -t <duration> -v <validators> <mode>` where mode is `host`, `compose`, or `k8s`.
**Alternative:** Direct cargo run: `POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin <name>`
@ -34,7 +34,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn simple_consensus() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
.build();
@ -62,7 +62,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn transaction_workload() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.wallets(20)
.transactions_with(|txs| txs.rate(5).users(10))
.expect_consensus_liveness()
@ -92,7 +92,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn da_and_transactions() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(30)
.transactions_with(|txs| txs.rate(5).users(15))
.da_with(|da| da.channel_rate(2).blob_rate(2))
@ -123,7 +123,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn chaos_resilience() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.enable_node_control()
.wallets(20)
.transactions_with(|txs| txs.rate(3).users(10))


@ -201,7 +201,7 @@ impl Deployer<()> for MyDeployer {
async fn deploy(&self, scenario: &Scenario<()>) -> Result<Runner, Self::Error> {
// 1. Launch nodes using scenario.topology()
// 2. Wait for readiness (e.g., consensus info endpoint responds)
// 3. Build NodeClients for validators/executors
// 3. Build NodeClients for validators
// 4. Spawn a block feed for expectations (optional but recommended)
// 5. Create NodeControlHandle if you support restarts (optional)
// 6. Return a Runner wrapping RunContext + CleanupGuard
@ -345,7 +345,7 @@ impl MyWorkloadDsl for ScenarioBuilder {
Users can then call:
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.my_workload_with(|w| {
w.target_rate(10)
.some_option(true)


@ -2,11 +2,6 @@
- **Validator**: node role responsible for participating in consensus and block
production.
- **Executor**: a validator node with the DA dispersal service enabled. Executors
can submit transactions and disperse blob data to the DA network, in addition
to performing all validator functions.
- **DA (Data Availability)**: subsystem ensuring blobs or channel data are
published and retrievable for validation.
- **Deployer**: component that provisions infrastructure (spawns processes,
creates containers, or launches pods), waits for readiness, and returns a
Runner. Examples: LocalDeployer, ComposeDeployer, K8sDeployer.


@ -93,7 +93,7 @@ impl<Caps> YourWorkloadDslExt for testing_framework_core::scenario::Builder<Caps
}
pub fn use_in_examples() {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.your_workload_with(|w| w.some_config())
.build();
}
@ -136,7 +136,7 @@ impl<Caps> YourExpectationDslExt for testing_framework_core::scenario::Builder<C
}
pub fn use_in_examples() {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_your_condition()
.build();
}


@ -6,7 +6,7 @@ tests and full-system validation by letting teams describe a cluster layout,
drive meaningful traffic, and assert the outcomes in one coherent plan.
It is for protocol engineers, infrastructure operators, and QA teams who need
repeatable confidence that validators, executors, and data-availability
repeatable confidence that validator and data-availability
components work together under network and timing constraints.
Multi-node integration testing is required because many Logos behaviors—block
@ -23,7 +23,6 @@ Here's the conceptual shape of every test you'll write:
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.executors(2)
})
// 2. Add workloads (traffic)
.transactions_with(|tx| tx.rate(10).users(5))


@ -9,7 +9,7 @@ Comprehensive guide to log collection, metrics, and debugging across all runners
| Component | Controlled By | Purpose |
|-----------|--------------|---------|
| **Framework binaries** (`cargo run -p runner-examples --bin local_runner`) | `RUST_LOG` | Runner orchestration, deployment logs |
| **Node processes** (validators, executors spawned by runner) | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (+ `NOMOS_LOG_DIR` on host runner) | Consensus, DA, mempool, network logs |
| **Node processes** (validators spawned by runner) | `NOMOS_LOG_LEVEL`, `NOMOS_LOG_FILTER` (+ `NOMOS_LOG_DIR` on host runner) | Consensus, DA, mempool, network logs |
**Common mistake:** Setting `RUST_LOG=debug` only increases verbosity of the runner binary itself. Node logs remain at their default level unless you also set `NOMOS_LOG_LEVEL=debug`.
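
A minimal illustration of setting both, using the example binary referenced in this guide:

```bash
# Raise verbosity for the runner binary (RUST_LOG) and for the spawned
# node processes (NOMOS_LOG_LEVEL) at the same time.
RUST_LOG=debug \
NOMOS_LOG_LEVEL=debug \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner
```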
@ -56,12 +56,10 @@ When `NOMOS_LOG_DIR` is set, each node writes logs to separate files:
**File naming pattern:**
- **Validators**: Prefix `nomos-node-0`, `nomos-node-1`, etc. (may include timestamp suffix)
- **Executors**: Prefix `nomos-executor-0`, `nomos-executor-1`, etc. (may include timestamp suffix)
**Example filenames:**
- `nomos-node-0.2024-12-18T14-30-00.log`
- `nomos-node-1.2024-12-18T14-30-00.log`
- `nomos-executor-0.2024-12-18T14-30-00.log`
**Local runner note:** The local runner uses per-run temporary directories under the current working directory and removes them after the run unless `NOMOS_TESTS_KEEP_LOGS=1`. Use `NOMOS_LOG_DIR=/path/to/logs` to write per-node log files to a stable location.
@ -109,7 +107,7 @@ cargo run -p runner-examples --bin local_runner
# After test completes:
ls /tmp/local-logs/
# Files with prefix: nomos-node-0*, nomos-node-1*, nomos-executor-0*
# Files with prefix: nomos-node-0*, nomos-node-1*
# May include timestamps in filename
```
@ -186,10 +184,9 @@ kubectl get pods
# Stream logs using label selectors (recommended)
# Helm chart labels:
# - nomos/logical-role=validator|executor
# - nomos/validator-index / nomos/executor-index
# - nomos/logical-role=validator
# - nomos/validator-index
kubectl logs -l nomos/logical-role=validator -f
kubectl logs -l nomos/logical-role=executor -f
# Stream logs from specific pod
kubectl logs -f nomos-validator-0
@ -203,11 +200,9 @@ kubectl logs --previous -l nomos/logical-role=validator
```bash
# Using label selectors
kubectl logs -l nomos/logical-role=validator --tail=1000 > all-validators.log
kubectl logs -l nomos/logical-role=executor --tail=1000 > all-executors.log
# Specific pods
kubectl logs nomos-validator-0 > validator-0.log
kubectl logs nomos-executor-1 > executor-1.log
```
**K8s debugging variables:**


@ -325,7 +325,6 @@ resilience testing:
**Supported:**
- Restart validators (`restart_validator`)
- Restart executors (`restart_executor`)
- Random restart workload via `.chaos().restart()`
**Not Yet Supported:**
@ -377,7 +376,6 @@ use testing_framework_core::scenario::DynError;
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
async fn restart_executor(&self, index: usize) -> Result<(), DynError>;
}
```


@ -13,7 +13,7 @@ Operational readiness focuses on prerequisites, environment fit, and clear signa
**Prerequisites:**
- `versions.env` file at repository root (required by helper scripts)
- Node binaries (`nomos-node`, `nomos-executor`) available or built on demand
- Node binaries (`nomos-node`) available or built on demand
- Platform requirements met (Docker for compose, cluster access for k8s)
- Circuit assets for DA workloads


@ -38,7 +38,7 @@ This file is required and should define:
## Node Binaries
Scenarios need compiled `nomos-node` and `nomos-executor` binaries.
Scenarios need a compiled `nomos-node` binary.
### Option 1: Use Helper Scripts (Recommended)
@ -49,7 +49,7 @@ scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
This automatically:
- Clones/updates nomos-node checkout
- Builds required binaries
- Sets `NOMOS_NODE_BIN` / `NOMOS_EXECUTOR_BIN`
- Sets `NOMOS_NODE_BIN`
### Option 2: Manual Build
@ -57,11 +57,10 @@ If you have a sibling `nomos-node` checkout:
```bash
cd ../nomos-node
cargo build --release --bin nomos-node --bin nomos-executor
cargo build --release --bin nomos-node
# Set environment variables
export NOMOS_NODE_BIN=$PWD/target/release/nomos-node
export NOMOS_EXECUTOR_BIN=$PWD/target/release/nomos-executor
# Return to testing framework
cd ../nomos-testing
@ -82,7 +81,6 @@ CI workflows use prebuilt artifacts:
run: |
tar -xzf .tmp/nomos-binaries-linux-*.tar.gz -C .tmp/
export NOMOS_NODE_BIN=$PWD/.tmp/nomos-node
export NOMOS_EXECUTOR_BIN=$PWD/.tmp/nomos-executor
```
## Circuit Assets (KZG Parameters)
@ -253,7 +251,6 @@ docker images | grep logos-blockchain-testing
# 6. For host runner: verify node binaries (if not using scripts)
$NOMOS_NODE_BIN --version
$NOMOS_EXECUTOR_BIN --version
```
## Recommended: Use Helper Scripts


@ -13,7 +13,7 @@ The Logos Testing Framework enables you to test consensus, data availability, an
**Everything in this framework is a Scenario.**
A Scenario is a controlled experiment over time, composed of:
- **Topology** — The cluster shape (validators, executors, network layout)
- **Topology** — The cluster shape (validators, network layout)
- **Workloads** — Traffic and conditions that exercise the system (transactions, DA, chaos)
- **Expectations** — Success criteria verified after execution (liveness, inclusion, recovery)
- **Duration** — The time window for the experiment
@ -37,7 +37,7 @@ flowchart LR
```
1. **Define Scenario** — Describe your test: topology, workloads, and success criteria
2. **Deploy Topology** — Launch validators and executors using host, compose, or k8s runners
2. **Deploy Topology** — Launch validators using host, compose, or k8s runners
3. **Run Workloads** — Drive transactions, DA traffic, and chaos operations
4. **Check Expectations** — Verify consensus liveness, inclusion, and system health
@ -82,7 +82,6 @@ async fn main() -> anyhow::Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.validators(3)
.executors(1)
})
.transactions_with(|tx| tx.rate(10).users(5))
.expect_consensus_liveness()
@ -123,11 +122,9 @@ Check the **[Developer Reference](part-iii.md)** to implement custom workloads,
## Project Context
**Logos** is a modular blockchain protocol composed of validators, executors, and a data-availability (DA) subsystem:
**Logos** is a modular blockchain protocol composed of validators and a data-availability (DA) subsystem:
- **Validators** participate in consensus and produce blocks
- **Executors** are validators with the DA dispersal service enabled. They perform all validator functions plus submit blob data to the DA network
- **Data Availability (DA)** ensures that blob data submitted via channel operations in transactions is published and retrievable by the network
These roles interact tightly, which is why meaningful testing must be performed in multi-node environments that include real networking, timing, and DA interaction.


@ -50,14 +50,12 @@ The framework ships with runnable example binaries in `examples/src/bin/`.
scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
```
This handles circuit setup, binary building, and runs a complete scenario: 1 validator + 1 executor, mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration.
**Note:** The DA workload attaches `DaWorkloadExpectation`, and channel/blob publishing is slower than tx submission. If you see `DaWorkloadExpectation` failures, rerun with a longer duration (e.g., `-t 120`), especially on CI or slower machines.
This handles circuit setup and binary building, then runs a complete scenario: 1 validator, a mixed transaction + DA workload (5 tx/block + 1 channel + 1 blob), 60s duration.
**Alternative:** Direct cargo run (requires manual setup):
```bash
# Requires circuits in place and NOMOS_NODE_BIN/NOMOS_EXECUTOR_BIN set
# Requires circuits in place and NOMOS_NODE_BIN set
POL_PROOF_DEV_MODE=true cargo run -p runner-examples --bin local_runner
```
@ -72,8 +70,8 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_local_demo() -> Result<()> {
// Define the scenario (1 validator + 1 executor, tx + DA workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(1))
// Define the scenario (1 validator, tx + DA workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@ -121,7 +119,6 @@ pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology: all nodes connect to seed
.validators(1) // 1 validator node
.executors(1) // 1 executor node (validator + DA dispersal)
})
}
```
@ -216,7 +213,7 @@ pub async fn step_6_deploy_and_execute() -> Result<()> {
**With run-examples.sh** (recommended):
```bash
# Scale up to 3 validators + 2 executors, run for 2 minutes
# Scale up to 3 validators, run for 2 minutes
scripts/run/run-examples.sh -t 120 -v 3 -e 2 host
```
@ -225,7 +222,6 @@ scripts/run/run-examples.sh -t 120 -v 3 -e 2 host
```bash
# Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars)
NOMOS_DEMO_VALIDATORS=3 \
NOMOS_DEMO_EXECUTORS=2 \
NOMOS_DEMO_RUN_SECS=120 \
POL_PROOF_DEV_MODE=true \
cargo run -p runner-examples --bin local_runner


@ -8,19 +8,18 @@ Use `scripts/run/run-examples.sh` for all modes—it handles all setup automatic
```bash
# Host mode (local processes)
scripts/run/run-examples.sh -t 60 -v 3 -e 1 host
scripts/run/run-examples.sh -t 60 -v 3 host
# Compose mode (Docker Compose)
scripts/run/run-examples.sh -t 60 -v 3 -e 1 compose
scripts/run/run-examples.sh -t 60 -v 3 compose
# K8s mode (Kubernetes)
scripts/run/run-examples.sh -t 60 -v 3 -e 1 k8s
scripts/run/run-examples.sh -t 60 -v 3 k8s
```
**Parameters:**
- `-t 60` — Run duration in seconds
- `-v 3` — Number of validators
- `-e 1` — Number of executors
- `host|compose|k8s` — Deployment mode
This script handles:
@ -102,7 +101,6 @@ For manual control, run the `local_runner` binary directly:
```bash
POL_PROOF_DEV_MODE=true \
NOMOS_NODE_BIN=/path/to/nomos-node \
NOMOS_EXECUTOR_BIN=/path/to/nomos-executor \
cargo run -p runner-examples --bin local_runner
```
@ -111,10 +109,8 @@ cargo run -p runner-examples --bin local_runner
| Variable | Default | Effect |
|----------|---------|--------|
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators (legacy: `LOCAL_DEMO_VALIDATORS`) |
| `NOMOS_DEMO_EXECUTORS` | 1 | Number of executors (legacy: `LOCAL_DEMO_EXECUTORS`) |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds (legacy: `LOCAL_DEMO_RUN_SECS`) |
| `NOMOS_NODE_BIN` | — | Path to nomos-node binary (required) |
| `NOMOS_EXECUTOR_BIN` | — | Path to nomos-executor binary (required) |
| `NOMOS_LOG_DIR` | None | Directory for per-node log files |
| `NOMOS_TESTS_KEEP_LOGS` | 0 | Keep per-run temporary directories (useful for debugging/CI) |
| `NOMOS_TESTS_TRACING` | false | Enable debug tracing preset |
@ -176,9 +172,8 @@ cargo run -p runner-examples --bin compose_runner
| `NOMOS_TESTNET_IMAGE` | — | Image tag (required, must match built image) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators |
| `NOMOS_DEMO_EXECUTORS` | 1 | Number of executors |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "validators×executors" (e.g., `3x2`) |
| `COMPOSE_NODE_PAIRS` | — | Alternative topology format: "validators" (e.g., `3`) |
| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query |
| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |
| `NOMOS_GRAFANA_URL` | None | Grafana base URL for printing/logging |
@ -249,7 +244,6 @@ cargo run -p runner-examples --bin k8s_runner
| `NOMOS_TESTNET_IMAGE` | — | Image tag (required) |
| `POL_PROOF_DEV_MODE` | — | **REQUIRED**: Set to `true` for all runners |
| `NOMOS_DEMO_VALIDATORS` | 1 | Number of validators |
| `NOMOS_DEMO_EXECUTORS` | 1 | Number of executors |
| `NOMOS_DEMO_RUN_SECS` | 60 | Run duration in seconds |
| `NOMOS_METRICS_QUERY_URL` | None | Prometheus-compatible base URL for runner to query (PromQL) |
| `NOMOS_METRICS_OTLP_INGEST_URL` | None | Full OTLP HTTP ingest URL for node metrics export |


@ -37,7 +37,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
async fn run_once() -> anyhow::Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(20)
.transactions_with(|tx| tx.rate(1).users(5))
.expect_consensus_liveness()


@ -61,7 +61,7 @@ flowchart TB
Declare a topology, attach workloads and expectations, and set the run window. The plan is the single source of truth for what will happen.
**Key actions:**
- Define cluster shape (validators, executors, network topology)
- Define cluster shape (validators, network topology)
- Configure workloads (transaction rate, DA traffic, chaos patterns)
- Attach expectations (liveness, inclusion, custom checks)
- Set timing parameters (run duration, cooldown period)
@ -74,7 +74,7 @@ Hand the plan to a deployer. It provisions the environment on the chosen backend
**Key actions:**
- Provision infrastructure (processes, containers, or pods)
- Launch validator and executor nodes
- Launch validator nodes
- Wait for readiness probes (HTTP endpoints respond)
- Establish node connectivity and metrics endpoints
- Spawn BlockFeed for real-time block observation


@ -14,7 +14,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn declarative_over_imperative() {
// Good: declarative
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -22,7 +22,7 @@ pub fn declarative_over_imperative() {
.build();
// Bad: imperative (framework doesn't work this way)
// spawn_validator(); spawn_executor();
// spawn_validator();
// loop { submit_tx(); check_block(); }
}
```
@ -47,7 +47,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
pub fn protocol_time_not_wall_time() {
// Good: protocol-oriented thinking
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -84,7 +84,7 @@ use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub fn determinism_first() {
// Separate: functional test (deterministic)
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -93,7 +93,7 @@ pub fn determinism_first() {
// Separate: chaos test (introduces randomness)
let _chaos_plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.enable_node_control()
.chaos_with(|c| {
c.restart()

View File

@ -30,7 +30,7 @@ See also: [RunContext: BlockFeed & Node Control](node-control.md) for the curren
## Guidance
- Keep chaos realistic: avoid flapping or patterns you wouldn't operate in prod.
- Scope chaos: choose validators vs executors intentionally; don't restart all
- Scope chaos: choose validators intentionally; don't restart all
nodes at once unless you're testing full outages.
- Combine chaos with observability: capture block feed/metrics and API health so
failures are diagnosable.
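Putting the guidance together, a conservatively scoped chaos plan might look like this sketch (all delays and the cooldown are illustrative):

```rust,ignore
use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};

let plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
    .enable_node_control() // required for chaos workloads
    .chaos_with(|c| {
        c.restart()
            .min_delay(Duration::from_secs(45))        // no flapping
            .max_delay(Duration::from_secs(75))
            .target_cooldown(Duration::from_secs(120)) // let a node recover before the next restart
            .apply()
    })
    .expect_consensus_liveness()
    .with_run_duration(Duration::from_secs(180))
    .build()?;
```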

View File

@ -138,9 +138,9 @@ ls -lh testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params
### 4. Node Binaries Not Found
**Symptoms:**
- Error about missing `nomos-node` or `nomos-executor` binary
- Error about missing `nomos-node` binary
- "file not found" or "no such file or directory"
- Environment variables `NOMOS_NODE_BIN` / `NOMOS_EXECUTOR_BIN` not set
- Environment variable `NOMOS_NODE_BIN` not set
**What you'll see:**
@ -151,7 +151,7 @@ Error: Os { code: 2, kind: NotFound, message: "No such file or directory" }
thread 'main' panicked at 'failed to spawn nomos-node process'
```
**Root Cause:** The local runner needs compiled `nomos-node` and `nomos-executor` binaries, but doesn't know where they are.
**Root Cause:** The local runner needs a compiled `nomos-node` binary, but doesn't know where it is.
**Fix (recommended):**
@ -165,11 +165,10 @@ scripts/run/run-examples.sh -t 60 -v 1 -e 1 host
```bash
# Build binaries first
cd ../nomos-node # or wherever your nomos-node checkout is
cargo build --release --bin nomos-node --bin nomos-executor
cargo build --release --bin nomos-node
# Set environment variables
export NOMOS_NODE_BIN=$PWD/target/release/nomos-node
export NOMOS_EXECUTOR_BIN=$PWD/target/release/nomos-executor
# Return to testing framework
cd ../nomos-testing
@ -289,7 +288,6 @@ netstat -ano | findstr :18080 # Windows
# Kill orphaned nomos processes
pkill nomos-node
pkill nomos-executor
# For compose: ensure containers are stopped
docker compose down
@ -337,7 +335,7 @@ thread 'main' panicked at 'workload init failed: insufficient wallets'
use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(20) // ← Increase wallet count
.transactions_with(|tx| {
tx.users(10) // ← Must be ≤ wallets(20)
@ -459,7 +457,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
// Increase run duration to allow more blocks.
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(120)) // ← Give more time
.build();
@ -484,7 +482,7 @@ When a test fails, check these in order:
1. **`POL_PROOF_DEV_MODE=true` is set** (REQUIRED for all runners)
2. **`versions.env` exists at repo root**
3. **KZG circuit assets present** (for DA workloads): `testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params`
4. **Node binaries available** (`NOMOS_NODE_BIN` / `NOMOS_EXECUTOR_BIN` set, or using `run-examples.sh`)
4. **Node binary available** (`NOMOS_NODE_BIN` set, or using `run-examples.sh`)
5. **Docker daemon running** (for compose/k8s)
6. **Docker image built** (`logos-blockchain-testing:local` exists for compose/k8s)
7. **No port conflicts** (`lsof -i :18080`, kill orphaned processes)
@ -508,7 +506,7 @@ When a test fails, check these in order:
**Important Notes:**
- **Host runner** (local processes): Per-run temporary directories are created under the current working directory and removed after the run unless `NOMOS_TESTS_KEEP_LOGS=1`. To write per-node log files to a stable location, set `NOMOS_LOG_DIR=/path/to/logs`.
- **Compose/K8s**: Node log destination is controlled by `testing-framework/assets/stack/cfgsync.yaml` (`tracing_settings.logger`). By default, rely on `docker logs` or `kubectl logs`.
- **File naming**: Log files use prefix `nomos-node-{index}*` or `nomos-executor-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix).
- **File naming**: Log files use prefix `nomos-node-{index}*` with timestamps, e.g., `nomos-node-0.2024-12-01T10-30-45.log` (NOT just `.log` suffix).
- **Container names**: Compose containers include project UUID, e.g., `nomos-compose-<uuid>-validator-0-1` where `<uuid>` is randomly generated per run
### Accessing Node Logs by Runner
@ -566,7 +564,7 @@ docker exec -it <container-id> /bin/sh
docker logs <container-id> > debug.log
```
**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1` or `nomos-compose-{uuid}-executor-{index}-1`, where `{uuid}` is randomly generated per run.
**Note:** Container names follow the pattern `nomos-compose-{uuid}-validator-{index}-1`, where `{uuid}` is randomly generated per run.
#### K8s Runner
@ -581,9 +579,6 @@ kubectl config view --minify | grep namespace
# All validator pods (add -n <namespace> if not using default)
kubectl logs -l nomos/logical-role=validator -f
# All executor pods
kubectl logs -l nomos/logical-role=executor -f
# Specific pod by name (find exact name first)
kubectl get pods -l nomos/logical-role=validator # Find the exact pod name
kubectl logs -f <actual-pod-name> # Then use it
@ -616,7 +611,6 @@ done > all-logs.txt
# Or use label selectors (recommended)
kubectl logs -l nomos/logical-role=validator --tail=500 > validators.log
kubectl logs -l nomos/logical-role=executor --tail=500 > executors.log
# With explicit namespace
kubectl logs -n my-namespace -l nomos/logical-role=validator --tail=500 > validators.log
@ -651,7 +645,6 @@ docker ps -a --filter "name=nomos-compose-"
# K8s: check pod status (use label selectors, add -n <namespace> if needed)
kubectl get pods -l nomos/logical-role=validator
kubectl get pods -l nomos/logical-role=executor
kubectl describe pod <actual-pod-name> # Get name from above first
```

View File

@ -14,7 +14,7 @@ without changing the plan.
- Understand when to use each runner (Host, Compose, Kubernetes)
**Author and Run Scenarios**
- Define multi-node topologies with validators and executors
- Define multi-node topologies with validators
- Configure transaction and DA workloads with appropriate rates
- Add consensus liveness and inclusion expectations
- Run scenarios across all three deployment modes

View File

@ -45,7 +45,7 @@ use testing_framework_workflows::workloads::transaction::Workload;
```rust,ignore
use testing_framework_workflows::ScenarioBuilderExt;
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(20) // Seed 20 wallet accounts
.transactions_with(|tx| {
tx.rate(10) // 10 transactions per block
@ -63,7 +63,7 @@ use testing_framework_workflows::workloads::transaction;
let tx_workload = transaction::Workload::with_rate(10)
.expect("transaction rate must be non-zero");
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(20)
.with_workload(tx_workload)
.with_run_duration(Duration::from_secs(60))
@ -117,7 +117,7 @@ Error: Expectation failed: TxInclusionExpectation
**How to debug:**
1. Check logs for proof generation timing:
```bash
grep "proof generation" $NOMOS_LOG_DIR/executor-0/*.log
grep "proof generation" $NOMOS_LOG_DIR/*/*.log
```
2. Verify `POL_PROOF_DEV_MODE=true` was set
3. Increase duration: `.with_run_duration(Duration::from_secs(120))`
@ -147,7 +147,7 @@ use testing_framework_workflows::workloads::da::Workload;
```rust,ignore
use testing_framework_workflows::ScenarioBuilderExt;
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.da_with(|da| {
da.channel_rate(2) // 2 channels per block
.blob_rate(4) // 4 blobs per block
@ -168,7 +168,7 @@ let da_workload = da::Workload::with_rate(
20, // headroom_percent
);
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.with_workload(da_workload)
.with_run_duration(Duration::from_secs(120))
.build();
@ -176,16 +176,10 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
#### Prerequisites
1. **Executors must be present:**
```rust,ignore
.executors(N) // At least 1 executor
```
DA workload requires executor nodes to handle blob publishing.
2. **Sufficient duration:**
1. **Sufficient duration:**
Channel creation and blob publishing are slower than transaction submission. Allow 120+ seconds.
3. **Circuit artifacts:**
2. **Circuit artifacts:**
Same as transaction workload (POL_PROOF_DEV_MODE, circuits staged).
#### Attached Expectation
@ -195,12 +189,12 @@ ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
**What it checks:**
- At least `N` channels were created (where N = channel_rate × expected blocks)
- At least `M` blobs were published (where M = blob_rate × expected blocks × headroom)
- Uses BlockFeed and executor API to verify
- Uses the BlockFeed API to verify
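As a back-of-envelope example (the block count is purely illustrative): with `channel_rate(2)`, `blob_rate(4)`, and roughly 30 blocks produced during a 120-second run, the thresholds work out as follows.

```rust,ignore
let expected_blocks = 30;               // illustrative estimate for a 120 s run
let min_channels = 2 * expected_blocks; // N = channel_rate * blocks = 60
let min_blobs = 4 * expected_blocks;    // blob_rate * blocks = 120, then scaled by the headroom factor
```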
**Failure modes:**
- "Expected >= X channels, observed Y" (Y < X)
- "Expected >= X blobs, observed Y" (Y < X)
- Common causes: executor crashes, insufficient duration, DA saturation
- Common causes: insufficient duration, DA saturation
#### What Failure Looks Like
@ -210,23 +204,14 @@ Error: Expectation failed: DaWorkloadExpectation
Observed: 23 channels
Possible causes:
- Executors crashed or restarted (check executor logs)
- Duration too short (channels still being created)
- Blob publishing failed (check executor API errors)
- Network issues (check validator/executor connectivity)
- Blob publishing failed (check API errors)
- Network issues (check validator connectivity)
```
**How to debug:**
1. Check executor logs:
```bash
grep "channel\|blob" $NOMOS_LOG_DIR/executor-0/*.log
```
2. Verify executors stayed running:
```bash
grep "panic\|killed" $NOMOS_LOG_DIR/executor-*/*.log
```
3. Increase duration: `.with_run_duration(Duration::from_secs(180))`
4. Reduce rates: `.channel_rate(1).blob_rate(2)`
1. Increase duration: `.with_run_duration(Duration::from_secs(180))`
2. Reduce rates: `.channel_rate(1).blob_rate(2)`
---
@ -247,7 +232,6 @@ use testing_framework_workflows::workloads::chaos::RandomRestartWorkload;
| `max_delay` | `Duration` | **Required** | Maximum time between restart attempts |
| `target_cooldown` | `Duration` | **Required** | Minimum time before restarting same node again |
| `include_validators` | `bool` | **Required** | Whether to restart validators |
| `include_executors` | `bool` | **Required** | Whether to restart executors |
#### Usage
@ -258,7 +242,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRestartWorkload};
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(3).executors(2)
t.network_star().validators(3)
})
.enable_node_control() // REQUIRED for chaos
.with_workload(RandomRestartWorkload::new(
@ -266,7 +250,6 @@ let scenario = ScenarioBuilder::topology_with(|t| {
Duration::from_secs(75), // max_delay
Duration::from_secs(120), // target_cooldown
true, // include_validators
true, // include_executors
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(180))
@ -288,7 +271,6 @@ let scenario = ScenarioBuilder::topology_with(|t| {
3. **Sufficient topology:**
- For validators: Need >1 validator (workload skips if only 1)
- For executors: Can restart all executors
4. **Realistic timing:**
- Total duration should be 2-3× the max_delay + cooldown
@ -338,8 +320,7 @@ Error: Expectation failed: ConsensusLiveness
grep "NodeControlHandle" $NOMOS_LOG_DIR/*/*.log
```
3. Increase cooldown: `Duration::from_secs(180)`
4. Reduce restart scope: `include_validators = false` (test executors only)
5. Increase duration: `.with_run_duration(Duration::from_secs(300))`
4. Increase duration: `.with_run_duration(Duration::from_secs(300))`
---
@ -357,7 +338,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
#### DSL Usage
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build();
@ -452,7 +433,6 @@ These expectations are added automatically when using the DSL (`.transactions_wi
Duration::from_secs(75), // max
Duration::from_secs(120), // cooldown
true, // validators
true, // executors
))
```
@ -463,7 +443,7 @@ These expectations are added automatically when using the DSL (`.transactions_wi
### Pattern 1: Multiple Workloads
```rust,ignore
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(20)
.transactions_with(|tx| tx.rate(5).users(10))
.da_with(|da| da.channel_rate(2).blob_rate(2))
@ -493,7 +473,7 @@ impl Expectation for MyCustomExpectation {
}
}
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.with_expectation(MyCustomExpectation)
.with_run_duration(Duration::from_secs(60))
.build();
@ -507,7 +487,7 @@ When a workload or expectation fails:
1. Check logs: `$NOMOS_LOG_DIR/*/` or `docker compose logs` or `kubectl logs`
2. Verify environment variables: `POL_PROOF_DEV_MODE`, `NOMOS_NODE_BIN`, etc.
3. Check prerequisites: wallets, executors, node control, circuits
3. Check prerequisites: wallets, node control, circuits
4. Increase duration: Double the run duration and retry
5. Reduce rates: Half the traffic rates and retry
6. Check metrics: Prometheus queries for block height, tx count, DA stats

View File

@ -16,9 +16,9 @@ testing-framework-runner-compose = { workspace = true }
testing-framework-runner-k8s = { workspace = true }
testing-framework-runner-local = { workspace = true }
testing-framework-workflows = { workspace = true }
tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] }
tokio = { features = ["macros", "net", "rt-multi-thread", "time"], workspace = true }
tracing = { workspace = true }
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
tracing-subscriber = { features = ["env-filter", "fmt"], version = "0.3" }
[dev-dependencies]
async-trait = { workspace = true }

View File

@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn scenario_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| txs.rate(5).users(20))
.expect_consensus_liveness()

View File

@ -6,14 +6,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe
use crate::SnippetResult;
pub fn random_restart_plan() -> SnippetResult<Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.enable_node_control()
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min delay
Duration::from_secs(75), // max delay
Duration::from_secs(120), // target cooldown
true, // include validators
true, // include executors
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(150))

View File

@ -4,5 +4,5 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn build_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0)).build() // Construct the final Scenario
ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario
}

View File

@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block

View File

@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn expectations_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.expect_consensus_liveness() // Assert blocks are produced continuously
.build()
}

View File

@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn run_duration_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
.build()
}

View File

@ -4,6 +4,5 @@ pub fn topology() -> Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology (all connect to seed node)
.validators(3) // Number of validator nodes
.executors(2) // Number of executor nodes
})
}

View File

@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn transactions_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block

View File

@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;
pub fn wallets_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.wallets(50) // Seed 50 funded wallet accounts
.build()
}

View File

@ -7,7 +7,7 @@ use crate::SnippetResult;
pub fn chaos_plan()
-> SnippetResult<testing_framework_core::scenario::Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.enable_node_control() // Enable node control capability
.chaos_with(|c| {
c.restart() // Random restart chaos

View File

@ -4,7 +4,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn execution() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
.expect_consensus_liveness()
.build()?;

View File

@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn aggressive_chaos_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.enable_node_control()
.wallets(50)
.transactions_with(|txs| txs.rate(10).users(20))

View File

@ -9,13 +9,12 @@ pub async fn load_progression_test() -> Result<()> {
for rate in [5, 10, 20, 30] {
println!("Testing with rate: {}", rate);
let mut plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
.wallets(50)
.transactions_with(|txs| txs.rate(rate).users(20))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(50)
.transactions_with(|txs| txs.rate(rate).users(20))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let deployer = ComposeDeployer::default();
let runner = deployer.deploy(&plan).await?;

View File

@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn sustained_load_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.wallets(100)
.transactions_with(|txs| txs.rate(15).users(50))
.expect_consensus_liveness()

View File

@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn chaos_resilience() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
.enable_node_control()
.wallets(20)
.transactions_with(|txs| txs.rate(3).users(10))

View File

@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn transactions_multi_node() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.wallets(30)
.transactions_with(|txs| txs.rate(5).users(15))
.expect_consensus_liveness()

View File

@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn simple_consensus() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
.build()?;

View File

@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn transaction_workload() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(0))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.wallets(20)
.transactions_with(|txs| txs.rate(5).users(10))
.expect_consensus_liveness()

View File

@ -13,7 +13,7 @@ impl<Caps> YourExpectationDslExt for testing_framework_core::scenario::Builder<C
}
pub fn use_in_examples() -> SnippetResult<()> {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.expect_your_condition()
.build()?;
Ok(())

View File

@ -27,7 +27,7 @@ impl<Caps> YourWorkloadDslExt for testing_framework_core::scenario::Builder<Caps
}
pub fn use_in_examples() -> SnippetResult<()> {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(0))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.your_workload_with(|w| w.some_config())
.build()?;
Ok(())

View File

@ -4,5 +4,4 @@ use testing_framework_core::scenario::DynError;
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
async fn restart_executor(&self, index: usize) -> Result<(), DynError>;
}
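A hypothetical caller built on this trait (the function below is illustrative, not part of the framework):

```rust,ignore
use testing_framework_core::scenario::DynError;

// Restart the first validator through the node-control handle.
async fn bounce_first_validator(handle: &dyn NodeControlHandle) -> Result<(), DynError> {
    handle.restart_validator(0).await
}
```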

View File

@ -4,7 +4,7 @@ use testing_framework_runner_local::LocalDeployer;
pub async fn run_with_env_overrides() -> Result<()> {
// Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars)
let mut plan = ScenarioBuilder::with_node_counts(3, 2)
let mut plan = ScenarioBuilder::with_node_counts(3)
.with_run_duration(std::time::Duration::from_secs(120))
.build()?;

View File

@ -6,8 +6,8 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
pub async fn run_local_demo() -> Result<()> {
// Define the scenario (1 validator + 1 executor, tx workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1).executors(1))
// Define the scenario (2 validators, tx workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block

View File

@ -3,7 +3,6 @@ use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology: all nodes connect to seed
.validators(1) // 1 validator node
.executors(1) // 1 executor node
.validators(2) // 2 validator nodes
})
}

View File

@ -2,5 +2,5 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_2_wallets() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).wallets(1_000) // Seed 1,000 funded wallet accounts
ScenarioBuilder::with_node_counts(1).wallets(1_000) // Seed 1,000 funded wallet accounts
}

View File

@ -2,7 +2,7 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_3_workloads() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1)
ScenarioBuilder::with_node_counts(1)
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block

View File

@ -2,5 +2,5 @@ use testing_framework_core::scenario::ScenarioBuilder;
use testing_framework_workflows::ScenarioBuilderExt;
pub fn step_4_expectation() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously.
ScenarioBuilder::with_node_counts(1).expect_consensus_liveness() // This says what success means: blocks must be produced continuously.
}

View File

@ -3,5 +3,5 @@ use std::time::Duration;
use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_5_run_duration() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::with_node_counts(1, 1).with_run_duration(Duration::from_secs(60))
ScenarioBuilder::with_node_counts(1).with_run_duration(Duration::from_secs(60))
}

View File

@ -3,7 +3,7 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
pub async fn step_6_deploy_and_execute() -> Result<()> {
let mut plan = ScenarioBuilder::with_node_counts(1, 1).build()?;
let mut plan = ScenarioBuilder::with_node_counts(1).build()?;
let deployer = LocalDeployer::default(); // Use local process deployer
let runner = deployer.deploy(&plan).await?; // Provision infrastructure

View File

@ -4,7 +4,7 @@ use testing_framework_runner_compose::ComposeDeployer;
pub async fn run_with_compose_deployer() -> Result<()> {
// ... same scenario definition ...
let mut plan = ScenarioBuilder::with_node_counts(1, 1).build()?;
let mut plan = ScenarioBuilder::with_node_counts(1).build()?;
let deployer = ComposeDeployer::default(); // Use Docker Compose
let runner = deployer.deploy(&plan).await?;

View File

@ -5,7 +5,7 @@ use crate::SnippetResult;
pub fn declarative_over_imperative() -> SnippetResult<()> {
// Good: declarative
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -13,7 +13,7 @@ pub fn declarative_over_imperative() -> SnippetResult<()> {
.build()?;
// Bad: imperative (framework doesn't work this way)
// spawn_validator(); spawn_executor();
// spawn_validator();
// loop { submit_tx(); check_block(); }
Ok(())

View File

@ -7,7 +7,7 @@ use crate::SnippetResult;
pub fn determinism_first() -> SnippetResult<()> {
// Separate: functional test (deterministic)
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@ -15,20 +15,19 @@ pub fn determinism_first() -> SnippetResult<()> {
.build()?;
// Separate: chaos test (introduces randomness)
let _chaos_plan =
ScenarioBuilder::topology_with(|t| t.network_star().validators(3).executors(2))
.enable_node_control()
.chaos_with(|c| {
c.restart()
.min_delay(Duration::from_secs(30))
.max_delay(Duration::from_secs(60))
.target_cooldown(Duration::from_secs(45))
.apply()
})
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
.expect_consensus_liveness()
.build()?;
let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
.enable_node_control()
.chaos_with(|c| {
c.restart()
.min_delay(Duration::from_secs(30))
.max_delay(Duration::from_secs(60))
.target_cooldown(Duration::from_secs(45))
.apply()
})
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
.expect_consensus_liveness()
.build()?;
Ok(())
}

View File

@ -7,14 +7,14 @@ use crate::SnippetResult;
pub fn minimum_run_windows() -> SnippetResult<()> {
// Bad: too short (~2 blocks with default 2s slots, 0.9 coeff)
let _too_short = ScenarioBuilder::with_node_counts(1, 0)
let _too_short = ScenarioBuilder::with_node_counts(1)
.with_run_duration(Duration::from_secs(5))
.expect_consensus_liveness()
.build()?;
// Good: enough blocks for assertions (~27 blocks with default 2s slots, 0.9
// coeff)
let _good = ScenarioBuilder::with_node_counts(1, 0)
let _good = ScenarioBuilder::with_node_counts(1)
.with_run_duration(Duration::from_secs(60))
.expect_consensus_liveness()
.build()?;

View File

@ -7,7 +7,7 @@ use crate::SnippetResult;
pub fn protocol_time_not_wall_time() -> SnippetResult<()> {
// Good: protocol-oriented thinking
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(1))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})

View File

@ -26,37 +26,25 @@ async fn main() {
let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let executors = read_env_any(&["NOMOS_DEMO_EXECUTORS"], demo::DEFAULT_EXECUTORS);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(
validators,
executors, run_secs, "starting compose runner demo"
);
info!(validators, run_secs, "starting compose runner demo");
if let Err(err) = run_compose_case(validators, executors, Duration::from_secs(run_secs)).await {
if let Err(err) = run_compose_case(validators, Duration::from_secs(run_secs)).await {
warn!("compose runner demo failed: {err:#}");
process::exit(1);
}
}
async fn run_compose_case(
validators: usize,
executors: usize,
run_duration: Duration,
) -> Result<()> {
async fn run_compose_case(validators: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
executors,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(validators).executors(executors)
})
.enable_node_control();
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
.enable_node_control();
let scenario = if let Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown)) =
chaos_timings(run_duration)

View File

@ -18,32 +18,28 @@ async fn main() {
tracing_subscriber::fmt::init();
let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let executors = read_env_any(&["NOMOS_DEMO_EXECUTORS"], demo::DEFAULT_EXECUTORS);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(validators, executors, run_secs, "starting k8s runner demo");
info!(validators, run_secs, "starting k8s runner demo");
if let Err(err) = run_k8s_case(validators, executors, Duration::from_secs(run_secs)).await {
if let Err(err) = run_k8s_case(validators, Duration::from_secs(run_secs)).await {
warn!("k8s runner demo failed: {err:#}");
process::exit(1);
}
}
async fn run_k8s_case(validators: usize, executors: usize, run_duration: Duration) -> Result<()> {
async fn run_k8s_case(validators: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
executors,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(validators).executors(executors)
})
.with_capabilities(ObservabilityCapability::default())
.wallets(TOTAL_WALLETS)
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
.with_run_duration(run_duration)
.expect_consensus_liveness();
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
.with_capabilities(ObservabilityCapability::default())
.wallets(TOTAL_WALLETS)
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
.with_run_duration(run_duration)
.expect_consensus_liveness();
if let Ok(url) = env::var("NOMOS_METRICS_QUERY_URL") {
if !url.trim().is_empty() {

View File

@ -23,33 +23,26 @@ async fn main() {
}
let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let executors = read_env_any(&["NOMOS_DEMO_EXECUTORS"], demo::DEFAULT_EXECUTORS);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(
validators,
executors, run_secs, "starting local runner demo"
);
info!(validators, run_secs, "starting local runner demo");
if let Err(err) = run_local_case(validators, executors, Duration::from_secs(run_secs)).await {
if let Err(err) = run_local_case(validators, Duration::from_secs(run_secs)).await {
warn!("local runner demo failed: {err:#}");
process::exit(1);
}
}
async fn run_local_case(validators: usize, executors: usize, run_duration: Duration) -> Result<()> {
async fn run_local_case(validators: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
executors,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);
let scenario = ScenarioBuilder::topology_with(|t| {
t.network_star().validators(validators).executors(executors)
})
.wallets(TOTAL_WALLETS)
.with_run_duration(run_duration);
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
.wallets(TOTAL_WALLETS)
.with_run_duration(run_duration);
let scenario = if run_duration.as_secs() <= SMOKE_RUN_SECS_MAX {
scenario

View File

@ -1,3 +1,2 @@
pub const DEFAULT_VALIDATORS: usize = 2;
pub const DEFAULT_EXECUTORS: usize = 0;
pub const DEFAULT_RUN_SECS: u64 = 60;

View File

@ -110,13 +110,12 @@ impl Workload for JoinNodeWithPeersWorkload {
async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
let _ = try_init();
let mut scenario =
ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(0))
.enable_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.enable_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let deployer = LocalDeployer::default();
let runner = deployer.deploy(&scenario).await?;
@ -128,16 +127,15 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
let mut scenario =
ScenarioBuilder::topology_with(|t| t.network_star().validators(2).executors(0))
.enable_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["validator-0".to_string()],
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
.enable_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["validator-0".to_string()],
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let deployer = LocalDeployer::default();
let runner = deployer.deploy(&scenario).await?;

View File

@ -18,7 +18,7 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
// Required env vars (set on the command line when running this test):
// - `POL_PROOF_DEV_MODE=true`
// - `RUST_LOG=info` (optional)
let config = TopologyConfig::with_node_numbers(2, 0);
let config = TopologyConfig::with_node_numbers(2);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(config)?;
// Nodes are stopped automatically when the cluster is dropped.

View File

@ -322,7 +322,6 @@ build_bundle::prepare_circuits() {
fi
NODE_BIN="${NODE_TARGET}/debug/logos-blockchain-node"
EXEC_BIN="${NODE_TARGET}/debug/logos-blockchain-executor"
CLI_BIN="${NODE_TARGET}/debug/logos-blockchain-cli"
}
@ -360,13 +359,13 @@ build_bundle::build_binaries() {
LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}" \
RUSTUP_TOOLCHAIN="${BUNDLE_RUSTUP_TOOLCHAIN}" \
cargo build --all-features \
-p logos-blockchain-node -p logos-blockchain-executor -p logos-blockchain-cli \
-p logos-blockchain-node -p logos-blockchain-cli \
--target-dir "${NODE_TARGET}"
else
RUSTFLAGS='--cfg feature="pol-dev-mode"' NOMOS_CIRCUITS="${CIRCUITS_DIR}" \
LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}" \
cargo build --all-features \
-p logos-blockchain-node -p logos-blockchain-executor -p logos-blockchain-cli \
-p logos-blockchain-node -p logos-blockchain-cli \
--target-dir "${NODE_TARGET}"
fi
)
@ -380,7 +379,6 @@ build_bundle::package_bundle() {
cp -a "${CIRCUITS_DIR}/." "${bundle_dir}/artifacts/circuits/"
mkdir -p "${bundle_dir}/artifacts"
cp "${NODE_BIN}" "${bundle_dir}/artifacts/logos-blockchain-node"
cp "${EXEC_BIN}" "${bundle_dir}/artifacts/logos-blockchain-executor"
cp "${CLI_BIN}" "${bundle_dir}/artifacts/logos-blockchain-cli"
{
echo "nomos_node_path=${NOMOS_NODE_PATH:-}"

View File

@ -134,7 +134,6 @@ build_linux_binaries::stage_from_bundle() {
local artifacts="${extract_dir}/artifacts"
[ -f "${artifacts}/logos-blockchain-node" ] || common::die "Missing logos-blockchain-node in bundle: ${tar_path}"
[ -f "${artifacts}/logos-blockchain-executor" ] || common::die "Missing logos-blockchain-executor in bundle: ${tar_path}"
[ -f "${artifacts}/logos-blockchain-cli" ] || common::die "Missing logos-blockchain-cli in bundle: ${tar_path}"
[ -d "${artifacts}/circuits" ] || common::die "Missing circuits/ in bundle: ${tar_path}"
@ -144,7 +143,7 @@ build_linux_binaries::stage_from_bundle() {
echo "==> Staging binaries to ${bin_out}"
mkdir -p "${bin_out}"
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-executor" "${artifacts}/logos-blockchain-cli" "${bin_out}/"
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${bin_out}/"
echo "==> Staging circuits to ${circuits_out}"
rm -rf "${circuits_out}"

View File

@ -137,9 +137,9 @@ build_test_image::print_config() {
}
build_test_image::have_host_binaries() {
# Preserve existing behavior: only require node+executor on the host.
# Preserve existing behavior: only require node on the host.
# If logos-blockchain-cli is missing, the Dockerfile can still build it from source.
[ -x "${BIN_DST}/logos-blockchain-node" ] && [ -x "${BIN_DST}/logos-blockchain-executor" ]
[ -x "${BIN_DST}/logos-blockchain-node" ]
}
build_test_image::restore_from_bundle() {
@ -153,13 +153,13 @@ build_test_image::restore_from_bundle() {
tar -xzf "${TAR_PATH}" -C "${tmp_extract}"
local artifacts="${tmp_extract}/artifacts"
for bin in logos-blockchain-node logos-blockchain-executor logos-blockchain-cli; do
for bin in logos-blockchain-node logos-blockchain-cli; do
[ -f "${artifacts}/${bin}" ] || build_test_image::fail "Bundle ${TAR_PATH} missing artifacts/${bin}"
done
mkdir -p "${BIN_DST}"
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-executor" "${artifacts}/logos-blockchain-cli" "${BIN_DST}/"
chmod +x "${BIN_DST}/logos-blockchain-node" "${BIN_DST}/logos-blockchain-executor" "${BIN_DST}/logos-blockchain-cli" || true
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${BIN_DST}/"
chmod +x "${BIN_DST}/logos-blockchain-node" "${BIN_DST}/logos-blockchain-cli" || true
if [ -d "${artifacts}/circuits" ]; then
mkdir -p "${CIRCUITS_DIR_HOST}"

View File

@ -156,13 +156,6 @@ targets = [
"logos-blockchain-common-http-client",
"logos-blockchain-cryptarchia-engine",
"logos-blockchain-cryptarchia-sync",
"logos-blockchain-da-dispersal-service",
"logos-blockchain-da-network-core",
"logos-blockchain-da-network-service",
"logos-blockchain-da-sampling-service",
"logos-blockchain-da-verifier-service",
"logos-blockchain-executor",
"logos-blockchain-executor-http-client",
"logos-blockchain-groth16",
"logos-blockchain-http-api-common",
"logos-blockchain-key-management-system-service",
@ -175,7 +168,6 @@ targets = [
"logos-blockchain-poc",
"logos-blockchain-pol",
"logos-blockchain-sdp-service",
"logos-blockchain-subnetworks-assignations",
"logos-blockchain-tests",
"logos-blockchain-time-service",
"logos-blockchain-tracing",

View File

@ -45,7 +45,6 @@ Modes:
Options:
-t, --run-seconds N Duration to run the demo (required)
-v, --validators N Number of validators (required)
-e, --executors N Number of executors (required)
--bundle PATH Convenience alias for setting NOMOS_BINARIES_TAR=PATH
--metrics-query-url URL PromQL base URL the runner process can query (optional)
--metrics-otlp-ingest-url URL Full OTLP HTTP ingest URL for node metrics export (optional)
@ -117,7 +116,6 @@ run_examples::parse_args() {
MODE="compose"
RUN_SECS_RAW=""
DEMO_VALIDATORS=""
DEMO_EXECUTORS=""
IMAGE_SELECTION_MODE="auto"
METRICS_QUERY_URL=""
METRICS_OTLP_INGEST_URL=""
@ -148,14 +146,6 @@ run_examples::parse_args() {
DEMO_VALIDATORS="${1#*=}"
shift
;;
-e|--executors)
DEMO_EXECUTORS="${2:-}"
shift 2
;;
--executors=*)
DEMO_EXECUTORS="${1#*=}"
shift
;;
--bundle)
NOMOS_BINARIES_TAR="${2:-}"
export NOMOS_BINARIES_TAR
@ -232,16 +222,12 @@ run_examples::parse_args() {
fi
RUN_SECS="${RUN_SECS_RAW}"
if [ -z "${DEMO_VALIDATORS}" ] || [ -z "${DEMO_EXECUTORS}" ]; then
run_examples::fail_with_usage "validators and executors must be provided via -v/--validators and -e/--executors"
if [ -z "${DEMO_VALIDATORS}" ] ]; then
run_examples::fail_with_usage "validators must be provided via -v/--validators"
fi
if ! common::is_uint "${DEMO_VALIDATORS}" ; then
run_examples::fail_with_usage "validators must be a non-negative integer (pass -v/--validators)"
fi
if ! common::is_uint "${DEMO_EXECUTORS}" ; then
run_examples::fail_with_usage "executors must be a non-negative integer (pass -e/--executors)"
fi
}
run_examples::select_image() {
@ -388,7 +374,7 @@ run_examples::restore_binaries_from_tar() {
RESTORED_BIN_DIR="${src}"
export RESTORED_BIN_DIR
if [ ! -f "${src}/logos-blockchain-node" ] || [ ! -f "${src}/logos-blockchain-executor" ] || [ ! -f "${src}/logos-blockchain-cli" ]; then
if [ ! -f "${src}/logos-blockchain-node" ] || [ ! -f "${src}/logos-blockchain-cli" ]; then
echo "Binaries missing in ${tar_path}; provide a prebuilt binaries tarball." >&2
return 1
fi
@ -397,11 +383,11 @@ run_examples::restore_binaries_from_tar() {
if [ "${MODE}" != "host" ] && ! run_examples::host_bin_matches_arch "${src}/logos-blockchain-node"; then
echo "Bundled binaries do not match host arch; skipping copy so containers rebuild from source."
copy_bins=0
rm -f "${bin_dst}/logos-blockchain-node" "${bin_dst}/logos-blockchain-executor" "${bin_dst}/logos-blockchain-cli"
rm -f "${bin_dst}/logos-blockchain-node" "${bin_dst}/logos-blockchain-cli"
fi
if [ "${copy_bins}" -eq 1 ]; then
mkdir -p "${bin_dst}"
cp "${src}/logos-blockchain-node" "${src}/logos-blockchain-executor" "${src}/logos-blockchain-cli" "${bin_dst}/"
cp "${src}/logos-blockchain-node" "${src}/logos-blockchain-cli" "${bin_dst}/"
fi
if [ -d "${circuits_src}" ] && [ -f "${circuits_src}/${KZG_FILE}" ]; then
@ -436,8 +422,8 @@ run_examples::prepare_bundles() {
HOST_TAR="${ROOT_DIR}/.tmp/nomos-binaries-host-${VERSION}.tar.gz"
LINUX_TAR="${ROOT_DIR}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz"
if [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN}" ] && [ -n "${LOGOS_BLOCKCHAIN_EXECUTOR_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_EXECUTOR_BIN}" ]; then
echo "==> Using pre-specified host binaries (LOGOS_BLOCKCHAIN_NODE_BIN/LOGOS_BLOCKCHAIN_EXECUTOR_BIN); skipping tarball restore"
if [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN}" ]; then
echo "==> Using pre-specified host binaries (LOGOS_BLOCKCHAIN_NODE_BIN); skipping tarball restore"
return 0
fi
@ -508,20 +494,18 @@ run_examples::validate_restored_bundle() {
common::die "KZG params missing at ${KZG_HOST_PATH}; ensure the tarball contains circuits."
fi
if [ "${MODE}" = "host" ] && ! { [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -n "${LOGOS_BLOCKCHAIN_EXECUTOR_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_EXECUTOR_BIN:-}" ]; }; then
if [ "${MODE}" = "host" ] && ! { [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ]; }; then
local tar_node
tar_node="${RESTORED_BIN_DIR:-${ROOT_DIR}/testing-framework/assets/stack/bin}/logos-blockchain-node"
tar_exec="${RESTORED_BIN_DIR:-${ROOT_DIR}/testing-framework/assets/stack/bin}/logos-blockchain-executor"
[ -x "${tar_node}" ] && [ -x "${tar_exec}" ] || common::die \
"Restored tarball missing host executables; provide a host-compatible binaries tarball."
run_examples::host_bin_matches_arch "${tar_node}" && run_examples::host_bin_matches_arch "${tar_exec}" || common::die \
run_examples::host_bin_matches_arch "${tar_node}" || common::die \
"Restored executables do not match host architecture; provide a host-compatible binaries tarball."
echo "==> Using restored host binaries from tarball"
LOGOS_BLOCKCHAIN_NODE_BIN="${tar_node}"
LOGOS_BLOCKCHAIN_EXECUTOR_BIN="${tar_exec}"
export LOGOS_BLOCKCHAIN_NODE_BIN LOGOS_BLOCKCHAIN_EXECUTOR_BIN
export LOGOS_BLOCKCHAIN_NODE_BIN
fi
}
@ -572,7 +556,6 @@ run_examples::run() {
export NOMOS_DEMO_RUN_SECS="${RUN_SECS}"
export NOMOS_DEMO_VALIDATORS="${DEMO_VALIDATORS}"
export NOMOS_DEMO_EXECUTORS="${DEMO_EXECUTORS}"
if [ -n "${METRICS_QUERY_URL}" ]; then
export NOMOS_METRICS_QUERY_URL="${METRICS_QUERY_URL}"
@ -591,7 +574,6 @@ run_examples::run() {
LOGOS_BLOCKCHAIN_CIRCUITS="${HOST_BUNDLE_PATH}" \
LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH="${kzg_path}" \
LOGOS_BLOCKCHAIN_NODE_BIN="${LOGOS_BLOCKCHAIN_NODE_BIN:-}" \
LOGOS_BLOCKCHAIN_EXECUTOR_BIN="${LOGOS_BLOCKCHAIN_EXECUTOR_BIN:-}" \
COMPOSE_CIRCUITS_PLATFORM="${COMPOSE_CIRCUITS_PLATFORM:-}" \
cargo run -p runner-examples --bin "${BIN}"
}

View File

@ -18,7 +18,6 @@ image rebuilds (where it makes sense), after cleaning and rebuilding bundles.
Options:
-t, --run-seconds N Demo duration for each run (default: 120)
-v, --validators N Validators (default: 1)
-e, --executors N Executors (default: 1)
--modes LIST Comma-separated: host,compose,k8s (default: host,compose,k8s)
--no-clean Skip scripts/ops/clean.sh step
--no-bundles Skip scripts/build/build-bundle.sh (uses existing .tmp tarballs)
@ -46,7 +45,6 @@ matrix::have() { command -v "$1" >/dev/null 2>&1; }
matrix::parse_args() {
RUN_SECS=120
VALIDATORS=1
EXECUTORS=1
MODES_RAW="host,compose,k8s"
DO_CLEAN=1
DO_BUNDLES=1
@ -63,8 +61,6 @@ matrix::parse_args() {
--run-seconds=*) RUN_SECS="${1#*=}"; shift ;;
-v|--validators) VALIDATORS="${2:-}"; shift 2 ;;
--validators=*) VALIDATORS="${1#*=}"; shift ;;
-e|--executors) EXECUTORS="${2:-}"; shift 2 ;;
--executors=*) EXECUTORS="${1#*=}"; shift ;;
--modes) MODES_RAW="${2:-}"; shift 2 ;;
--modes=*) MODES_RAW="${1#*=}"; shift ;;
--no-clean) DO_CLEAN=0; shift ;;
@ -83,7 +79,6 @@ matrix::parse_args() {
common::is_uint "${RUN_SECS}" || matrix::die "--run-seconds must be an integer"
[ "${RUN_SECS}" -gt 0 ] || matrix::die "--run-seconds must be > 0"
common::is_uint "${VALIDATORS}" || matrix::die "--validators must be an integer"
common::is_uint "${EXECUTORS}" || matrix::die "--executors must be an integer"
}
matrix::split_modes() {
@ -220,7 +215,7 @@ matrix::main() {
host)
matrix::run_case "host" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
-t "${RUN_SECS}" -v "${VALIDATORS}" -e "${EXECUTORS}" \
-t "${RUN_SECS}" -v "${VALIDATORS}" \
"${forward[@]}" \
host
;;
@ -228,7 +223,7 @@ matrix::main() {
if [ "${SKIP_IMAGE_BUILD_VARIANTS}" -eq 0 ]; then
matrix::run_case "compose.image_build" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
-t "${RUN_SECS}" -v "${VALIDATORS}" -e "${EXECUTORS}" \
-t "${RUN_SECS}" -v "${VALIDATORS}" \
"${forward[@]}" \
compose
else
@ -238,7 +233,7 @@ matrix::main() {
matrix::run_case "compose.skip_image_build" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
--no-image-build \
-t "${RUN_SECS}" -v "${VALIDATORS}" -e "${EXECUTORS}" \
-t "${RUN_SECS}" -v "${VALIDATORS}" \
"${forward[@]}" \
compose
;;
@ -259,7 +254,7 @@ matrix::main() {
fi
matrix::run_case "k8s.image_build" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
-t "${RUN_SECS}" -v "${VALIDATORS}" -e "${EXECUTORS}" \
-t "${RUN_SECS}" -v "${VALIDATORS}" \
"${forward[@]}" \
k8s
unset NOMOS_FORCE_IMAGE_BUILD || true
@ -273,7 +268,7 @@ matrix::main() {
matrix::run_case "k8s.skip_image_build" \
"${ROOT_DIR}/scripts/run/run-examples.sh" \
--no-image-build \
-t "${RUN_SECS}" -v "${VALIDATORS}" -e "${EXECUTORS}" \
-t "${RUN_SECS}" -v "${VALIDATORS}" \
"${forward[@]}" \
k8s
;;

View File

@ -82,7 +82,6 @@ COPY --from=builder /opt/circuits /opt/circuits
COPY --from=builder /workspace/testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params /opt/nomos/kzg-params/kzgrs_test_params
COPY --from=builder /workspace/artifacts/logos-blockchain-node /usr/bin/logos-blockchain-node
COPY --from=builder /workspace/artifacts/logos-blockchain-executor /usr/bin/logos-blockchain-executor
COPY --from=builder /workspace/artifacts/logos-blockchain-cli /usr/bin/logos-blockchain-cli
COPY --from=builder /workspace/artifacts/cfgsync-server /usr/bin/cfgsync-server
COPY --from=builder /workspace/artifacts/cfgsync-client /usr/bin/cfgsync-client

View File

@ -9,7 +9,6 @@ TARGET_ARCH="$(uname -m)"
have_prebuilt() {
[ -f testing-framework/assets/stack/bin/logos-blockchain-node ] && \
[ -f testing-framework/assets/stack/bin/logos-blockchain-executor ] && \
[ -f testing-framework/assets/stack/bin/logos-blockchain-cli ]
}
@ -34,7 +33,6 @@ bin_matches_arch() {
if have_prebuilt && bin_matches_arch; then
echo "Using prebuilt logos-blockchain binaries from testing-framework/assets/stack/bin"
cp testing-framework/assets/stack/bin/logos-blockchain-node /workspace/artifacts/logos-blockchain-node
cp testing-framework/assets/stack/bin/logos-blockchain-executor /workspace/artifacts/logos-blockchain-executor
cp testing-framework/assets/stack/bin/logos-blockchain-cli /workspace/artifacts/logos-blockchain-cli
exit 0
fi
@ -67,10 +65,9 @@ fi
RUSTFLAGS='--cfg feature="pol-dev-mode"' NOMOS_CIRCUITS=/opt/circuits \
LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits \
cargo build --features "testing" \
-p logos-blockchain-node -p logos-blockchain-executor -p logos-blockchain-cli
-p logos-blockchain-node -p logos-blockchain-cli
cp /tmp/nomos-node/target/debug/logos-blockchain-node /workspace/artifacts/logos-blockchain-node
cp /tmp/nomos-node/target/debug/logos-blockchain-executor /workspace/artifacts/logos-blockchain-executor
cp /tmp/nomos-node/target/debug/logos-blockchain-cli /workspace/artifacts/logos-blockchain-cli
rm -rf /tmp/nomos-node/target/debug/incremental

View File

@ -7,7 +7,6 @@ role="${1:-validator}"
bin_for_role() {
case "$1" in
validator) echo "/usr/bin/logos-blockchain-node" ;;
executor) echo "/usr/bin/logos-blockchain-executor" ;;
*) echo "Unknown role: $1" >&2; exit 2 ;;
esac
}

View File

@ -1,2 +0,0 @@
#!/bin/sh
exec /etc/nomos/scripts/run_nomos.sh executor

View File

@ -14,36 +14,29 @@ blst = "0.3.11"
chain-leader = { workspace = true }
chain-network = { workspace = true }
chain-service = { workspace = true }
cryptarchia-engine = { workspace = true, features = ["serde"] }
cryptarchia-engine = { features = ["serde"], workspace = true }
cryptarchia-sync = { workspace = true }
groth16 = { workspace = true }
hex = { version = "0.4.3", default-features = false }
hex = { default-features = false, version = "0.4.3" }
key-management-system-service = { workspace = true }
logos-blockchain-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] }
nomos-api = { workspace = true }
nomos-blend-service = { workspace = true, features = ["libp2p"] }
nomos-blend-service = { features = ["libp2p"], workspace = true }
nomos-core = { workspace = true }
nomos-da-dispersal = { workspace = true }
nomos-da-network-core = { workspace = true }
nomos-da-network-service = { workspace = true }
nomos-da-sampling = { workspace = true }
nomos-da-verifier = { workspace = true }
nomos-ledger = { workspace = true, features = ["serde"] }
nomos-ledger = { features = ["serde"], workspace = true }
nomos-libp2p = { workspace = true }
nomos-node = { workspace = true, default-features = false, features = ["testing"] }
nomos-node = { default-features = false, features = ["testing"], workspace = true }
nomos-sdp = { workspace = true }
nomos-time = { workspace = true }
nomos-tracing = { workspace = true }
nomos-tracing-service = { workspace = true }
nomos-utils = { workspace = true }
nomos-wallet = { workspace = true }
num-bigint = { version = "0.4", default-features = false }
num-bigint = { default-features = false, version = "0.4" }
rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
subnetworks-assignations = { workspace = true }
serde = { features = ["derive"], workspace = true }
testing-framework-env = { workspace = true }
thiserror = { workspace = true }
time = { version = "0.3", default-features = true }
time = { default-features = true, version = "0.3" }
tracing = { workspace = true }
[lints]

View File

@ -4,15 +4,6 @@ use chain_leader::LeaderConfig as ChainLeaderConfig;
use chain_network::{BootstrapConfig as ChainBootstrapConfig, OrphanConfig, SyncConfig};
use chain_service::StartingState;
use nomos_api::ApiServiceSettings;
use nomos_da_sampling::{
DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings,
verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings,
};
use nomos_da_verifier::{
DaVerifierServiceSettings,
backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig},
storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings,
};
use nomos_node::{
api::backend::AxumBackendSettings as NodeAxumBackendSettings,
config::{
@ -29,10 +20,9 @@ use nomos_node::{
time::{deployment::Settings as TimeDeploymentSettings, serde::Config as TimeConfig},
},
};
use nomos_utils::math::NonNegativeF64;
use nomos_wallet::WalletServiceSettings;
use crate::{constants::KZG_PARAMS_FILENAME, timeouts, topology::configs::GeneralConfig};
use crate::{timeouts, topology::configs::GeneralConfig};
// Configuration constants
const CRYPTARCHIA_GOSSIPSUB_PROTOCOL: &str = "/cryptarchia/proto";
@ -40,11 +30,9 @@ const MEMPOOL_PUBSUB_TOPIC: &str = "mantle";
const STATE_RECORDING_INTERVAL_SECS: u64 = 60;
const IBD_DOWNLOAD_DELAY_SECS: u64 = 10;
const MAX_ORPHAN_CACHE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(5) };
const DA_PUBLISH_THRESHOLD: f64 = 0.8;
const API_RATE_LIMIT_PER_SECOND: u64 = 10000;
const API_RATE_LIMIT_BURST: u32 = 10000;
const API_MAX_CONCURRENT_REQUESTS: usize = 1000;
const BLOB_STORAGE_DIR: &str = "./";
pub(crate) fn cryptarchia_deployment(config: &GeneralConfig) -> CryptarchiaDeploymentSettings {
CryptarchiaDeploymentSettings {
@ -113,64 +101,6 @@ pub(crate) fn cryptarchia_config(config: &GeneralConfig) -> CryptarchiaConfig {
}
}
fn kzg_params_path(raw: &str) -> String {
let path = PathBuf::from(raw);
if path.is_dir() {
return path.join(KZG_PARAMS_FILENAME).to_string_lossy().to_string();
}
path.to_string_lossy().to_string()
}
pub(crate) fn da_verifier_config(
config: &GeneralConfig,
) -> DaVerifierServiceSettings<KzgrsDaVerifierSettings, (), (), VerifierStorageAdapterSettings> {
let publish_threshold = match NonNegativeF64::try_from(DA_PUBLISH_THRESHOLD) {
Ok(value) => value,
Err(_) => unsafe {
// Safety: `DA_PUBLISH_THRESHOLD` is a finite non-negative constant.
std::hint::unreachable_unchecked()
},
};
DaVerifierServiceSettings {
share_verifier_settings: KzgrsDaVerifierSettings {
global_params_path: kzg_params_path(&config.da_config.global_params_path),
domain_size: config.da_config.num_subnets as usize,
},
tx_verifier_settings: (),
network_adapter_settings: (),
storage_adapter_settings: VerifierStorageAdapterSettings {
blob_storage_directory: BLOB_STORAGE_DIR.into(),
},
mempool_trigger_settings: MempoolPublishTriggerConfig {
publish_threshold,
share_duration: timeouts::share_duration(),
prune_duration: timeouts::prune_duration(),
prune_interval: timeouts::prune_interval(),
},
}
}
pub(crate) fn da_sampling_config(
config: &GeneralConfig,
) -> DaSamplingServiceSettings<KzgrsSamplingBackendSettings, SamplingVerifierSettings> {
DaSamplingServiceSettings {
sampling_settings: KzgrsSamplingBackendSettings {
num_samples: config.da_config.num_samples,
num_subnets: config.da_config.num_subnets,
old_blobs_check_interval: config.da_config.old_blobs_check_interval,
blobs_validity_duration: config.da_config.blobs_validity_duration,
},
share_verifier_settings: SamplingVerifierSettings {
global_params_path: kzg_params_path(&config.da_config.global_params_path),
domain_size: config.da_config.num_subnets as usize,
},
commitments_wait_duration: timeouts::commitments_wait(),
sdp_blob_trigger_sampling_delay: crate::adjust_timeout(timeouts::sdp_trigger_delay()),
}
}
pub(crate) fn time_config(config: &GeneralConfig) -> TimeConfig {
TimeConfig {
backend: nomos_time::backends::NtpTimeBackendSettings {
@ -226,10 +156,6 @@ pub(crate) fn wallet_settings(config: &GeneralConfig) -> WalletServiceSettings {
wallet_settings_with_leader(config, true)
}
pub(crate) fn wallet_settings_for_executor(config: &GeneralConfig) -> WalletServiceSettings {
wallet_settings_with_leader(config, false)
}
fn wallet_settings_with_leader(
config: &GeneralConfig,
include_leader: bool,

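The hunk above also drops the `kzg_params_path` helper together with the DA verifier and sampling config builders. For reference, a self-contained sketch of the resolution rule that helper implemented (a directory input gets the params filename appended, a file input passes through unchanged); the `KZG_PARAMS_FILENAME` value below is a stand-in, not taken from this diff:

use std::path::PathBuf;

// Stand-in for the removed KZG_PARAMS_FILENAME constant (illustrative value).
const KZG_PARAMS_FILENAME: &str = "kzgrs_test_params";

// Mirror of the removed helper: directories resolve to a file inside them,
// anything else is returned as-is.
fn kzg_params_path(raw: &str) -> String {
    let path = PathBuf::from(raw);
    if path.is_dir() {
        return path.join(KZG_PARAMS_FILENAME).to_string_lossy().to_string();
    }
    path.to_string_lossy().to_string()
}

fn main() {
    // On most Unix systems "/tmp" is a directory, so the filename is appended.
    println!("{}", kzg_params_path("/tmp"));
    // A path that is not an existing directory passes through unchanged.
    println!("{}", kzg_params_path("/tmp/custom_params"));
}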
View File

@ -1,127 +0,0 @@
use logos_blockchain_executor::config::Config as ExecutorConfig;
use nomos_da_dispersal::{
DispersalServiceSettings,
backend::kzgrs::{DispersalKZGRSBackendSettings, EncoderSettings},
};
use nomos_da_network_core::protocols::sampling::SubnetsConfig;
use nomos_da_network_service::{
NetworkConfig as DaNetworkConfig,
api::http::ApiAdapterSettings,
backends::libp2p::{
common::DaNetworkBackendSettings, executor::DaNetworkExecutorBackendSettings,
},
};
use nomos_node::{RocksBackendSettings, config::deployment::DeploymentSettings};
use nomos_sdp::SdpSettings;
use crate::{
nodes::{
blend::build_blend_service_config,
common::{
cryptarchia_config, cryptarchia_deployment, da_sampling_config, da_verifier_config,
http_config, mempool_config, mempool_deployment, testing_http_config, time_config,
time_deployment, tracing_settings, wallet_settings_for_executor,
},
},
timeouts,
topology::configs::GeneralConfig,
};
#[must_use]
pub fn create_executor_config(config: GeneralConfig) -> ExecutorConfig {
let network_config = config.network_config.clone();
let (blend_user_config, blend_deployment, network_deployment) =
build_blend_service_config(&config.blend_config);
let deployment_settings =
build_executor_deployment_settings(&config, blend_deployment, network_deployment);
ExecutorConfig {
network: network_config,
blend: blend_user_config,
deployment: deployment_settings,
cryptarchia: cryptarchia_config(&config),
da_network: DaNetworkConfig {
backend: build_executor_da_network_backend_settings(&config),
membership: config.da_config.membership.clone(),
api_adapter_settings: ApiAdapterSettings {
api_port: config.api_config.address.port(),
is_secure: false,
},
subnet_refresh_interval: config.da_config.subnets_refresh_interval,
subnet_threshold: config.da_config.num_samples as usize,
min_session_members: config.da_config.num_samples as usize,
},
da_verifier: da_verifier_config(&config),
tracing: tracing_settings(&config),
http: http_config(&config),
da_sampling: da_sampling_config(&config),
storage: rocks_storage_settings(),
da_dispersal: DispersalServiceSettings {
backend: build_dispersal_backend_settings(&config),
},
time: time_config(&config),
mempool: mempool_config(),
sdp: SdpSettings { declaration: None },
wallet: wallet_settings_for_executor(&config),
key_management: config.kms_config.clone(),
testing_http: testing_http_config(&config),
}
}
fn build_executor_deployment_settings(
config: &GeneralConfig,
blend_deployment: nomos_node::config::blend::deployment::Settings,
network_deployment: nomos_node::config::network::deployment::Settings,
) -> DeploymentSettings {
DeploymentSettings::new_custom(
blend_deployment,
network_deployment,
cryptarchia_deployment(config),
time_deployment(config),
mempool_deployment(),
)
}
fn build_executor_da_network_backend_settings(
config: &GeneralConfig,
) -> DaNetworkExecutorBackendSettings {
DaNetworkExecutorBackendSettings {
validator_settings: DaNetworkBackendSettings {
node_key: config.da_config.node_key.clone(),
listening_address: config.da_config.listening_address.clone(),
policy_settings: config.da_config.policy_settings.clone(),
monitor_settings: config.da_config.monitor_settings.clone(),
balancer_interval: config.da_config.balancer_interval,
redial_cooldown: config.da_config.redial_cooldown,
replication_settings: config.da_config.replication_settings,
subnets_settings: SubnetsConfig {
num_of_subnets: config.da_config.num_samples as usize,
shares_retry_limit: config.da_config.retry_shares_limit,
commitments_retry_limit: config.da_config.retry_commitments_limit,
},
},
num_subnets: config.da_config.num_subnets,
}
}
fn rocks_storage_settings() -> RocksBackendSettings {
RocksBackendSettings {
db_path: "./db".into(),
read_only: false,
column_family: Some("blocks".into()),
}
}
fn build_dispersal_backend_settings(config: &GeneralConfig) -> DispersalKZGRSBackendSettings {
DispersalKZGRSBackendSettings {
encoder_settings: EncoderSettings {
num_columns: config.da_config.num_subnets as usize,
with_cache: false,
global_params_path: config.da_config.global_params_path.clone(),
},
dispersal_timeout: timeouts::dispersal_timeout(),
retry_cooldown: timeouts::retry_cooldown(),
retry_limit: 2,
}
}

View File

@ -1,5 +1,4 @@
pub(crate) mod blend;
pub(crate) mod common;
pub mod executor;
pub mod kms;
pub mod validator;

View File

@ -1,10 +1,3 @@
use nomos_da_network_core::{
protocols::sampling::SubnetsConfig, swarm::DAConnectionPolicySettings,
};
use nomos_da_network_service::{
NetworkConfig as DaNetworkConfig, api::http::ApiAdapterSettings,
backends::libp2p::common::DaNetworkBackendSettings,
};
use nomos_node::{
Config as ValidatorConfig, RocksBackendSettings, config::deployment::DeploymentSettings,
};
@ -14,9 +7,9 @@ use crate::{
nodes::{
blend::build_blend_service_config,
common::{
cryptarchia_config, cryptarchia_deployment, da_sampling_config, da_verifier_config,
http_config, mempool_config, mempool_deployment, testing_http_config, time_config,
time_deployment, tracing_settings, wallet_settings,
cryptarchia_config, cryptarchia_deployment, http_config, mempool_config,
mempool_deployment, testing_http_config, time_config, time_deployment,
tracing_settings, wallet_settings,
},
},
topology::configs::GeneralConfig,
@ -36,21 +29,8 @@ pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig {
blend: blend_user_config,
deployment: deployment_settings,
cryptarchia: cryptarchia_config(&config),
da_network: DaNetworkConfig {
backend: build_validator_da_network_backend_settings(&config),
membership: config.da_config.membership.clone(),
api_adapter_settings: ApiAdapterSettings {
api_port: config.api_config.address.port(),
is_secure: false,
},
subnet_refresh_interval: config.da_config.subnets_refresh_interval,
subnet_threshold: config.da_config.num_samples as usize,
min_session_members: config.da_config.num_samples as usize,
},
da_verifier: da_verifier_config(&config),
tracing: tracing_settings(&config),
http: http_config(&config),
da_sampling: da_sampling_config(&config),
storage: rocks_storage_settings(),
time: time_config(&config),
mempool: mempool_config(),
@ -75,32 +55,6 @@ fn build_validator_deployment_settings(
)
}
fn build_validator_da_network_backend_settings(config: &GeneralConfig) -> DaNetworkBackendSettings {
let da_policy_settings = config.da_config.policy_settings.clone();
DaNetworkBackendSettings {
node_key: config.da_config.node_key.clone(),
listening_address: config.da_config.listening_address.clone(),
policy_settings: DAConnectionPolicySettings {
min_dispersal_peers: 0,
min_replication_peers: da_policy_settings.min_replication_peers,
max_dispersal_failures: da_policy_settings.max_dispersal_failures,
max_sampling_failures: da_policy_settings.max_sampling_failures,
max_replication_failures: da_policy_settings.max_replication_failures,
malicious_threshold: da_policy_settings.malicious_threshold,
},
monitor_settings: config.da_config.monitor_settings.clone(),
balancer_interval: config.da_config.balancer_interval,
redial_cooldown: config.da_config.redial_cooldown,
replication_settings: config.da_config.replication_settings,
subnets_settings: SubnetsConfig {
num_of_subnets: config.da_config.num_samples as usize,
shares_retry_limit: config.da_config.retry_shares_limit,
commitments_retry_limit: config.da_config.retry_commitments_limit,
},
}
}
fn rocks_storage_settings() -> RocksBackendSettings {
RocksBackendSettings {
db_path: "./db".into(),

View File

@ -2,8 +2,7 @@ use thiserror::Error;
use super::{
blend, bootstrap, bootstrap::SHORT_PROLONGED_BOOTSTRAP_PERIOD, consensus,
consensus::ConsensusParams, da, da::DaParams, network, network::NetworkParams,
wallet::WalletConfig,
consensus::ConsensusParams, network, network::NetworkParams, wallet::WalletConfig,
};
#[derive(Debug, Error)]
@ -11,15 +10,12 @@ pub enum BaseConfigError {
#[error(transparent)]
Consensus(#[from] consensus::ConsensusConfigError),
#[error(transparent)]
Da(#[from] da::DaConfigError),
#[error(transparent)]
Network(#[from] network::NetworkConfigError),
}
pub struct BaseConfigs {
pub consensus_configs: Vec<consensus::GeneralConsensusConfig>,
pub bootstrap_configs: Vec<bootstrap::GeneralBootstrapConfig>,
pub da_configs: Vec<da::GeneralDaConfig>,
pub network_configs: Vec<network::GeneralNetworkConfig>,
pub blend_configs: Vec<blend::GeneralBlendConfig>,
}
@ -27,10 +23,8 @@ pub struct BaseConfigs {
pub fn build_base_configs(
ids: &[[u8; 32]],
consensus_params: &ConsensusParams,
da_params: &DaParams,
network_params: &NetworkParams,
wallet_config: &WalletConfig,
da_ports: &[u16],
blend_ports: &[u16],
) -> Result<BaseConfigs, BaseConfigError> {
Ok(BaseConfigs {
@ -43,7 +37,6 @@ pub fn build_base_configs(
ids,
SHORT_PROLONGED_BOOTSTRAP_PERIOD,
),
da_configs: da::try_create_da_configs(ids, da_params, da_ports)?,
network_configs: network::create_network_configs(ids, network_params)?,
blend_configs: blend::create_blend_configs(ids, blend_ports),
})

View File

@ -103,7 +103,6 @@ pub struct GeneralConsensusConfig {
pub genesis_tx: GenesisTx,
pub utxos: Vec<Utxo>,
pub blend_notes: Vec<ServiceNote>,
pub da_notes: Vec<ServiceNote>,
pub wallet_accounts: Vec<WalletAccount>,
}
@ -166,28 +165,16 @@ fn build_ledger_config(
},
sdp_config: nomos_ledger::mantle::sdp::Config {
service_params: Arc::new(
[
(
ServiceType::BlendNetwork,
ServiceParameters {
lock_period: 10,
inactivity_period: 20,
retention_period: 100,
timestamp: 0,
session_duration: 1000,
},
),
(
ServiceType::DataAvailability,
ServiceParameters {
lock_period: 10,
inactivity_period: 20,
retention_period: 100,
timestamp: 0,
session_duration: 1000,
},
),
]
[(
ServiceType::BlendNetwork,
ServiceParameters {
lock_period: 10,
inactivity_period: 20,
retention_period: 100,
timestamp: 0,
session_duration: 1000,
},
)]
.into(),
),
min_stake: nomos_core::sdp::MinStake {
@ -218,14 +205,8 @@ pub fn create_consensus_configs(
) -> Result<Vec<GeneralConsensusConfig>, ConsensusConfigError> {
let mut leader_keys = Vec::new();
let mut blend_notes = Vec::new();
let mut da_notes = Vec::new();
let utxos = create_utxos_for_leader_and_services(
ids,
&mut leader_keys,
&mut blend_notes,
&mut da_notes,
);
let utxos = create_utxos_for_leader_and_services(ids, &mut leader_keys, &mut blend_notes);
let utxos = append_wallet_utxos(utxos, wallet);
let genesis_tx = create_genesis_tx(&utxos)?;
let ledger_config = build_ledger_config(consensus_params)?;
@ -237,7 +218,6 @@ pub fn create_consensus_configs(
ledger_config: ledger_config.clone(),
genesis_tx: genesis_tx.clone(),
utxos: utxos.clone(),
da_notes: da_notes.clone(),
blend_notes: blend_notes.clone(),
wallet_accounts: wallet.accounts.clone(),
})
@ -248,7 +228,6 @@ fn create_utxos_for_leader_and_services(
ids: &[[u8; 32]],
leader_keys: &mut Vec<(ZkPublicKey, UnsecuredZkKey)>,
blend_notes: &mut Vec<ServiceNote>,
da_notes: &mut Vec<ServiceNote>,
) -> Vec<Utxo> {
let mut utxos = Vec::new();
@ -258,7 +237,6 @@ fn create_utxos_for_leader_and_services(
// Create notes for leader and Blend declarations.
for &id in ids {
output_index = push_leader_utxo(id, leader_keys, &mut utxos, output_index);
output_index = push_service_note(b"da", id, da_notes, &mut utxos, output_index);
output_index = push_service_note(b"bn", id, blend_notes, &mut utxos, output_index);
}

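The ledger-config hunk above collapses the SDP service-parameters table to a single Blend entry. A minimal standalone sketch of that one-entry table, with simplified stand-ins for the `nomos-ledger` types (the numeric values mirror the diff; the type definitions are illustrative):

use std::{collections::HashMap, sync::Arc};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum ServiceType {
    BlendNetwork,
}

#[derive(Debug, Clone, Copy)]
struct ServiceParameters {
    lock_period: u64,
    inactivity_period: u64,
    retention_period: u64,
    timestamp: u64,
    session_duration: u64,
}

fn main() {
    // One-entry map, matching the post-change config: DataAvailability is gone.
    let service_params: Arc<HashMap<ServiceType, ServiceParameters>> = Arc::new(
        [(
            ServiceType::BlendNetwork,
            ServiceParameters {
                lock_period: 10,
                inactivity_period: 20,
                retention_period: 100,
                timestamp: 0,
                session_duration: 1000,
            },
        )]
        .into(),
    );
    println!("{:?}", service_params.get(&ServiceType::BlendNetwork));
}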
View File

@ -1,353 +0,0 @@
use std::{
collections::{HashMap, HashSet},
env, io,
path::{Path, PathBuf},
process,
str::FromStr as _,
sync::LazyLock,
time::Duration,
};
use key_management_system_service::keys::{Ed25519Key, ZkKey};
use nomos_core::sdp::SessionNumber;
use nomos_da_network_core::swarm::{
DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig,
};
use nomos_libp2p::{Multiaddr, PeerId, ed25519};
use nomos_node::LogosBlockchainDaMembership;
use num_bigint::BigUint;
use rand::random;
use subnetworks_assignations::{MembershipCreator as _, MembershipHandler as _};
use testing_framework_env as tf_env;
use thiserror::Error;
use tracing::warn;
use crate::{constants::DEFAULT_KZG_HOST_DIR, secret_key_to_peer_id};
pub static GLOBAL_PARAMS_PATH: LazyLock<String> = LazyLock::new(resolve_global_params_path);
const DEFAULT_OLD_BLOBS_CHECK_INTERVAL: Duration = Duration::from_secs(5);
const DEFAULT_BLOBS_VALIDITY_DURATION: Duration = Duration::from_secs(60);
const DEFAULT_FAILURE_TIME_WINDOW: Duration = Duration::from_secs(5);
const DEFAULT_BALANCER_INTERVAL: Duration = Duration::from_secs(1);
const DEFAULT_SEEN_MESSAGE_TTL: Duration = Duration::from_secs(3600);
const DEFAULT_SUBNETS_REFRESH_INTERVAL: Duration = Duration::from_secs(30);
fn canonicalize_params_path(mut path: PathBuf) -> PathBuf {
if path.is_dir() {
let candidates = [
path.join("kzgrs_test_params"),
path.join("pol/proving_key.zkey"),
path.join("proving_key.zkey"),
];
if let Some(file) = candidates.iter().find(|p| p.is_file()) {
return file.clone();
}
}
if let Ok(resolved) = path.canonicalize() {
path = resolved;
}
path
}
fn resolve_global_params_path() -> String {
if let Some(path) = tf_env::nomos_kzgrs_params_path() {
return canonicalize_params_path(PathBuf::from(path))
.to_string_lossy()
.to_string();
}
let workspace_root = env::var("CARGO_WORKSPACE_DIR")
.map(PathBuf::from)
.ok()
.or_else(|| {
PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.parent()
.and_then(Path::parent)
.map(Path::to_path_buf)
})
.unwrap_or_else(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")));
let params_path = canonicalize_params_path(
workspace_root.join(
testing_framework_env::nomos_kzg_dir_rel()
.unwrap_or_else(|| DEFAULT_KZG_HOST_DIR.to_string()),
),
);
match params_path.canonicalize() {
Ok(path) => path.to_string_lossy().to_string(),
Err(err) => {
warn!(
?err,
path = %params_path.display(),
"falling back to non-canonical KZG params path; set LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH to override"
);
params_path.to_string_lossy().to_string()
}
}
}
#[derive(Clone)]
pub struct DaParams {
pub subnetwork_size: usize,
pub dispersal_factor: usize,
pub num_samples: u16,
pub num_subnets: u16,
pub old_blobs_check_interval: Duration,
pub blobs_validity_duration: Duration,
pub global_params_path: String,
pub policy_settings: DAConnectionPolicySettings,
pub monitor_settings: DAConnectionMonitorSettings,
pub balancer_interval: Duration,
pub redial_cooldown: Duration,
pub replication_settings: ReplicationConfig,
pub subnets_refresh_interval: Duration,
pub retry_shares_limit: usize,
pub retry_commitments_limit: usize,
}
impl Default for DaParams {
fn default() -> Self {
Self {
subnetwork_size: 2,
dispersal_factor: 1,
num_samples: 1,
num_subnets: 2,
old_blobs_check_interval: DEFAULT_OLD_BLOBS_CHECK_INTERVAL,
blobs_validity_duration: DEFAULT_BLOBS_VALIDITY_DURATION,
global_params_path: GLOBAL_PARAMS_PATH.to_string(),
policy_settings: DAConnectionPolicySettings {
min_dispersal_peers: 1,
min_replication_peers: 1,
max_dispersal_failures: 0,
max_sampling_failures: 0,
max_replication_failures: 0,
malicious_threshold: 0,
},
monitor_settings: DAConnectionMonitorSettings {
failure_time_window: DEFAULT_FAILURE_TIME_WINDOW,
..Default::default()
},
balancer_interval: DEFAULT_BALANCER_INTERVAL,
redial_cooldown: Duration::ZERO,
replication_settings: ReplicationConfig {
seen_message_cache_size: 1000,
seen_message_ttl: DEFAULT_SEEN_MESSAGE_TTL,
},
subnets_refresh_interval: DEFAULT_SUBNETS_REFRESH_INTERVAL,
retry_shares_limit: 1,
retry_commitments_limit: 1,
}
}
}
#[derive(Debug, Clone)]
pub struct GeneralDaConfig {
pub node_key: ed25519::SecretKey,
pub signer: Ed25519Key,
pub peer_id: PeerId,
pub membership: LogosBlockchainDaMembership,
pub listening_address: Multiaddr,
pub blob_storage_directory: PathBuf,
pub global_params_path: String,
pub verifier_sk: String,
pub verifier_index: HashSet<u16>,
pub num_samples: u16,
pub num_subnets: u16,
pub old_blobs_check_interval: Duration,
pub blobs_validity_duration: Duration,
pub policy_settings: DAConnectionPolicySettings,
pub monitor_settings: DAConnectionMonitorSettings,
pub balancer_interval: Duration,
pub redial_cooldown: Duration,
pub replication_settings: ReplicationConfig,
pub subnets_refresh_interval: Duration,
pub retry_shares_limit: usize,
pub retry_commitments_limit: usize,
pub secret_zk_key: ZkKey,
}
#[derive(Debug, Error)]
pub enum DaConfigError {
#[error("DA ports length mismatch (ids={ids}, ports={ports})")]
PortsLenMismatch { ids: usize, ports: usize },
#[error(
"DA subnetwork size too large for u16 subnetwork ids (effective_subnetwork_size={effective_subnetwork_size}, max={max})"
)]
SubnetworkTooLarge {
effective_subnetwork_size: usize,
max: usize,
},
#[error("failed to derive node key from bytes: {message}")]
NodeKeyFromBytes { message: String },
#[error("failed to create DA listening address for port {port}: {message}")]
ListeningAddress { port: u16, message: String },
#[error("failed to create blob storage directory at {path}: {source}")]
BlobStorageCreate {
path: PathBuf,
#[source]
source: io::Error,
},
#[error("failed to generate verifier secret key: {message}")]
VerifierKeyGen { message: String },
}
pub fn try_create_da_configs(
ids: &[[u8; 32]],
da_params: &DaParams,
ports: &[u16],
) -> Result<Vec<GeneralDaConfig>, DaConfigError> {
// Let the subnetwork size track the participant count so tiny local topologies
// can form a membership.
let effective_subnetwork_size = da_params.subnetwork_size.max(ids.len().max(1));
let max_subnetworks = u16::MAX as usize + 1;
if effective_subnetwork_size > max_subnetworks {
return Err(DaConfigError::SubnetworkTooLarge {
effective_subnetwork_size,
max: max_subnetworks,
});
}
if ports.len() < ids.len() {
return Err(DaConfigError::PortsLenMismatch {
ids: ids.len(),
ports: ports.len(),
});
}
let mut node_keys = Vec::with_capacity(ids.len());
let mut peer_ids = Vec::with_capacity(ids.len());
let mut listening_addresses = Vec::with_capacity(ids.len());
for (index, id) in ids.iter().enumerate() {
let mut node_key_bytes = *id;
let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes).map_err(|err| {
DaConfigError::NodeKeyFromBytes {
message: err.to_string(),
}
})?;
node_keys.push(node_key.clone());
let peer_id = secret_key_to_peer_id(node_key);
peer_ids.push(peer_id);
let port = ports[index];
let listening_address = Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{port}/quic-v1",))
.map_err(|err| DaConfigError::ListeningAddress {
port,
message: err.to_string(),
})?;
listening_addresses.push(listening_address);
}
let membership = {
let template = LogosBlockchainDaMembership::new(
SessionNumber::default(),
effective_subnetwork_size,
da_params.dispersal_factor,
);
let mut assignations: HashMap<u16, HashSet<PeerId>> = HashMap::new();
if peer_ids.is_empty() {
for id in 0..effective_subnetwork_size {
assignations.insert(id as u16, HashSet::new());
}
} else {
let mut sorted_peers = peer_ids.clone();
sorted_peers.sort_unstable();
let dispersal = da_params.dispersal_factor.max(1);
let mut peer_cycle = sorted_peers.iter().cycle();
for id in 0..effective_subnetwork_size {
let mut members = HashSet::new();
for _ in 0..dispersal {
// cycle() only yields None when the iterator is empty, which we guard against.
if let Some(peer) = peer_cycle.next() {
members.insert(*peer);
}
}
assignations.insert(id as u16, members);
}
}
template.init(SessionNumber::default(), assignations)
};
let mut configs = Vec::with_capacity(ids.len());
for ((index, id), node_key) in ids.iter().enumerate().zip(node_keys.into_iter()) {
let blob_storage_directory = env::temp_dir().join(format!(
"nomos-da-blob-{}-{index}-{}",
process::id(),
random::<u64>()
));
std::fs::create_dir_all(&blob_storage_directory).map_err(|source| {
DaConfigError::BlobStorageCreate {
path: blob_storage_directory.clone(),
source,
}
})?;
let verifier_sk = blst::min_sig::SecretKey::key_gen(id, &[]).map_err(|err| {
DaConfigError::VerifierKeyGen {
message: format!("{err:?}"),
}
})?;
let verifier_sk_bytes = verifier_sk.to_bytes();
let peer_id = peer_ids[index];
let signer = Ed25519Key::from_bytes(id);
let subnetwork_ids = membership.membership(&peer_id);
let secret_zk_key = ZkKey::from(BigUint::from_bytes_le(signer.public_key().as_bytes()));
configs.push(GeneralDaConfig {
node_key,
signer,
peer_id,
secret_zk_key,
membership: membership.clone(),
listening_address: listening_addresses[index].clone(),
blob_storage_directory,
global_params_path: da_params.global_params_path.clone(),
verifier_sk: hex::encode(verifier_sk_bytes),
verifier_index: subnetwork_ids,
num_samples: da_params.num_samples,
num_subnets: da_params.num_subnets,
old_blobs_check_interval: da_params.old_blobs_check_interval,
blobs_validity_duration: da_params.blobs_validity_duration,
policy_settings: da_params.policy_settings.clone(),
monitor_settings: da_params.monitor_settings.clone(),
balancer_interval: da_params.balancer_interval,
redial_cooldown: da_params.redial_cooldown,
replication_settings: da_params.replication_settings,
subnets_refresh_interval: da_params.subnets_refresh_interval,
retry_shares_limit: da_params.retry_shares_limit,
retry_commitments_limit: da_params.retry_commitments_limit,
});
}
Ok(configs)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn try_create_da_configs_rejects_subnetwork_overflow() {
let ids = vec![[1u8; 32]];
let ports = vec![12345u16];
let mut params = DaParams::default();
params.subnetwork_size = u16::MAX as usize + 2;
let err = try_create_da_configs(&ids, &params, &ports).unwrap_err();
assert!(matches!(err, DaConfigError::SubnetworkTooLarge { .. }));
}
#[test]
fn try_create_da_configs_rejects_port_mismatch() {
let ids = vec![[1u8; 32], [2u8; 32]];
let ports = vec![12345u16];
let params = DaParams::default();
let err = try_create_da_configs(&ids, &params, &ports).unwrap_err();
assert!(matches!(err, DaConfigError::PortsLenMismatch { .. }));
}
}

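Before its removal, `try_create_da_configs` above built the DA membership by cycling a sorted peer list and handing each subnetwork `dispersal_factor` members. A dependency-free sketch of that assignment scheme, with plain strings standing in for `PeerId` and a hypothetical function name:

use std::collections::{HashMap, HashSet};

// Round-robin subnetwork assignment, as in the removed membership builder:
// cycle through the sorted peers, giving each subnetwork `dispersal` members.
fn assign_subnetworks(
    peers: &[&str],
    subnetworks: usize,
    dispersal: usize,
) -> HashMap<u16, HashSet<String>> {
    let mut sorted: Vec<&str> = peers.to_vec();
    sorted.sort_unstable();
    let mut cycle = sorted.iter().cycle();
    let mut assignations = HashMap::new();
    for id in 0..subnetworks {
        let mut members = HashSet::new();
        for _ in 0..dispersal.max(1) {
            // cycle() only yields None when the underlying list is empty, in
            // which case every subnetwork simply ends up with no members.
            if let Some(peer) = cycle.next() {
                members.insert((*peer).to_string());
            }
        }
        assignations.insert(id as u16, members);
    }
    assignations
}

fn main() {
    let map = assign_subnetworks(&["peer-b", "peer-a", "peer-c"], 4, 2);
    // With 3 peers, 4 subnetworks and dispersal 2, the peers wrap around:
    // 0 -> {a, b}, 1 -> {c, a}, 2 -> {b, c}, 3 -> {a, b}.
    for id in 0..4u16 {
        println!("{id}: {:?}", map[&id]);
    }
}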
View File

@ -3,7 +3,6 @@ pub mod base;
pub mod blend;
pub mod bootstrap;
pub mod consensus;
pub mod da;
pub mod network;
pub mod runtime;
pub mod time;
@ -16,7 +15,6 @@ use blend::GeneralBlendConfig;
use consensus::{
ConsensusConfigError, GeneralConsensusConfig, ProviderInfo, create_genesis_tx_with_declarations,
};
use da::GeneralDaConfig;
use key_management_system_service::{backend::preload::PreloadKMSBackendSettings, keys::Key};
use network::GeneralNetworkConfig;
use nomos_core::{
@ -35,7 +33,6 @@ use crate::{
api::GeneralApiConfig,
bootstrap::{GeneralBootstrapConfig, SHORT_PROLONGED_BOOTSTRAP_PERIOD},
consensus::ConsensusParams,
da::DaParams,
network::NetworkParams,
time::GeneralTimeConfig,
},
@ -61,8 +58,6 @@ pub enum GeneralConfigError {
#[error(transparent)]
Network(#[from] network::NetworkConfigError),
#[error(transparent)]
Da(#[from] da::DaConfigError),
#[error(transparent)]
Api(#[from] api::ApiConfigError),
}
@ -71,7 +66,6 @@ pub struct GeneralConfig {
pub api_config: GeneralApiConfig,
pub consensus_config: GeneralConsensusConfig,
pub bootstrapping_config: GeneralBootstrapConfig,
pub da_config: GeneralDaConfig,
pub network_config: GeneralNetworkConfig,
pub blend_config: GeneralBlendConfig,
pub tracing_config: GeneralTracingConfig,
@ -99,9 +93,9 @@ pub fn create_general_configs_with_blend_core_subset(
) -> Result<Vec<GeneralConfig>, GeneralConfigError> {
validate_node_counts(n_nodes, n_blend_core_nodes)?;
let (ids, da_ports, blend_ports) = generate_ids_and_ports(n_nodes)?;
let (ids, blend_ports) = generate_ids_and_ports(n_nodes)?;
validate_generated_vectors(n_nodes, &ids, &da_ports, &blend_ports)?;
validate_generated_vectors(n_nodes, &ids, &blend_ports)?;
let consensus_params = ConsensusParams::default_for_participants(n_nodes);
let mut consensus_configs =
@ -109,7 +103,6 @@ pub fn create_general_configs_with_blend_core_subset(
let bootstrap_config =
bootstrap::create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD);
let network_configs = network::create_network_configs(&ids, network_params)?;
let da_configs = da::try_create_da_configs(&ids, &DaParams::default(), &da_ports)?;
let api_configs = api::create_api_configs(&ids)?;
let blend_configs = blend::create_blend_configs(&ids, &blend_ports);
let tracing_configs = tracing::create_tracing_configs(&ids);
@ -133,7 +126,6 @@ pub fn create_general_configs_with_blend_core_subset(
&api_configs,
&consensus_configs,
&bootstrap_config,
&da_configs,
&network_configs,
&blend_configs,
&tracing_configs,
@ -160,29 +152,22 @@ fn validate_node_counts(
Ok(())
}
fn generate_ids_and_ports(
n_nodes: usize,
) -> Result<(Vec<[u8; 32]>, Vec<u16>, Vec<u16>), GeneralConfigError> {
fn generate_ids_and_ports(n_nodes: usize) -> Result<(Vec<[u8; 32]>, Vec<u16>), GeneralConfigError> {
// Blend relies on each node declaring a different ZK public key, so we need
// different IDs to generate different keys.
let mut ids: Vec<_> = (0..n_nodes).map(|i| [i as u8; 32]).collect();
let mut da_ports = Vec::with_capacity(n_nodes);
let mut blend_ports = Vec::with_capacity(n_nodes);
for id in &mut ids {
thread_rng().fill(id);
da_ports.push(
get_available_udp_port()
.ok_or(GeneralConfigError::PortAllocationFailed { label: "DA" })?,
);
blend_ports.push(
get_available_udp_port()
.ok_or(GeneralConfigError::PortAllocationFailed { label: "Blend" })?,
);
}
Ok((ids, da_ports, blend_ports))
Ok((ids, blend_ports))
}
fn collect_blend_core_providers(
@ -250,7 +235,6 @@ fn build_general_configs(
api_configs: &[GeneralApiConfig],
consensus_configs: &[GeneralConsensusConfig],
bootstrap_config: &[GeneralBootstrapConfig],
da_configs: &[GeneralDaConfig],
network_configs: &[GeneralNetworkConfig],
blend_configs: &[GeneralBlendConfig],
tracing_configs: &[GeneralTracingConfig],
@ -263,7 +247,6 @@ fn build_general_configs(
let api_config = get_cloned_or_empty(api_configs, i)?;
let consensus_config = get_cloned_or_empty(consensus_configs, i)?;
let bootstrapping_config = get_cloned_or_empty(bootstrap_config, i)?;
let da_config = get_cloned_or_empty(da_configs, i)?;
let network_config = get_cloned_or_empty(network_configs, i)?;
let blend_config = get_cloned_or_empty(blend_configs, i)?;
let tracing_config = get_cloned_or_empty(tracing_configs, i)?;
@ -273,7 +256,6 @@ fn build_general_configs(
api_config,
consensus_config,
bootstrapping_config,
da_config,
network_config,
blend_config,
tracing_config,

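With the DA ports gone, `generate_ids_and_ports` above returns only node ids and Blend ports. A standalone sketch of the same flow, simplified to `Option` for brevity: `get_available_udp_port` is approximated here by binding to port 0 and reading back the OS-assigned port, and the `thread_rng()` id fill is replaced by a counter to keep the example dependency-free:

use std::net::UdpSocket;

// Approximation of the port helper: bind to an ephemeral port, report it, and
// release it when the socket drops (so a later bind can reuse it).
fn get_available_udp_port() -> Option<u16> {
    UdpSocket::bind("127.0.0.1:0")
        .ok()
        .and_then(|socket| socket.local_addr().ok())
        .map(|addr| addr.port())
}

fn generate_ids_and_ports(n_nodes: usize) -> Option<(Vec<[u8; 32]>, Vec<u16>)> {
    let mut ids = vec![[0u8; 32]; n_nodes];
    let mut blend_ports = Vec::with_capacity(n_nodes);
    for (i, id) in ids.iter_mut().enumerate() {
        // The real code randomizes ids so each node derives a distinct ZK key;
        // a counter keeps this sketch deterministic and dependency-free.
        id[0] = i as u8;
        blend_ports.push(get_available_udp_port()?);
    }
    Some((ids, blend_ports))
}

fn main() {
    let (ids, ports) = generate_ids_and_ports(3).expect("port allocation failed");
    println!("{} ids, blend ports: {ports:?}", ids.len());
}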
View File

@ -9,8 +9,6 @@ use crate::{
topology::configs::{
GeneralConfig, GeneralConfigError, api, blend, bootstrap, consensus,
consensus::{ConsensusParams, GeneralConsensusConfig},
da,
da::DaParams,
network,
network::{Libp2pNetworkLayout, NetworkParams},
time, tracing,
@ -22,10 +20,8 @@ pub fn build_general_config_for_node(
id: [u8; 32],
network_port: u16,
initial_peers: Vec<Multiaddr>,
da_port: u16,
blend_port: u16,
consensus_params: &ConsensusParams,
da_params: &DaParams,
wallet_config: &WalletConfig,
base_consensus: &GeneralConsensusConfig,
time_config: &time::GeneralTimeConfig,
@ -39,11 +35,6 @@ pub fn build_general_config_for_node(
.next()
.ok_or(GeneralConfigError::EmptyParticipants)?;
let da_config = da::try_create_da_configs(&[id], da_params, &[da_port])?
.into_iter()
.next()
.ok_or(GeneralConfigError::EmptyParticipants)?;
let blend_config = blend::create_blend_configs(&[id], &[blend_port])
.into_iter()
.next()
@ -61,12 +52,11 @@ pub fn build_general_config_for_node(
.next()
.ok_or(GeneralConfigError::EmptyParticipants)?;
let kms_config = build_kms_config_for_node(&blend_config, &da_config, wallet_config);
let kms_config = build_kms_config_for_node(&blend_config, wallet_config);
Ok(GeneralConfig {
consensus_config,
bootstrapping_config: bootstrap_config,
da_config,
network_config,
blend_config,
api_config,
@ -89,7 +79,6 @@ pub fn build_consensus_config_for_node(
config.genesis_tx = base.genesis_tx.clone();
config.utxos = base.utxos.clone();
config.da_notes = base.da_notes.clone();
config.blend_notes = base.blend_notes.clone();
config.wallet_accounts = base.wallet_accounts.clone();
@ -115,7 +104,6 @@ pub fn build_initial_peers(network_params: &NetworkParams, peer_ports: &[u16]) -
fn build_kms_config_for_node(
blend_config: &blend::GeneralBlendConfig,
da_config: &da::GeneralDaConfig,
wallet_config: &WalletConfig,
) -> PreloadKMSBackendSettings {
let mut keys = HashMap::from([
@ -127,14 +115,6 @@ fn build_kms_config_for_node(
key_id_for_preload_backend(&Key::Zk(blend_config.secret_zk_key.clone())),
Key::Zk(blend_config.secret_zk_key.clone()),
),
(
key_id_for_preload_backend(&Key::Ed25519(da_config.signer.clone())),
Key::Ed25519(da_config.signer.clone()),
),
(
key_id_for_preload_backend(&Key::Zk(da_config.secret_zk_key.clone())),
Key::Zk(da_config.secret_zk_key.clone()),
),
]);
for account in &wallet_config.accounts {

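After this hunk, `build_kms_config_for_node` seeds the preload KMS with only the Blend signer and ZK keys (plus the wallet accounts added in the loop below the cut). A simplified stand-in sketch of that seeding pattern; `Key` and `key_id` here are placeholders for the real KMS types:

use std::collections::HashMap;

#[derive(Debug)]
enum Key {
    Ed25519([u8; 32]),
    Zk([u8; 32]),
}

// The real key_id_for_preload_backend derives an id from the key material;
// a debug rendering stands in for it here.
fn key_id(key: &Key) -> String {
    format!("{key:?}")
}

fn main() {
    let blend_signer = Key::Ed25519([1; 32]);
    let blend_zk = Key::Zk([2; 32]);
    // Only the Blend entries remain; the DA signer/ZK pairs were removed above.
    let keys: HashMap<String, Key> = HashMap::from([
        (key_id(&blend_signer), blend_signer),
        (key_id(&blend_zk), blend_zk),
    ]);
    println!("preloaded {} keys", keys.len());
}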
View File

@ -19,7 +19,6 @@ pub enum TopologyInvariantError {
pub fn validate_node_vectors(
participants: usize,
ids: Option<&Vec<[u8; 32]>>,
da_ports: Option<&Vec<u16>>,
blend_ports: Option<&Vec<u16>>,
) -> Result<(), TopologyInvariantError> {
if participants == 0 {
@ -35,15 +34,6 @@ pub fn validate_node_vectors(
}
}
if let Some(ports) = da_ports {
if ports.len() != participants {
return Err(TopologyInvariantError::DaPortCountMismatch {
actual: ports.len(),
expected: participants,
});
}
}
if let Some(ports) = blend_ports {
if ports.len() != participants {
return Err(TopologyInvariantError::BlendPortCountMismatch {
@ -59,7 +49,6 @@ pub fn validate_node_vectors(
pub fn validate_generated_vectors(
participants: usize,
ids: &[[u8; 32]],
da_ports: &[u16],
blend_ports: &[u16],
) -> Result<(), TopologyInvariantError> {
if participants == 0 {
@ -73,13 +62,6 @@ pub fn validate_generated_vectors(
});
}
if da_ports.len() != participants {
return Err(TopologyInvariantError::DaPortCountMismatch {
actual: da_ports.len(),
expected: participants,
});
}
if blend_ports.len() != participants {
return Err(TopologyInvariantError::BlendPortCountMismatch {
actual: blend_ports.len(),

View File

@ -22,22 +22,19 @@ chain-service = { workspace = true }
common-http-client = { workspace = true }
futures = { default-features = false, version = "0.3" }
groth16 = { workspace = true }
hex = { version = "0.4.3", default-features = false }
hex = { default-features = false, version = "0.4.3" }
key-management-system-service = { workspace = true }
logos-blockchain-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] }
nomos-core = { workspace = true }
nomos-da-network-core = { workspace = true }
nomos-da-network-service = { workspace = true }
nomos-http-api-common = { workspace = true }
nomos-libp2p = { workspace = true }
nomos-network = { workspace = true, features = ["libp2p"] }
nomos-node = { workspace = true, default-features = false, features = ["testing"] }
nomos-network = { features = ["libp2p"], workspace = true }
nomos-node = { default-features = false, features = ["testing"], workspace = true }
nomos-tracing = { workspace = true }
nomos-tracing-service = { workspace = true }
nomos-utils = { workspace = true }
prometheus-http-query = "0.8"
rand = { workspace = true }
reqwest = { workspace = true, features = ["json"] }
reqwest = { features = ["json"], workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
@ -46,5 +43,5 @@ tempfile = { workspace = true }
testing-framework-config = { workspace = true }
testing-framework-env = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["macros", "process", "rt-multi-thread", "time"] }
tokio = { features = ["macros", "process", "rt-multi-thread", "time"], workspace = true }
tracing = { workspace = true }

View File

@ -8,7 +8,6 @@ use std::{env, ops::Mul as _, sync::LazyLock, time::Duration};
pub use testing_framework_config::{
IS_DEBUG_TRACING, node_address_from_port, secret_key_to_peer_id, secret_key_to_provider_id,
topology::configs::da::GLOBAL_PARAMS_PATH,
};
static IS_SLOW_TEST_ENV: LazyLock<bool> =

View File

@ -11,11 +11,5 @@ pub trait ManualClusterHandle: Send + Sync {
options: StartNodeOptions,
) -> Result<StartedNode, DynError>;
async fn start_executor_with(
&self,
name: &str,
options: StartNodeOptions,
) -> Result<StartedNode, DynError>;
async fn wait_network_ready(&self) -> Result<(), DynError>;
}

View File

@ -3,16 +3,12 @@ use std::net::SocketAddr;
use chain_service::CryptarchiaInfo;
use common_http_client::CommonHttpClient;
use hex;
use nomos_core::{block::Block, da::BlobId, mantle::SignedMantleTx, sdp::SessionNumber};
use nomos_da_network_core::swarm::{BalancerStats, MonitorStats};
use nomos_da_network_service::MembershipResponse;
use nomos_core::{block::Block, mantle::SignedMantleTx};
use nomos_http_api_common::paths::{
CRYPTARCHIA_HEADERS, CRYPTARCHIA_INFO, DA_BALANCER_STATS, DA_BLACKLISTED_PEERS, DA_BLOCK_PEER,
DA_GET_MEMBERSHIP, DA_HISTORIC_SAMPLING, DA_MONITOR_STATS, DA_UNBLOCK_PEER, MEMPOOL_ADD_TX,
NETWORK_INFO, STORAGE_BLOCK,
CRYPTARCHIA_HEADERS, CRYPTARCHIA_INFO, MEMPOOL_ADD_TX, NETWORK_INFO, STORAGE_BLOCK,
};
use nomos_network::backends::libp2p::Libp2pInfo;
use nomos_node::{HeaderId, api::testing::handlers::HistoricSamplingRequest};
use nomos_node::HeaderId;
use reqwest::{Client, RequestBuilder, Response, Url};
use serde::{Serialize, de::DeserializeOwned};
use serde_json::Value;
@ -232,31 +228,6 @@ impl ApiClient {
self.get_testing_response_checked(path).await
}
/// Block a peer via the DA testing API.
pub async fn block_peer(&self, peer_id: &str) -> reqwest::Result<bool> {
self.post_json_decode(DA_BLOCK_PEER, &peer_id).await
}
/// Unblock a peer via the DA testing API.
pub async fn unblock_peer(&self, peer_id: &str) -> reqwest::Result<bool> {
self.post_json_decode(DA_UNBLOCK_PEER, &peer_id).await
}
/// Fetch the list of blacklisted peers.
pub async fn blacklisted_peers(&self) -> reqwest::Result<Vec<String>> {
self.get_json(DA_BLACKLISTED_PEERS).await
}
/// Fetch balancer stats from DA API.
pub async fn balancer_stats(&self) -> reqwest::Result<BalancerStats> {
self.get_json(DA_BALANCER_STATS).await
}
/// Fetch monitor stats from DA API.
pub async fn monitor_stats(&self) -> reqwest::Result<MonitorStats> {
self.get_json(DA_MONITOR_STATS).await
}
/// Fetch consensus info from the base API.
pub async fn consensus_info(&self) -> reqwest::Result<CryptarchiaInfo> {
self.get_json(CRYPTARCHIA_INFO).await
@ -304,37 +275,6 @@ impl ApiClient {
.await
}
/// Query DA membership via testing API.
pub async fn da_get_membership_checked(
&self,
session_id: &SessionNumber,
) -> Result<MembershipResponse, ApiClientError> {
self.post_testing_json_response_checked(DA_GET_MEMBERSHIP, session_id)
.await?
.error_for_status()
.map_err(ApiClientError::Request)?
.json()
.await
.map_err(ApiClientError::Request)
}
pub async fn da_get_membership(
&self,
session_id: &SessionNumber,
) -> Result<MembershipResponse, ApiClientError> {
self.post_testing_json_decode(DA_GET_MEMBERSHIP, session_id)
.await
}
/// Query historic sampling via testing API.
pub async fn da_historic_sampling(
&self,
request: &HistoricSamplingRequest<BlobId>,
) -> Result<bool, ApiClientError> {
self.post_testing_json_decode(DA_HISTORIC_SAMPLING, request)
.await
}
/// Submit a mantle transaction through the base API.
pub async fn submit_transaction(&self, tx: &SignedMantleTx) -> reqwest::Result<()> {
let res = self.post_json_response(MEMPOOL_ADD_TX, tx).await?;

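With the DA routes gone, the client above keeps only the base and testing endpoints. A hypothetical call site for the remaining surface; `client` and `tx` are assumed to be constructed elsewhere, and `CryptarchiaInfo` is assumed to implement `Debug`:

// Smoke-check the slimmed-down client: read consensus info, then submit a tx.
async fn smoke_check(
    client: &ApiClient,
    tx: &SignedMantleTx,
) -> Result<(), Box<dyn std::error::Error>> {
    // GETs CRYPTARCHIA_INFO under the hood.
    let info = client.consensus_info().await?;
    println!("consensus info: {info:?}");
    // POSTs to MEMPOOL_ADD_TX; failures surface as reqwest errors.
    client.submit_transaction(tx).await?;
    Ok(())
}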
Some files were not shown because too many files have changed in this diff