Decouple nomos and core

This commit is contained in:
andrussal 2026-02-02 07:19:22 +01:00
parent ad5a25bc94
commit b62e712085
330 changed files with 10484 additions and 13654 deletions

View File

@ -53,9 +53,10 @@ license-files = [{ hash = 0xcb90f5db, path = "LICENSE" }]
name = "jsonpath-rust"
[sources]
allow-git = ["https://github.com/EspressoSystems/jellyfish.git"]
unknown-git = "deny"
allow-git = [
"https://github.com/EspressoSystems/jellyfish.git",
"https://github.com/logos-blockchain/logos-blockchain.git",
"https://github.com/logos-co/Overwatch",
]
unknown-git = "deny"
unknown-registry = "deny"
[sources.allow-org]
github = ["logos-co"]

View File

@ -98,41 +98,6 @@ jobs:
restore-keys: ${{ runner.os }}-target-clippy-
- run: cargo +nightly-2025-09-14 clippy --all --all-targets --all-features -- -D warnings
doc_snippets:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Load versions
run: |
set -euo pipefail
if [ ! -f versions.env ]; then
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
exit 1
fi
set -a
. versions.env
set +a
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
: "${VERSION:?Missing VERSION}"
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
- name: Install nomos circuits
run: |
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2025-09-14
- uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: ${{ runner.os }}-cargo-
- run: cargo +nightly-2025-09-14 check -p doc-snippets
deny:
runs-on: ubuntu-latest
steps:

2
.gitignore vendored
View File

@ -15,7 +15,7 @@ ci-artifacts/
tests/kzgrs/circuits_bundle/
NOMOS_RUST_SOURCES_ONLY.txt
dump.zsh
testing-framework/assets/stack/bin/
nomos/assets/stack/bin/
testing-framework/assets/stack/kzgrs_test_params/
null

550
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,15 +1,15 @@
[workspace]
members = [
"examples",
"examples/doc-snippets",
"testing-framework/configs",
"logos/examples",
"logos/runtime/env",
"logos/runtime/ext",
"logos/runtime/workloads",
"testing-framework/core",
"testing-framework/deployers/compose",
"testing-framework/deployers/k8s",
"testing-framework/deployers/local",
"testing-framework/env",
"testing-framework/tools/cfgsync_tf",
"testing-framework/workflows",
"testing-framework/tools/cfgsync-core",
"testing-framework/tools/cfgsync-runtime",
]
resolver = "2"
@ -31,61 +31,74 @@ all = "allow"
[workspace.dependencies]
# Local testing framework crates
testing-framework-config = { default-features = false, path = "testing-framework/configs" }
cfgsync-core = { default-features = false, path = "testing-framework/tools/cfgsync-core" }
lb-ext = { default-features = false, path = "logos/runtime/ext" }
lb-framework = { default-features = false, package = "testing_framework", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
lb-workloads = { default-features = false, path = "logos/runtime/workloads" }
testing-framework-core = { default-features = false, path = "testing-framework/core" }
testing-framework-env = { default-features = false, path = "testing-framework/env" }
testing-framework-env = { default-features = false, path = "logos/runtime/env" }
testing-framework-runner-compose = { default-features = false, path = "testing-framework/deployers/compose" }
testing-framework-runner-k8s = { default-features = false, path = "testing-framework/deployers/k8s" }
testing-framework-runner-local = { default-features = false, path = "testing-framework/deployers/local" }
testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" }
# Logos git dependencies (pinned to latest master)
cfgsync_tf = { default-features = false, path = "testing-framework/tools/cfgsync_tf" }
lb-api-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-chain-broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-broadcast-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-chain-leader-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-leader-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-network-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-common-http-client", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-engine", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-libp2p", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-network-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-network-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-node", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-poc", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-pol", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-sdp-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-sdp-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tests", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-time-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-time-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tx-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-utils", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-wallet-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
lb-zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-zksign", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
# Logos dependencies (from logos-blockchain master @ a4275d00e)
broadcast-service = { default-features = false, package = "logos-blockchain-chain-broadcast-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
cfgsync_runtime = { default-features = false, package = "cfgsync-runtime", path = "testing-framework/tools/cfgsync-runtime" }
chain-leader = { default-features = false, features = [
"pol-dev-mode",
], package = "logos-blockchain-chain-leader-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
chain-network = { default-features = false, package = "logos-blockchain-chain-network-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
chain-service = { default-features = false, package = "logos-blockchain-chain-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
common-http-client = { default-features = false, package = "logos-blockchain-common-http-client", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
cryptarchia-engine = { default-features = false, package = "logos-blockchain-cryptarchia-engine", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
cryptarchia-sync = { default-features = false, package = "logos-blockchain-cryptarchia-sync", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
groth16 = { default-features = false, package = "logos-blockchain-groth16", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
key-management-system-service = { default-features = false, package = "logos-blockchain-key-management-system-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-api = { default-features = false, package = "logos-blockchain-api-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-blend-message = { default-features = false, package = "logos-blockchain-blend-message", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-blend-service = { default-features = false, package = "logos-blockchain-blend-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-core = { default-features = false, package = "logos-blockchain-core", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-http-api-common = { default-features = false, package = "logos-blockchain-http-api-common", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-ledger = { default-features = false, package = "logos-blockchain-ledger", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-libp2p = { default-features = false, package = "logos-blockchain-libp2p", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-network = { default-features = false, package = "logos-blockchain-network-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-node = { default-features = false, features = [
"testing",
], package = "logos-blockchain-node", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-sdp = { default-features = false, package = "logos-blockchain-sdp-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-time = { default-features = false, package = "logos-blockchain-time-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-tracing = { default-features = false, package = "logos-blockchain-tracing", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-tracing-service = { default-features = false, package = "logos-blockchain-tracing-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-utils = { default-features = false, package = "logos-blockchain-utils", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
nomos-wallet = { default-features = false, package = "logos-blockchain-wallet-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
poc = { default-features = false, package = "logos-blockchain-poc", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
pol = { default-features = false, package = "logos-blockchain-pol", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
tx-service = { default-features = false, package = "logos-blockchain-tx-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
wallet = { default-features = false, package = "logos-blockchain-wallet", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
zksign = { default-features = false, package = "logos-blockchain-zksign", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
# lb_* aliases (nomos-node repo naming)
lb_http_api_common = { default-features = false, package = "logos-blockchain-http-api-common", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
lb_tracing = { default-features = false, package = "logos-blockchain-tracing", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
lb_tracing_service = { default-features = false, package = "logos-blockchain-tracing-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "a4275d00eb3041ed6bfb394e0913cd1ad172224c" }
# External crates
async-trait = { default-features = false, version = "0.1" }
bytes = { default-features = false, version = "1.3" }
hex = { default-features = false, version = "0.4.3" }
libp2p = { default-features = false, version = "0.55" }
overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
rand = { default-features = false, version = "0.8" }
reqwest = { default-features = false, version = "0.12" }
serde = { default-features = true, features = ["derive"], version = "1.0" }
serde_json = { default-features = false, version = "1.0" }
serde_with = { default-features = false, version = "3.14.0" }
serde_yaml = { default-features = false, version = "0.9.33" }
tempfile = { default-features = false, version = "3" }
thiserror = { default-features = false, version = "2.0" }
tokio = { default-features = false, version = "1" }
tracing = { default-features = false, version = "0.1" }
async-trait = { default-features = false, version = "0.1" }
bytes = { default-features = false, version = "1.3" }
hex = { default-features = false, version = "0.4.3" }
libp2p = { default-features = false, version = "0.55" }
num-bigint = { default-features = false, version = "0.4" }
overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch" }
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch" }
parking_lot = { default-features = false, version = "0.12" }
rand = { default-features = false, features = ["std", "std_rng"], version = "0.8" }
reqwest = { default-features = false, version = "0.12" }
serde = { default-features = true, features = ["derive"], version = "1.0" }
serde_json = { default-features = false, version = "1.0" }
serde_path_to_error = { default-features = false, version = "0.1" }
serde_with = { default-features = false, version = "3.14.0" }
serde_yaml = { default-features = false, version = "0.9.33" }
tempfile = { default-features = false, version = "3" }
thiserror = { default-features = false, version = "2.0" }
time = { default-features = false, version = "0.3" }
tokio = { default-features = false, version = "1" }
tracing = { default-features = false, version = "0.1" }
uuid = { default-features = false, version = "1", features = ["v4"] }

View File

@ -1,116 +0,0 @@
use std::{process, time::Duration};
use anyhow::{Context as _, Result};
use runner_examples::{
ChaosBuilderExt as _, DeployerKind, ScenarioBuilderExt as _, demo, read_env_any,
};
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
use testing_framework_runner_compose::{ComposeDeployer, ComposeRunnerError};
use tracing::{info, warn};
// Transaction workload sizing for the compose demo scenario.
const MIXED_TXS_PER_BLOCK: u64 = 5; // transaction rate passed to the workload builder
const TOTAL_WALLETS: usize = 1000; // total wallets configured for the scenario
const TRANSACTION_WALLETS: usize = 500; // subset of wallets actively submitting transactions
// Chaos Testing Constants
const CHAOS_MIN_DELAY_SECS: u64 = 120; // earliest a chaos restart may fire
const CHAOS_MAX_DELAY_SECS: u64 = 180; // latest a chaos restart may fire
const CHAOS_COOLDOWN_SECS: u64 = 240; // target pause between consecutive restarts
const CHAOS_DELAY_HEADROOM_SECS: u64 = 1; // margin so chaos delays end before the run does
// Entry point for the compose runner demo: reads the node count and run
// duration from the environment (falling back to demo defaults), runs a
// single compose scenario, and exits with status 1 on failure.
#[tokio::main]
async fn main() {
    // Presumably seeds default node-log-directory settings for the compose
    // deployer — see runner_examples::defaults (TODO confirm).
    runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Compose);
    tracing_subscriber::fmt::init();
    // Env overrides fall back to the defaults when unset or unparsable.
    let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
    let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
    info!(nodes, run_secs, "starting compose runner demo");
    if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await {
        warn!("compose runner demo failed: {err:#}");
        process::exit(1);
    }
}
// Build and run one compose-based scenario: a star topology of `nodes`
// nodes with a transaction workload, optional restart chaos, and a
// consensus-liveness expectation, for `run_duration`.
//
// Returns Ok(()) on success — and also when Docker is unavailable, which is
// treated as a soft skip rather than a failure.
async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
    info!(
        nodes,
        duration_secs = run_duration.as_secs(),
        "building scenario plan"
    );
    let scenario =
        ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)).enable_node_control();
    // Only enable restart chaos when the run is long enough to fit the
    // configured delays (see `chaos_timings`).
    let scenario = if let Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown)) =
        chaos_timings(run_duration)
    {
        scenario.chaos_with(|c| {
            c.restart()
                .min_delay(chaos_min_delay)
                .max_delay(chaos_max_delay)
                .target_cooldown(chaos_target_cooldown)
                .apply()
        })
    } else {
        scenario
    };
    let mut plan = scenario
        .wallets(TOTAL_WALLETS)
        .transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
        .with_run_duration(run_duration)
        .expect_consensus_liveness()
        .build()?;
    let deployer = ComposeDeployer::new();
    info!("deploying compose stack");
    let runner: Runner = match deployer.deploy(&plan).await {
        Ok(runner) => runner,
        // Missing Docker is a soft skip so the demo can run (as a no-op) on
        // hosts without a container runtime.
        Err(ComposeRunnerError::DockerUnavailable) => {
            warn!("Docker is unavailable; cannot run compose demo");
            return Ok(());
        }
        Err(err) => return Err(anyhow::Error::new(err)).context("deploying compose stack failed"),
    };
    // Telemetry is optional; warn so the operator knows metrics are off.
    if !runner.context().telemetry().is_configured() {
        warn!(
            "metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
        );
    }
    info!("running scenario");
    runner
        .run(&mut plan)
        .await
        .context("running compose scenario failed")?;
    Ok(())
}
// Derive `(min_delay, max_delay, target_cooldown)` for restart chaos from
// the run duration, or `None` when the run is too short to fit a restart.
//
// All values are capped at `run_duration - headroom` so chaos settles
// before the run ends, and ordered `min_delay <= max_delay <= cooldown`.
fn chaos_timings(run_duration: Duration) -> Option<(Duration, Duration, Duration)> {
    let headroom = Duration::from_secs(CHAOS_DELAY_HEADROOM_SECS);
    // Runs shorter than the headroom cannot schedule anything at all.
    let max_allowed_delay = run_duration.checked_sub(headroom)?;
    let min_delay = Duration::from_secs(CHAOS_MIN_DELAY_SECS);
    if max_allowed_delay <= min_delay {
        // No room for even the shortest restart delay.
        return None;
    }
    // The guard above guarantees min_delay < max_allowed_delay, so the
    // clamp bounds are well ordered (x.clamp(lo, hi) == x.min(hi).max(lo)).
    let max_delay = Duration::from_secs(CHAOS_MAX_DELAY_SECS).clamp(min_delay, max_allowed_delay);
    let cooldown = Duration::from_secs(CHAOS_COOLDOWN_SECS).clamp(max_delay, max_allowed_delay);
    Some((min_delay, max_delay, cooldown))
}

View File

@ -1,2 +0,0 @@
// Default node count when `LOGOS_BLOCKCHAIN_DEMO_NODES` is not set.
pub const DEFAULT_NODES: usize = 2;
// Default run time in seconds when `LOGOS_BLOCKCHAIN_DEMO_RUN_SECS` is not set.
pub const DEFAULT_RUN_SECS: u64 = 60;

View File

@ -1,10 +0,0 @@
use std::{env, str::FromStr};
// Return the first value among `keys` whose environment variable is both
// set and parses as `T`; otherwise fall back to `default`. Unset variables
// and parse failures are skipped silently.
pub fn read_env_any<T>(keys: &[&str], default: T) -> T
where
    T: FromStr + Copy,
{
    for key in keys {
        if let Ok(raw) = env::var(key) {
            if let Ok(value) = raw.parse::<T>() {
                return value;
            }
        }
    }
    default
}

View File

@ -1,13 +0,0 @@
// Shared helpers for the runner examples.
pub mod defaults;
pub mod demo;
pub mod env;
pub use env::read_env_any;
// Re-export the builder extension traits so examples only need this crate
// in scope to use them.
pub use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
// Which deployer an example targets (e.g. used to pick log-dir defaults).
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum DeployerKind {
    #[default]
    Local,
    Compose,
}

View File

@ -1,139 +0,0 @@
use std::time::Duration;
use anyhow::Result;
use testing_framework_core::{
scenario::{PeerSelection, StartNodeOptions},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tokio::time::sleep;
use tracing_subscriber::fmt::try_init;
// Maximum height gap between the two nodes for them to count as merged.
const MAX_HEIGHT_DIFF: u64 = 5;
// How long to wait for heights to converge before failing the test.
const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60);
// Poll interval while waiting for convergence.
const CONVERGENCE_POLL: Duration = Duration::from_secs(2);
// Manual merge check: start node "a" alone, later start node "c" peered to
// "a", then poll both nodes' consensus heights until they are within
// MAX_HEIGHT_DIFF of each other, failing after CONVERGENCE_TIMEOUT.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"]
async fn manual_cluster_two_clusters_merge() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `RUST_LOG=info` (optional)
    let config = TopologyConfig::with_node_numbers(2);
    let deployer = LocalDeployer::new();
    let cluster = deployer.manual_cluster(config)?;
    // Nodes are stopped automatically when the cluster is dropped.
    println!("starting node a");
    let node_a = cluster
        .start_node_with(
            "a",
            StartNodeOptions {
                peers: PeerSelection::None, // "a" starts isolated
                config_patch: None,
                persist_dir: None,
            },
        )
        .await?
        .api;
    println!("waiting briefly before starting c");
    // Presumably lets "a" advance on its own so "c" must catch up on join.
    sleep(Duration::from_secs(30)).await;
    println!("starting node c -> a");
    let node_c = cluster
        .start_node_with(
            "c",
            StartNodeOptions {
                peers: PeerSelection::Named(vec!["node-a".to_owned()]),
                config_patch: None,
                persist_dir: None,
            },
        )
        .await?
        .api;
    println!("waiting for network readiness: cluster a,c");
    cluster.wait_network_ready().await?;
    let start = tokio::time::Instant::now();
    // Poll both nodes until their heights converge or the timeout elapses.
    loop {
        let a_info = node_a.consensus_info().await?;
        let c_info = node_c.consensus_info().await?;
        let a_height = a_info.height;
        let c_height = c_info.height;
        let diff = a_height.abs_diff(c_height);
        if diff <= MAX_HEIGHT_DIFF {
            println!(
                "final heights: node-a={}, node-c={}, diff={}",
                a_height, c_height, diff
            );
            return Ok(());
        }
        if start.elapsed() >= CONVERGENCE_TIMEOUT {
            return Err(anyhow::anyhow!(
                "height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})"
            ));
        }
        sleep(CONVERGENCE_POLL).await;
    }
}
// Manual persist-dir check: start one node with `persist_dir` set, drop the
// cluster, and verify the directory exists afterwards (created by the
// persist-on-drop logic), then clean it up.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_with_persist_dir`"]
async fn manual_cluster_with_persist_dir() -> Result<()> {
    use std::path::PathBuf;
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `RUST_LOG=info` (optional)
    let config = TopologyConfig::with_node_numbers(1);
    let deployer = LocalDeployer::new();
    let cluster = deployer.manual_cluster(config)?;
    // NOTE(review): fixed /tmp path — concurrent runs of this test would
    // collide on the same directory.
    let persist_dir = PathBuf::from("/tmp/test-node-persist-dir");
    println!("starting validator with persist_dir: {:?}", persist_dir);
    let _node = cluster
        .start_node_with(
            "test",
            StartNodeOptions {
                peers: PeerSelection::None,
                config_patch: None,
                persist_dir: Some(persist_dir.clone()),
            },
        )
        .await?
        .api;
    println!("validator started, waiting briefly");
    sleep(Duration::from_secs(5)).await;
    // Drop the cluster to trigger the persist logic
    drop(cluster);
    println!("cluster dropped, checking if persist_dir exists");
    // Verify the persist_dir was created
    if !persist_dir.exists() {
        return Err(anyhow::anyhow!(
            "persist_dir was not created: {:?}",
            persist_dir
        ));
    }
    println!("persist_dir verified: {:?}", persist_dir);
    // Clean up
    if persist_dir.exists() {
        std::fs::remove_dir_all(&persist_dir)?;
    }
    Ok(())
}

View File

@ -1,114 +0,0 @@
use std::{
net::{SocketAddr, TcpListener},
time::Duration,
};
use anyhow::Result;
use testing_framework_core::{
nodes::ApiClient,
scenario::{Deployer, PeerSelection, ScenarioBuilder, StartNodeOptions},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tracing_subscriber::fmt::try_init;
// Manual check that a per-node config patch can override the node's HTTP
// API port when starting through the manual-cluster API, and that the
// client actually resolves to that port.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_api_port_override`"]
async fn manual_cluster_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    // Grab a currently-free port up front; the node binds it shortly after.
    let api_port = random_api_port();
    let deployer = LocalDeployer::new();
    let cluster = deployer.manual_cluster(TopologyConfig::with_node_numbers(1))?;
    let node = cluster
        .start_node_with(
            "override-api",
            StartNodeOptions {
                peers: PeerSelection::None,
                config_patch: None,
                persist_dir: None,
            }
            // Rewrite only the port of the configured API address.
            .create_patch(move |mut config| {
                println!("overriding API port to {api_port}");
                let current_addr = config.user.http.backend_settings.address;
                config.user.http.backend_settings.address =
                    SocketAddr::new(current_addr.ip(), api_port);
                Ok(config)
            }),
        )
        .await?
        .api;
    // A successful call proves the API is reachable on the patched address.
    node.consensus_info()
        .await
        .expect("consensus_info should succeed");
    assert_eq!(resolved_port(&node), api_port);
    Ok(())
}
// Same port-override check as above, but applied through the scenario
// builder's per-node config patch instead of the manual-cluster API.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_api_port_override`"]
async fn scenario_builder_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let api_port = random_api_port();
    let mut scenario = ScenarioBuilder::topology_with(|t| {
        t.network_star()
            .nodes(1)
            // Patch node 0's API address to use the pre-selected free port.
            .node_config_patch_with(0, move |mut config| {
                println!("overriding API port to {api_port}");
                let current_addr = config.user.http.backend_settings.address;
                config.user.http.backend_settings.address =
                    SocketAddr::new(current_addr.ip(), api_port);
                Ok(config)
            })
    })
    .with_run_duration(Duration::from_secs(1))
    .build()?;
    let deployer = LocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let handle = runner.run(&mut scenario).await?;
    // Any exposed client will do — the scenario has exactly one node.
    let client = handle
        .context()
        .node_clients()
        .any_client()
        .ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
    client
        .consensus_info()
        .await
        .expect("consensus_info should succeed");
    assert_eq!(resolved_port(&client), api_port);
    Ok(())
}
// Ask the OS for a currently-free TCP port on localhost by binding port 0
// and reading back the assigned port. The listener is dropped on return, so
// the port could in principle be claimed by another process before use.
fn random_api_port() -> u16 {
    TcpListener::bind("127.0.0.1:0")
        .expect("bind random API port")
        .local_addr()
        .expect("read API port")
        .port()
}
// Port the client's base URL carries, or 0 if the URL has no explicit port.
fn resolved_port(client: &ApiClient) -> u16 {
    client.base_url().port().unwrap_or_default()
}

View File

@ -1,110 +0,0 @@
use std::time::Duration;
use anyhow::{Result, anyhow};
use testing_framework_core::{
scenario::StartNodeOptions,
topology::{
config::{TopologyBuilder, TopologyConfig},
configs::network::Libp2pNetworkLayout,
},
};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::{start_node_with_timeout, wait_for_min_height};
use tokio::time::{sleep, timeout};
use tracing_subscriber::fmt::try_init;
// Height nodes a/b must reach before the late node is started.
const MIN_HEIGHT: u64 = 5;
// Budget for nodes a/b to reach MIN_HEIGHT.
const INITIAL_READY_TIMEOUT: Duration = Duration::from_secs(500);
// Budget for the late node to catch up to a/b.
const CATCH_UP_TIMEOUT: Duration = Duration::from_secs(300);
// Budget for each individual node start.
const START_NODE_TIMEOUT: Duration = Duration::from_secs(90);
// Overall cap on the whole test.
const TEST_TIMEOUT: Duration = Duration::from_secs(600);
// Poll interval for height checks.
const POLL_INTERVAL: Duration = Duration::from_secs(1);
// Manual catch-up check: start nodes "a" and "b", let them reach
// MIN_HEIGHT, then start late node "c" and wait until its height is within
// one block of the lower of a/b. The whole test is capped by TEST_TIMEOUT.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored orphan_manual_cluster`"]
async fn orphan_manual_cluster() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `NOMOS_KZGRS_PARAMS_PATH=...` (path to KZG params directory/file)
    // - `RUST_LOG=info` (optional; better visibility)
    let config = TopologyConfig::with_node_numbers(3);
    timeout(TEST_TIMEOUT, async {
        // Fully-connected network layout so "c" can sync from either peer.
        let builder = TopologyBuilder::new(config).with_network_layout(Libp2pNetworkLayout::Full);
        let deployer = LocalDeployer::new();
        let cluster = deployer.manual_cluster_with_builder(builder)?;
        // Nodes are stopped automatically when the cluster is dropped.
        let node_a = start_node_with_timeout(
            &cluster,
            "a",
            StartNodeOptions::default(),
            START_NODE_TIMEOUT,
        )
        .await?
        .api;
        let node_b = start_node_with_timeout(
            &cluster,
            "b",
            StartNodeOptions::default(),
            START_NODE_TIMEOUT,
        )
        .await?
        .api;
        // Let a/b build up chain history before the late node joins.
        wait_for_min_height(
            &[node_a.clone(), node_b.clone()],
            MIN_HEIGHT,
            INITIAL_READY_TIMEOUT,
            POLL_INTERVAL,
        )
        .await?;
        let behind_node = start_node_with_timeout(
            &cluster,
            "c",
            StartNodeOptions::default(),
            START_NODE_TIMEOUT,
        )
        .await?
        .api;
        // Poll until "c" is within one block of min(height_a, height_b).
        timeout(CATCH_UP_TIMEOUT, async {
            loop {
                let node_a_info = node_a
                    .consensus_info()
                    .await
                    .map_err(|err| anyhow!("node-a consensus_info failed: {err}"))?;
                let node_b_info = node_b
                    .consensus_info()
                    .await
                    .map_err(|err| anyhow!("node-b consensus_info failed: {err}"))?;
                let behind_info = behind_node
                    .consensus_info()
                    .await
                    .map_err(|err| anyhow!("node-c consensus_info failed: {err}"))?;
                let initial_min_height = node_a_info.height.min(node_b_info.height);
                if behind_info.height >= initial_min_height.saturating_sub(1) {
                    return Ok::<(), anyhow::Error>(());
                }
                sleep(POLL_INTERVAL).await;
            }
        })
        .await
        .map_err(|_| anyhow!("timeout waiting for behind node to catch up"))??;
        Ok::<(), anyhow::Error>(())
    })
    .await
    .map_err(|_| anyhow!("test timeout exceeded"))??;
    Ok(())
}

33
logos/README.md Normal file
View File

@ -0,0 +1,33 @@
# Logos Testing Framework Extension
This directory contains the **Logos-specific extension layer** that plugs into the generic
`testing-framework` core. The goal is to keep all Logos-specific logic in one place with a clear
structure so it can be reviewed and moved into the `logos-blockchain-node` repo cleanly.
## Layout
- `runtime/env`
Logos implementation of the core `Application` trait and runtime wiring.
- `runtime/ext`
Logos extension glue for compose/k8s/cfgsync integration and scenario helpers.
- `runtime/workloads`
Logos workloads and expectations (e.g., transaction workload, consensus liveness).
- `runtime/cfgsync`
Logos cfgsync server/client and config bundling.
- `infra/assets/stack`
Docker stack assets, scripts, and monitoring bundles.
- `infra/helm/logos-runner`
Helm chart used by the k8s deployer.
## Extension Boundary
The **core** (`testing-framework/*`) remains Logos-agnostic. All app assumptions should
live under `logos/runtime/*` and expose only the minimal surface needed by the core.
If you need to introduce new core capabilities, add them to the core and keep the Logos
implementation in `logos/runtime/*`.

View File

@ -11,11 +11,12 @@ version = "0.1.0"
[dependencies]
anyhow = "1"
lb-ext = { workspace = true }
lb-framework = { workspace = true }
lb-workloads = { workspace = true }
testing-framework-core = { workspace = true }
testing-framework-runner-compose = { workspace = true }
testing-framework-runner-k8s = { workspace = true }
testing-framework-runner-local = { workspace = true }
testing-framework-workflows = { workspace = true }
tokio = { features = ["macros", "net", "rt-multi-thread", "time"], workspace = true }
tracing = { workspace = true }
tracing-subscriber = { features = ["env-filter", "fmt"], version = "0.3" }

View File

@ -0,0 +1,83 @@
use std::{process, time::Duration};
use anyhow::{Context as _, Result};
use lb_ext::{
CoreBuilderExt as _, LbcComposeDeployer as ComposeDeployer, LbcExtEnv, ScenarioBuilder,
ScenarioBuilderExt as _, configs::network::Libp2pNetworkLayout,
};
use runner_examples::{DeployerKind, demo, read_env_any, read_topology_seed_or_default};
use testing_framework_core::scenario::{Deployer as _, Runner};
use testing_framework_runner_compose::ComposeRunnerError;
use tracing::{info, warn};
/// Entry point for the compose runner demo. Reads the node count and run
/// duration from env vars (falling back to demo defaults), then runs the
/// compose case, exiting non-zero on failure.
#[tokio::main]
async fn main() {
    // Seed default node-log-directory env vars before anything reads them.
    runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Compose);
    tracing_subscriber::fmt::init();
    let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
    let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
    info!(nodes, run_secs, "starting compose runner demo");
    if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await {
        warn!("compose runner demo failed: {err:#}");
        process::exit(1);
    }
}
/// Builds and runs the compose demo scenario: a star network of `nodes`
/// nodes with a funded wallet pool, a transaction workload, and a
/// consensus-liveness expectation, running for `run_duration`.
///
/// Returns `Ok(())` without failing when Docker is unavailable, so the demo
/// degrades to a skip on machines without a Docker daemon.
async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
    info!(
        nodes,
        duration_secs = run_duration.as_secs(),
        "building scenario plan"
    );
    let seed = read_topology_seed_or_default();
    let scenario = ScenarioBuilder::deployment_with(|t| {
        t.with_network_layout(Libp2pNetworkLayout::Star)
            .with_node_count(nodes)
    })
    .enable_node_control()
    .with_run_duration(run_duration)
    .with_deployment_seed(seed)
    // Fund 100 units per wallet across the whole pool.
    .initialize_wallet(
        demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
        demo::DEFAULT_TOTAL_WALLETS,
    )
    .transactions_with(|txs| {
        txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
            .users(demo::DEFAULT_TRANSACTION_WALLETS)
    })
    .expect_consensus_liveness();
    let mut plan = scenario.build()?;
    let deployer = ComposeDeployer::new();
    info!("deploying compose stack");
    let runner: Runner<LbcExtEnv> = match deployer.deploy(&plan).await {
        Ok(runner) => runner,
        // A missing Docker daemon is treated as a skip, not a failure.
        Err(ComposeRunnerError::DockerUnavailable) => {
            warn!("Docker is unavailable; cannot run compose demo");
            return Ok(());
        }
        Err(err) => return Err(anyhow::Error::new(err)).context("deploying compose stack failed"),
    };
    if !runner.context().telemetry().is_configured() {
        warn!(
            "metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
        );
    }
    info!("running scenario");
    runner
        .run(&mut plan)
        .await
        .context("running compose scenario failed")?;
    Ok(())
}

View File

@ -1,18 +1,15 @@
use std::{env, process, time::Duration};
use std::{process, time::Duration};
use anyhow::{Context as _, Result};
use runner_examples::{ScenarioBuilderExt as _, demo, read_env_any};
use testing_framework_core::scenario::{
Deployer as _, ObservabilityCapability, Runner, ScenarioBuilder,
use lb_ext::{
CoreBuilderExt as _, LbcExtEnv, LbcK8sDeployer as K8sDeployer, ScenarioBuilder,
ScenarioBuilderExt as _, configs::network::Libp2pNetworkLayout,
};
use testing_framework_runner_k8s::{K8sDeployer, K8sRunnerError};
use testing_framework_workflows::ObservabilityBuilderExt as _;
use runner_examples::{demo, read_env_any, read_topology_seed_or_default};
use testing_framework_core::scenario::{Deployer as _, Runner};
use testing_framework_runner_k8s::K8sRunnerError;
use tracing::{info, warn};
const MIXED_TXS_PER_BLOCK: u64 = 2;
const TOTAL_WALLETS: usize = 200;
const TRANSACTION_WALLETS: usize = 50;
#[tokio::main]
async fn main() {
tracing_subscriber::fmt::init();
@ -34,31 +31,31 @@ async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
"building scenario plan"
);
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
.with_capabilities(ObservabilityCapability::default())
.wallets(TOTAL_WALLETS)
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
.with_run_duration(run_duration)
.expect_consensus_liveness();
let seed = read_topology_seed_or_default();
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_QUERY_URL") {
if !url.trim().is_empty() {
scenario = scenario.with_metrics_query_url_str(url.trim());
}
}
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL") {
if !url.trim().is_empty() {
scenario = scenario.with_metrics_otlp_ingest_url_str(url.trim());
}
}
let scenario = ScenarioBuilder::deployment_with(|t| {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(nodes)
})
.enable_observability()
.with_run_duration(run_duration)
.with_deployment_seed(seed)
.initialize_wallet(
demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
demo::DEFAULT_TOTAL_WALLETS,
)
.transactions_with(|txs| {
txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
.users(demo::DEFAULT_TRANSACTION_WALLETS)
})
.expect_consensus_liveness();
let mut plan = scenario.build()?;
let deployer = K8sDeployer::new();
info!("deploying k8s stack");
let runner: Runner = match deployer.deploy(&plan).await {
let runner: Runner<LbcExtEnv> = match deployer.deploy(&plan).await {
Ok(runner) => runner,
Err(K8sRunnerError::ClientInit { source }) => {
warn!("Kubernetes cluster unavailable ({source}); skipping");

View File

@ -1,16 +1,14 @@
use std::{process, time::Duration};
use anyhow::{Context as _, Result};
use runner_examples::{DeployerKind, ScenarioBuilderExt as _, demo, read_env_any};
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;
use lb_framework::{
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder, ScenarioBuilderExt as _,
configs::network::Libp2pNetworkLayout,
};
use runner_examples::{DeployerKind, demo, read_env_any, read_topology_seed_or_default};
use testing_framework_core::scenario::{Deployer as _, Runner};
use tracing::{info, warn};
const MIXED_TXS_PER_BLOCK: u64 = 5;
const TOTAL_WALLETS: usize = 1000;
const TRANSACTION_WALLETS: usize = 500;
const SMOKE_RUN_SECS_MAX: u64 = 30;
#[tokio::main]
async fn main() {
runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Local);
@ -35,24 +33,30 @@ async fn run_local_case(nodes: usize, run_duration: Duration) -> Result<()> {
"building scenario plan"
);
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
.wallets(TOTAL_WALLETS)
.with_run_duration(run_duration);
let seed = read_topology_seed_or_default();
let scenario = if run_duration.as_secs() <= SMOKE_RUN_SECS_MAX {
scenario
} else {
scenario
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
.expect_consensus_liveness()
};
let scenario = ScenarioBuilder::deployment_with(|t| {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(nodes)
})
.with_run_duration(run_duration)
.with_deployment_seed(seed)
.initialize_wallet(
demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
demo::DEFAULT_TOTAL_WALLETS,
)
.transactions_with(|txs| {
txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
.users(demo::DEFAULT_TRANSACTION_WALLETS)
})
.expect_consensus_liveness();
let mut plan = scenario.build()?;
let deployer = LocalDeployer::default();
let deployer = LbcLocalDeployer::default();
info!("deploying local nodes");
let runner: Runner = deployer
let runner: Runner<LbcEnv> = deployer
.deploy(&plan)
.await
.context("deploying local nodes failed")?;

View File

@ -11,11 +11,11 @@ const DEFAULT_NODE_LOG_DIR_REL: &str = ".tmp/node-logs";
const DEFAULT_CONTAINER_NODE_LOG_DIR: &str = "/tmp/node-logs";
fn set_default_env(key: &str, value: &str) {
if std::env::var_os(key).is_none() {
if env::var_os(key).is_none() {
// SAFETY: Used as an early-run default. Prefer setting env vars in the
// shell for multi-threaded runs.
unsafe {
std::env::set_var(key, value);
env::set_var(key, value);
}
}
}

View File

@ -0,0 +1,6 @@
/// Default node count for the runner demos.
pub const DEFAULT_NODES: usize = 2;
/// Default scenario run duration, in seconds.
pub const DEFAULT_RUN_SECS: u64 = 60;
/// Default size of the funded wallet pool.
pub const DEFAULT_TOTAL_WALLETS: usize = 200;
/// Default number of wallets actively submitting transactions.
pub const DEFAULT_TRANSACTION_WALLETS: usize = 20;
/// Default transaction rate passed to the mixed-transactions workload.
pub const DEFAULT_MIXED_TXS_PER_BLOCK: u64 = 3;

41
logos/examples/src/env.rs Normal file
View File

@ -0,0 +1,41 @@
use std::{
env,
str::{self, FromStr},
};
use testing_framework_core::topology::DeploymentSeed;
/// Fallback deployment seed: 32 bytes, all zero except a trailing 1, so runs
/// without an explicit seed are still deterministic.
const DEFAULT_TOPOLOGY_SEED: [u8; 32] = {
    let mut bytes = [0u8; 32];
    bytes[31] = 1;
    bytes
};
/// Returns the value of the first environment variable in `keys` that is both
/// set and parseable as `T`; falls back to `default` when none qualifies.
pub fn read_env_any<T>(keys: &[&str], default: T) -> T
where
    T: FromStr + Copy,
{
    for key in keys {
        if let Ok(raw) = env::var(key) {
            if let Ok(parsed) = raw.parse::<T>() {
                return parsed;
            }
        }
    }
    default
}
/// Parses `LOGOS_BLOCKCHAIN_TOPOLOGY_SEED` (64 hex chars, optional `0x`
/// prefix) into a deployment seed. Returns `None` when the variable is unset,
/// has the wrong length, or contains non-hex characters.
pub fn read_topology_seed() -> Option<DeploymentSeed> {
    let value = env::var("LOGOS_BLOCKCHAIN_TOPOLOGY_SEED").ok()?;
    let hex = value.strip_prefix("0x").unwrap_or(&value);
    if hex.len() != 64 {
        return None;
    }
    let mut seed = [0u8; 32];
    for (byte, pair) in seed.iter_mut().zip(hex.as_bytes().chunks(2)) {
        // Each two-character chunk must be valid UTF-8 and valid hex.
        let pair = str::from_utf8(pair).ok()?;
        *byte = u8::from_str_radix(pair, 16).ok()?;
    }
    Some(DeploymentSeed::new(seed))
}
/// Reads the topology seed from the environment, falling back to the
/// hard-coded default seed when it is absent or malformed.
pub fn read_topology_seed_or_default() -> DeploymentSeed {
    match read_topology_seed() {
        Some(seed) => seed,
        None => DeploymentSeed::new(DEFAULT_TOPOLOGY_SEED),
    }
}

14
logos/examples/src/lib.rs Normal file
View File

@ -0,0 +1,14 @@
pub mod defaults;
pub mod demo;
pub mod env;
pub use env::{read_env_any, read_topology_seed, read_topology_seed_or_default};
pub use lb_framework::ScenarioBuilderExt as NodeScenarioBuilderExt;
pub use lb_workloads::{ChaosBuilderExt, ScenarioBuilderExt};
/// Which deployer backend an example targets; used to pick matching defaults
/// (e.g. node log directories).
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum DeployerKind {
    // Local process deployer (the default).
    #[default]
    Local,
    // Compose-based deployer.
    Compose,
}

View File

@ -0,0 +1,120 @@
use std::{env, time::Duration};
use anyhow::Result;
use lb_ext::{
CoreBuilderExt as _, LbcComposeDeployer, LbcExtEnv, LbcK8sDeployer,
ScenarioBuilder as ExtScenarioBuilder, ScenarioBuilderExt as _,
};
use lb_framework::{
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder as LocalScenarioBuilder,
ScenarioBuilderExt as _, configs::network::NetworkLayout,
};
use testing_framework_core::{
scenario::{Deployer as _, Runner},
topology::DeploymentDescriptor,
};
/// Parameters shared by the local and extension parity scenarios, so both
/// builds are constructed from the exact same inputs.
#[derive(Clone, Copy)]
struct ScenarioSpec {
    // Number of nodes in the star topology.
    nodes: usize,
    // Scenario run duration, in seconds.
    run_secs: u64,
    // Transaction submission rate passed to the transactions workload.
    tx_rate: u64,
    // Number of wallets actively submitting transactions.
    tx_users: usize,
    // Total number of wallets funded at initialization.
    total_wallets: usize,
}
/// Produces the single spec used by both parity builds, keeping the
/// comparison apples-to-apples.
fn shared_spec() -> ScenarioSpec {
    const NODES: usize = 2;
    const RUN_SECS: u64 = 30;
    const TX_RATE: u64 = 5;
    const TX_USERS: usize = 500;
    const TOTAL_WALLETS: usize = 1000;
    ScenarioSpec {
        nodes: NODES,
        run_secs: RUN_SECS,
        tx_rate: TX_RATE,
        tx_users: TX_USERS,
        total_wallets: TOTAL_WALLETS,
    }
}
/// Builds the local-deployer variant of the parity scenario from `spec`: a
/// star network, a funded wallet pool, a transaction workload, and a
/// consensus-liveness expectation.
fn build_local_scenario(
    spec: ScenarioSpec,
) -> Result<testing_framework_core::scenario::Scenario<LbcEnv>> {
    LocalScenarioBuilder::deployment_with(|d| {
        d.with_network_layout(NetworkLayout::Star)
            .with_node_count(spec.nodes)
    })
    .with_run_duration(Duration::from_secs(spec.run_secs))
    // Fund 100 units per wallet across the whole pool.
    .initialize_wallet(spec.total_wallets as u64 * 100, spec.total_wallets)
    .transactions_with(|txs| txs.rate(spec.tx_rate).users(spec.tx_users))
    .expect_consensus_liveness()
    .build()
    .map_err(Into::into)
}
/// Builds the extension (compose/k8s) variant of the parity scenario from
/// `spec`; must mirror `build_local_scenario` step-for-step so the parity
/// assertions stay meaningful.
fn build_ext_scenario(
    spec: ScenarioSpec,
) -> Result<testing_framework_core::scenario::Scenario<LbcExtEnv>> {
    ExtScenarioBuilder::deployment_with(|d| {
        d.with_network_layout(NetworkLayout::Star)
            .with_node_count(spec.nodes)
    })
    .with_run_duration(Duration::from_secs(spec.run_secs))
    // Fund 100 units per wallet across the whole pool.
    .initialize_wallet(spec.total_wallets as u64 * 100, spec.total_wallets)
    .transactions_with(|txs| txs.rate(spec.tx_rate).users(spec.tx_users))
    .expect_consensus_liveness()
    .build()
    .map_err(Into::into)
}
/// Builds the same spec through both builder stacks and asserts the resulting
/// plans have the same shape (node count, duration, workload and expectation
/// counts) without deploying anything.
#[test]
fn parity_builds_have_same_shape() -> Result<()> {
    let spec = shared_spec();
    let local = build_local_scenario(spec)?;
    let ext = build_ext_scenario(spec)?;
    assert_eq!(
        local.deployment().node_count(),
        ext.deployment().node_count()
    );
    assert_eq!(local.duration(), ext.duration());
    assert_eq!(local.workloads().len(), ext.workloads().len());
    assert_eq!(local.expectations().len(), ext.expectations().len());
    Ok(())
}
/// Opt-in smoke run of the local parity scenario; silently skipped unless
/// `TF_RUN_LOCAL_PARITY` is set in the environment.
#[tokio::test]
async fn local_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_LOCAL_PARITY").is_err() {
        return Ok(());
    }
    let mut scenario = build_local_scenario(shared_spec())?;
    let deployer = LbcLocalDeployer::default();
    let runner: Runner<LbcEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}
/// Opt-in smoke run of the extension parity scenario on compose; silently
/// skipped unless `TF_RUN_COMPOSE_PARITY` is set in the environment.
#[tokio::test]
async fn compose_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_COMPOSE_PARITY").is_err() {
        return Ok(());
    }
    let mut scenario = build_ext_scenario(shared_spec())?;
    let deployer = LbcComposeDeployer::default();
    let runner: Runner<LbcExtEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}
/// Opt-in smoke run of the extension parity scenario on k8s; silently skipped
/// unless `TF_RUN_K8S_PARITY` is set in the environment.
#[tokio::test]
async fn k8s_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_K8S_PARITY").is_err() {
        return Ok(());
    }
    let mut scenario = build_ext_scenario(shared_spec())?;
    let deployer = LbcK8sDeployer::default();
    let runner: Runner<LbcExtEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}

View File

@ -2,11 +2,13 @@ use std::time::Duration;
use anyhow::Result;
use async_trait::async_trait;
use testing_framework_core::scenario::{
Deployer, DynError, PeerSelection, RunContext, ScenarioBuilder, StartNodeOptions, Workload,
use lb_framework::{
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder,
configs::network::Libp2pNetworkLayout,
};
use testing_framework_core::scenario::{
Deployer, DynError, PeerSelection, RunContext, StartNodeOptions, Workload,
};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
use tokio::time::{sleep, timeout};
use tracing_subscriber::fmt::try_init;
@ -25,12 +27,12 @@ impl JoinNodeWorkload {
}
#[async_trait]
impl Workload for JoinNodeWorkload {
impl Workload<LbcEnv> for JoinNodeWorkload {
fn name(&self) -> &str {
"dynamic_join"
}
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
async fn start(&self, ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
let handle = ctx
.node_control()
.ok_or_else(|| "dynamic join workload requires node control".to_owned())?;
@ -38,7 +40,7 @@ impl Workload for JoinNodeWorkload {
sleep(START_DELAY).await;
let node = handle.start_node(&self.name).await?;
let client = node.api;
let client = node.client;
timeout(READY_TIMEOUT, async {
loop {
@ -71,25 +73,22 @@ impl JoinNodeWithPeersWorkload {
}
#[async_trait]
impl Workload for JoinNodeWithPeersWorkload {
impl Workload<LbcEnv> for JoinNodeWithPeersWorkload {
fn name(&self) -> &str {
"dynamic_join_with_peers"
}
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
async fn start(&self, ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
let handle = ctx
.node_control()
.ok_or_else(|| "dynamic join workload requires node control".to_owned())?;
sleep(START_DELAY).await;
let options = StartNodeOptions {
peers: PeerSelection::Named(self.peers.clone()),
config_patch: None,
persist_dir: None,
};
let mut options = StartNodeOptions::<LbcEnv>::default();
options.peers = PeerSelection::Named(self.peers.clone());
let node = handle.start_node_with(&self.name, options).await?;
let client = node.api;
let client = node.client;
timeout(READY_TIMEOUT, async {
loop {
@ -112,14 +111,17 @@ impl Workload for JoinNodeWithPeersWorkload {
async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
let _ = try_init();
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let mut scenario = ScenarioBuilder::deployment_with(|t| {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(2)
})
.enable_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcEnv>::default())
.with_run_duration(Duration::from_secs(60))
.build()?;
let deployer = LocalDeployer::default();
let deployer = LbcLocalDeployer::default();
let runner = deployer.deploy(&scenario).await?;
let _handle = runner.run(&mut scenario).await?;
@ -129,17 +131,20 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["node-0".to_string()],
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
.build()?;
let mut scenario = ScenarioBuilder::deployment_with(|t| {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(2)
})
.enable_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["node-0".to_string()],
))
.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcEnv>::default())
.with_run_duration(Duration::from_secs(60))
.build()?;
let deployer = LocalDeployer::default();
let deployer = LbcLocalDeployer::default();
let runner = deployer.deploy(&scenario).await?;
let _handle = runner.run(&mut scenario).await?;

View File

@ -0,0 +1,90 @@
use std::time::Duration;
use anyhow::{Result, anyhow};
use lb_framework::{
CoreBuilderExt as _, DeploymentBuilder, LbcLocalDeployer, ScenarioBuilder, TopologyConfig,
};
use testing_framework_core::scenario::Deployer;
use tracing_subscriber::fmt::try_init;
/// End-to-end check of node-control restart/stop on a single locally deployed
/// node: the PID must change across a restart and disappear after a stop.
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn local_restart_node() -> Result<()> {
    let _ = try_init();
    let mut scenario = ScenarioBuilder::deployment_with(|t| t.with_node_count(1))
        .enable_node_control()
        .with_run_duration(Duration::from_secs(1))
        .build()?;
    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let context = runner.context();
    let control = context
        .node_control()
        .ok_or_else(|| anyhow!("node control not available"))?;
    let node_name = "node-0";
    let old_pid = control
        .node_pid(node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    control
        .restart_node(node_name)
        .await
        .map_err(|error| anyhow!("failed to restart {node_name}: {error}"))?;
    let new_pid = control
        .node_pid(node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    // A restart must spawn a fresh process.
    assert_ne!(old_pid, new_pid, "expected a new process after restart");
    control
        .stop_node(node_name)
        .await
        .map_err(|error| anyhow!("failed to stop {node_name}: {error}"))?;
    assert!(
        control.node_pid(node_name).is_none(),
        "expected node pid to be absent after stop"
    );
    let _handle = runner.run(&mut scenario).await?;
    Ok(())
}
/// Same restart/stop PID checks as `local_restart_node`, but driven through a
/// manually managed cluster instead of a deployed scenario.
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn manual_cluster_restart_node() -> Result<()> {
    let _ = try_init();
    let deployer = LbcLocalDeployer::default();
    let descriptors = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1)).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors);
    let node_name = cluster.start_node("a").await?.name;
    let old_pid = cluster
        .node_pid(&node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    cluster
        .restart_node(&node_name)
        .await
        .map_err(|error| anyhow!("failed to restart {node_name}: {error}"))?;
    let new_pid = cluster
        .node_pid(&node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    // A restart must spawn a fresh process.
    assert_ne!(old_pid, new_pid, "expected a new process after restart");
    cluster
        .stop_node(&node_name)
        .await
        .map_err(|error| anyhow!("failed to stop {node_name}: {error}"))?;
    assert!(
        cluster.node_pid(&node_name).is_none(),
        "expected node pid to be absent after stop"
    );
    Ok(())
}

View File

@ -0,0 +1,78 @@
use std::time::Duration;
use anyhow::{Result, anyhow};
use lb_framework::{DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, TopologyConfig};
use testing_framework_core::scenario::{PeerSelection, StartNodeOptions};
use tokio::time::sleep;
use tracing_subscriber::fmt::try_init;
// Maximum height difference at which the two nodes count as converged.
const MAX_HEIGHT_DIFF: u64 = 5;
// Overall deadline for the two nodes to converge after merging.
const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60);
// Interval between height polls while waiting for convergence.
const CONVERGENCE_POLL: Duration = Duration::from_secs(2);
/// Manually starts two nodes in sequence — `a` alone, then `c` peered to `a`
/// after a 30s delay — and waits for their chain heights to converge.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"]
async fn manual_cluster_two_clusters_merge() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `RUST_LOG=info` (optional)
    let config = TopologyConfig::with_node_numbers(2);
    let deployer = LbcLocalDeployer::new();
    let descriptors = DeploymentBuilder::new(config).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors);
    // Nodes are stopped automatically when the cluster is dropped.
    println!("starting node a");
    let node_a = cluster
        .start_node_with("a", node_start_options(PeerSelection::None))
        .await?
        .client;
    // Let node a run alone for a while so the two nodes diverge before merging.
    println!("waiting briefly before starting c");
    sleep(Duration::from_secs(30)).await;
    println!("starting node c -> a");
    let node_c = cluster
        .start_node_with(
            "c",
            node_start_options(PeerSelection::Named(vec!["node-a".to_owned()])),
        )
        .await?
        .client;
    println!("waiting for network readiness: cluster a,c");
    cluster.wait_network_ready().await?;
    wait_for_convergence(&node_a, &node_c).await
}
/// Polls both nodes' consensus heights until they are within
/// `MAX_HEIGHT_DIFF` of each other, erroring after `CONVERGENCE_TIMEOUT`.
async fn wait_for_convergence(node_a: &NodeHttpClient, node_c: &NodeHttpClient) -> Result<()> {
    let start = tokio::time::Instant::now();
    loop {
        let a_height = node_a.consensus_info().await?.height;
        let c_height = node_c.consensus_info().await?.height;
        // abs_diff avoids underflow regardless of which node is ahead.
        let diff = a_height.abs_diff(c_height);
        if diff <= MAX_HEIGHT_DIFF {
            println!("final heights: node-a={a_height}, node-c={c_height}, diff={diff}");
            return Ok(());
        }
        if start.elapsed() >= CONVERGENCE_TIMEOUT {
            return Err(anyhow!(
                "height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})"
            ));
        }
        sleep(CONVERGENCE_POLL).await;
    }
}
fn node_start_options(peers: PeerSelection) -> StartNodeOptions<LbcEnv> {
let mut options = StartNodeOptions::<LbcEnv>::default();
options.peers = peers;
options
}

View File

@ -0,0 +1,127 @@
use std::{
net::{SocketAddr, TcpListener},
time::Duration,
};
use anyhow::Result;
use lb_framework::{
DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, ScenarioBuilder, TopologyConfig,
configs::build_node_run_config,
};
use testing_framework_core::scenario::{Deployer, PeerSelection, StartNodeOptions};
use tracing_subscriber::fmt::try_init;
/// Starts a single node with a config patch that rewrites the API listen
/// port, then verifies the node's client resolves to the overridden port.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_api_port_override`"]
async fn manual_cluster_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let api_port = random_api_port();
    let deployer = LbcLocalDeployer::new();
    let descriptors = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1)).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors.clone());
    let node = cluster
        .start_node_with(
            "override-api",
            StartNodeOptions::<LbcEnv>::default()
                .with_peers(PeerSelection::None)
                // Patch only the API listen port; keep the generated IP.
                .create_patch(move |mut run_config| {
                    println!("overriding API port to {api_port}");
                    let current_addr = run_config.user.api.backend.listen_address;
                    run_config.user.api.backend.listen_address =
                        SocketAddr::new(current_addr.ip(), api_port);
                    Ok(run_config)
                }),
        )
        .await?
        .client;
    cluster.wait_network_ready().await?;
    wait_until_consensus_ready(&node).await?;
    assert_eq!(resolved_port(&node), api_port);
    Ok(())
}
/// Overrides node 0's API port via a scenario-level node-config override and
/// verifies the deployed node serves `consensus_info` on that port.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_api_port_override`"]
async fn scenario_builder_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let api_port = random_api_port();
    let base_builder = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1));
    let base_descriptors = base_builder.clone().build()?;
    let base_node = base_descriptors.nodes().first().expect("node 0 descriptor");
    // Build the node's run config up front so the port can be rewritten
    // before the scenario is constructed.
    let mut run_config = build_node_run_config(
        base_node,
        base_descriptors
            .config()
            .node_config_override(base_node.index()),
    )
    .expect("build run config");
    println!("overriding API port to {api_port}");
    let current_addr = run_config.user.api.backend.listen_address;
    run_config.user.api.backend.listen_address = SocketAddr::new(current_addr.ip(), api_port);
    let mut scenario = ScenarioBuilder::new(Box::new(
        base_builder.with_node_config_override(0, run_config),
    ))
    .with_run_duration(Duration::from_secs(1))
    .build()?;
    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let handle = runner.run(&mut scenario).await?;
    let client = handle
        .context()
        .random_node_client()
        .ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
    client
        .consensus_info()
        .await
        .expect("consensus_info should succeed");
    assert_eq!(resolved_port(&client), api_port);
    Ok(())
}
/// Asks the OS for a free loopback TCP port by binding port 0 and reading
/// back the ephemeral port assigned. The listener is dropped on return, so
/// the port is free (though not reserved) for the node under test.
fn random_api_port() -> u16 {
    TcpListener::bind("127.0.0.1:0")
        .expect("bind random API port")
        .local_addr()
        .expect("read API port")
        .port()
}
/// Extracts the port from the client's base URL; 0 (the `u16` default) when
/// the URL carries no explicit port.
fn resolved_port(client: &NodeHttpClient) -> u16 {
    match client.base_url().port() {
        Some(port) => port,
        None => 0,
    }
}
/// Polls `consensus_info` until it succeeds, retrying every 500ms for up to
/// 120 attempts (~60s), and errors if the node never becomes ready.
async fn wait_until_consensus_ready(client: &NodeHttpClient) -> Result<()> {
    const RETRIES: usize = 120;
    const DELAY_MS: u64 = 500;
    let mut remaining = RETRIES;
    while remaining > 0 {
        match client.consensus_info().await {
            Ok(_) => return Ok(()),
            Err(_) => tokio::time::sleep(Duration::from_millis(DELAY_MS)).await,
        }
        remaining -= 1;
    }
    anyhow::bail!("consensus_info did not become ready in time")
}

View File

@ -0,0 +1,111 @@
use std::time::Duration;
use anyhow::{Result, anyhow};
use lb_framework::{
DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, TopologyConfig,
configs::network::NetworkLayout,
};
use lb_workloads::{start_node_with_timeout, wait_for_min_height};
use testing_framework_core::scenario::StartNodeOptions;
use tokio::time::{sleep, timeout};
use tracing_subscriber::fmt::try_init;
// Minimum chain height both seed nodes must reach before the third node joins.
const MIN_HEIGHT: u64 = 5;
// Generous deadline for the two seed nodes to produce the first MIN_HEIGHT blocks.
const INITIAL_READY_TIMEOUT: Duration = Duration::from_secs(500);
// Deadline for the late-joining node to sync up to the seed nodes' height.
const CATCH_UP_TIMEOUT: Duration = Duration::from_secs(300);
// Deadline for a single node process to start and expose its API.
const START_NODE_TIMEOUT: Duration = Duration::from_secs(90);
// Upper bound on the whole test: startup, readiness, and catch-up combined.
const TEST_TIMEOUT: Duration = Duration::from_secs(600);
// Interval between consecutive consensus-info polls.
const POLL_INTERVAL: Duration = Duration::from_secs(1);
/// Starts two seed nodes, lets them build a chain, then starts a third node
/// late and requires it to catch up — all bounded by `TEST_TIMEOUT`.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored orphan_manual_cluster`"]
async fn orphan_manual_cluster() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `NOMOS_KZGRS_PARAMS_PATH=...` (path to KZG params directory/file)
    // - `RUST_LOG=info` (optional; better visibility)
    let config = TopologyConfig::with_node_numbers(3);
    timeout(TEST_TIMEOUT, run_orphan_flow(config))
        .await
        .map_err(|_| anyhow!("test timeout exceeded"))??;
    Ok(())
}
/// Drives the orphan scenario: start seed nodes `a` and `b`, wait for
/// `MIN_HEIGHT`, start late node `c`, then wait for it to catch up.
async fn run_orphan_flow(config: TopologyConfig) -> Result<()> {
    let builder = DeploymentBuilder::new(config).with_network_layout(NetworkLayout::Full);
    let deployer = LbcLocalDeployer::new();
    let descriptors = builder.build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors);
    let node_a = start_node_with_timeout(
        &cluster,
        "a",
        StartNodeOptions::<LbcEnv>::default(),
        START_NODE_TIMEOUT,
    )
    .await?
    .client;
    let node_b = start_node_with_timeout(
        &cluster,
        "b",
        StartNodeOptions::<LbcEnv>::default(),
        START_NODE_TIMEOUT,
    )
    .await?
    .client;
    // Let the seed nodes build a chain before the late joiner starts.
    wait_for_min_height(
        &[node_a.clone(), node_b.clone()],
        MIN_HEIGHT,
        INITIAL_READY_TIMEOUT,
        POLL_INTERVAL,
    )
    .await?;
    let behind_node = start_node_with_timeout(
        &cluster,
        "c",
        StartNodeOptions::<LbcEnv>::default(),
        START_NODE_TIMEOUT,
    )
    .await?
    .client;
    wait_for_catch_up(&node_a, &node_b, &behind_node).await
}
/// Polls all three nodes until the late node's height is within one block of
/// the lower of the two seed nodes' heights, bounded by `CATCH_UP_TIMEOUT`.
async fn wait_for_catch_up(
    node_a: &NodeHttpClient,
    node_b: &NodeHttpClient,
    behind_node: &NodeHttpClient,
) -> Result<()> {
    timeout(CATCH_UP_TIMEOUT, async {
        loop {
            let node_a_height = node_height(node_a, "node-a").await?;
            let node_b_height = node_height(node_b, "node-b").await?;
            let behind_height = node_height(behind_node, "node-c").await?;
            let initial_min_height = node_a_height.min(node_b_height);
            // saturating_sub guards against a height of 0 underflowing.
            if behind_height >= initial_min_height.saturating_sub(1) {
                return Ok::<(), anyhow::Error>(());
            }
            sleep(POLL_INTERVAL).await;
        }
    })
    .await
    .map_err(|_| anyhow!("timeout waiting for behind node to catch up"))?
}
async fn node_height(node: &NodeHttpClient, name: &str) -> Result<u64> {
let info = node
.consensus_info()
.await
.map_err(|error| anyhow!("{name} consensus_info failed: {error}"))?;
Ok(info.height)
}

View File

@ -0,0 +1,106 @@
# syntax=docker/dockerfile:1
# check=skip=SecretsUsedInArgOrEnv
# Ignore warnings about sensitive information as this is test data.
# Global build args, re-declared per stage below (ARG values do not cross
# FROM boundaries unless re-declared).
ARG VERSION
ARG LOGOS_BLOCKCHAIN_NODE_REV
ARG LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT
# ===========================
# BUILD IMAGE
# ===========================
FROM rust:1.91.0-slim-bookworm AS builder
# Re-declare the args this stage consumes.
ARG VERSION
ARG LOGOS_BLOCKCHAIN_NODE_REV
ARG LOGOS_BLOCKCHAIN_FORCE_BUILD
ARG LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT
LABEL maintainer="augustinas@status.im" \
source="https://github.com/logos-co/nomos-node" \
description="Logos testnet build image"
WORKDIR /workspace
# Bring the full build context (this repo) into the image.
COPY . .
# Reduce debug artifact size.
ENV CARGO_PROFILE_DEV_DEBUG=0
# Forward build args to the environment so the build scripts below see them.
ENV LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV}
ENV LOGOS_BLOCKCHAIN_FORCE_BUILD=${LOGOS_BLOCKCHAIN_FORCE_BUILD}
ENV LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT=${LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT}
# Install dependencies needed for building RocksDB and for circuit tooling.
RUN apt-get update && apt-get install -yq \
git gcc g++ clang make cmake m4 xz-utils libgmp-dev libssl-dev pkg-config ca-certificates curl wget file \
&& rm -rf /var/lib/apt/lists/*
# Ensure the helper scripts are executable; `|| true` tolerates checkouts
# where some of them are absent.
RUN chmod +x \
/workspace/logos/infra/assets/stack/scripts/docker/prepare_binaries.sh \
/workspace/logos/infra/assets/stack/scripts/docker/build_cfgsync.sh \
/workspace/logos/infra/assets/stack/scripts/setup-logos-blockchain-circuits.sh \
|| true
# Fetch/install the circuit artifacts for this VERSION into /opt/circuits.
RUN /workspace/logos/infra/assets/stack/scripts/setup-logos-blockchain-circuits.sh "${VERSION}" /opt/circuits
ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits
RUN /workspace/scripts/build/build-rapidsnark.sh /opt/circuits
# Materialize the nomos-node source tree: either copy it from the bind-mounted
# local build context (LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT=1) and remap
# host-absolute `file:///Users/...` manifest references to container paths, or
# clone the upstream repo at LOGOS_BLOCKCHAIN_NODE_REV and rewrite the manifest
# to point at that git rev.
# NOTE(review): the sed remaps assume the host checkout lived under
# /Users/...nomos-node and referenced ../nomos-testing — confirm these patterns
# still match contributors' layouts.
RUN --mount=type=bind,from=nomos_node,source=.,target=/nomos-node-local,ro \
if [ "${LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT}" = "1" ]; then \
rm -rf /nomos-node && mkdir -p /nomos-node && cp -a /nomos-node-local/. /nomos-node/ ; \
if grep -q 'file:///Users/.*nomos-node' /workspace/Cargo.toml; then \
sed -i "s#git = \\\"file:///Users/[^\\\"]*nomos-node\\\"#path = \\\"/nomos-node\\\"#g" /workspace/Cargo.toml; \
fi; \
# Local checkout may reference ../nomos-testing paths; remap them to /workspace in container.
if [ -f /nomos-node/Cargo.toml ]; then \
sed -i 's#\.\./nomos-testing/#../workspace/#g' /nomos-node/Cargo.toml; \
fi; \
if [ -f /nomos-node/tests/Cargo.toml ]; then \
sed -i 's#\.\./\.\./nomos-testing/#../../workspace/#g' /nomos-node/tests/Cargo.toml; \
fi; \
else \
if grep -q 'file:///Users/.*nomos-node' /workspace/Cargo.toml; then \
sed -i "s#git = \\\"file:///Users/[^\\\"]*nomos-node\\\"#git = \\\"https://github.com/logos-co/nomos-node.git\\\", rev = \\\"${LOGOS_BLOCKCHAIN_NODE_REV}\\\"#g" /workspace/Cargo.toml; \
fi; \
rm -rf /nomos-node; \
git clone https://github.com/logos-co/nomos-node.git /nomos-node; \
cd /nomos-node; \
git fetch origin "${LOGOS_BLOCKCHAIN_NODE_REV}"; \
git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"; \
fi
# Build the node binaries into /workspace/artifacts.
RUN /workspace/logos/infra/assets/stack/scripts/docker/prepare_binaries.sh
# Strip host-local patches to avoid unresolved absolute paths inside containers.
RUN sed -i '/^\[patch\."https:\/\/github.com\/logos-co\/nomos-node"\]/,/^$/d' /workspace/Cargo.toml
# Build the cfgsync server/client binaries into /workspace/artifacts.
RUN /workspace/logos/infra/assets/stack/scripts/docker/build_cfgsync.sh
# ===========================
# BASE RUNTIME IMAGE
# ===========================
FROM ubuntu:24.04 AS base
LABEL maintainer="augustinas@status.im" \
source="https://github.com/logos-co/nomos-node" \
description="Logos base runtime image (testing)"
# Runtime shared libraries only; build toolchain stays in the builder stage.
RUN apt-get update && apt-get install -yq \
libstdc++6 \
libgmp10 \
libgomp1 \
libssl3 \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Copy the prebuilt binaries and circuit artifacts out of the builder stage.
COPY --from=builder /workspace/artifacts/logos-blockchain-node /usr/bin/logos-blockchain-node
COPY --from=builder /workspace/artifacts/cfgsync-server /usr/bin/cfgsync-server
COPY --from=builder /workspace/artifacts/cfgsync-client /usr/bin/cfgsync-client
COPY --from=builder /opt/circuits /opt/circuits
ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits
EXPOSE 3000 8080 9000 60000

View File

@ -1,6 +1,9 @@
port: 4400
n_hosts: 4
timeout: 10
global_params_path: "/etc/logos/global_params"
# Optional: serve prebuilt configs from a bundle file.
# bundle_path: cfgsync.bundle.yaml
# ConsensusConfig related parameters
security_param: 10
@ -42,6 +45,7 @@ tracing_settings:
filters:
nomos: debug
cryptarchia: debug
logos_blockchain_chain_leader_service: debug
metrics: None
console: None
level: DEBUG

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Builds the cfgsync server/client binaries and stages them under
# /workspace/artifacts for the runtime image.
set -euo pipefail

manifest=/workspace/testing-framework/tools/cfgsync-runtime/Cargo.toml
artifacts=/workspace/artifacts

# Both binaries come from the same crate; one cargo invocation with two
# --bin flags builds them together and guarantees identical RUSTFLAGS
# (the original ran two separate builds, which can drift apart).
# NOTE(review): the feature cfg is injected via RUSTFLAGS rather than
# `cargo --features`; presumably the crate gates code on `pol-dev-mode`
# without a Cargo feature declaration — confirm before changing.
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
cargo build --manifest-path "${manifest}" --bin cfgsync-server --bin cfgsync-client

# Stage the binaries; create the artifacts dir if an earlier step did not.
mkdir -p "${artifacts}"
cp /workspace/target/debug/cfgsync-server "${artifacts}/cfgsync-server"
cp /workspace/target/debug/cfgsync-client "${artifacts}/cfgsync-client"

# Drop incremental-compilation caches to keep the image layer small.
rm -rf /workspace/target/debug/incremental

Some files were not shown because too many files have changed in this diff Show More