Mirror of https://github.com/logos-blockchain/logos-blockchain-testing.git (synced 2026-02-17 19:53:05 +00:00)

Unify node types

This commit is contained in:
parent dc06af5486
commit 8d2dd4c86a

Cargo.lock (generated): 2651 changes; file diff suppressed because it is too large.
@@ -52,12 +52,9 @@ cryptarchia-engine = { default-features = false, git = "https://github.com/logos
cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
kzgrs-backend = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-kzgrs-backend", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cli", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "47ae18e95f643bde563b4769212b37f6f018fed3" }
@@ -125,7 +125,7 @@ Key environment variables for customization:
|----------|---------|---------|
| `POL_PROOF_DEV_MODE=true` | **Required** — Disable expensive proof generation (set automatically by `scripts/run/run-examples.sh`) | (none) |
| `NOMOS_TESTNET_IMAGE` | Docker image tag for compose/k8s | `logos-blockchain-testing:local` |
| `NOMOS_DEMO_VALIDATORS` | Number of validator nodes | Varies by example |
| `NOMOS_DEMO_NODES` | Number of nodes | Varies by example |
| `NOMOS_LOG_DIR` | Directory for persistent log files | (temporary) |
| `NOMOS_LOG_LEVEL` | Logging verbosity | `info` |
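The `NOMOS_DEMO_*` variables in this table are read by the demo binaries further down in this diff through a small `read_env_any` helper. As a self-contained illustration of that pattern (the helper below is a stand-in written against the standard library, not the repository's actual function), overriding the node count and run duration could look like this:

```rust
use std::env;
use std::str::FromStr;

// Illustrative stand-in for the repo's `read_env_any` helper: try each variable
// name in order and fall back to the default when none is set or parseable.
fn read_env_or<T: FromStr>(names: &[&str], default: T) -> T {
    names
        .iter()
        .filter_map(|name| env::var(name).ok())
        .filter_map(|value| value.parse::<T>().ok())
        .next()
        .unwrap_or(default)
}

fn main() {
    // Defaults mirror the demo constants changed in this commit
    // (DEFAULT_NODES = 2, DEFAULT_RUN_SECS = 60).
    let nodes: usize = read_env_or(&["NOMOS_DEMO_NODES"], 2);
    let run_secs: u64 = read_env_or(&["NOMOS_DEMO_RUN_SECS"], 60);
    println!("nodes={nodes}, run_secs={run_secs}");
}
```

Running a demo with `NOMOS_DEMO_NODES=4` set would then pick up 4 nodes instead of the default.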
@@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn scenario_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| txs.rate(5).users(20))
.expect_consensus_liveness()
@@ -6,13 +6,13 @@ use testing_framework_workflows::{ScenarioBuilderExt, workloads::chaos::RandomRe
use crate::SnippetResult;

pub fn random_restart_plan() -> SnippetResult<Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min delay
Duration::from_secs(75), // max delay
Duration::from_secs(120), // target cooldown
true, // include validators
true, // include nodes
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(150))
@@ -18,8 +18,8 @@ impl Expectation for ReachabilityExpectation {
}

async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> {
let validators = ctx.node_clients().validator_clients();
let client = validators.get(self.target_idx).ok_or_else(|| {
let nodes = ctx.node_clients().node_clients();
let client = nodes.get(self.target_idx).ok_or_else(|| {
Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"missing target client",
@@ -33,18 +33,18 @@ impl Workload for ReachabilityWorkload {
topology: &GeneratedTopology,
_run_metrics: &RunMetrics,
) -> Result<(), DynError> {
if topology.validators().get(self.target_idx).is_none() {
if topology.nodes().get(self.target_idx).is_none() {
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"no validator at requested index",
"no node at requested index",
)));
}
Ok(())
}

async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
let validators = ctx.node_clients().validator_clients();
let client = validators.get(self.target_idx).ok_or_else(|| {
let nodes = ctx.node_clients().node_clients();
let client = nodes.get(self.target_idx).ok_or_else(|| {
Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"missing target client",
@@ -4,5 +4,5 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn build_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1)).build() // Construct the final Scenario
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1)).build() // Construct the final Scenario
}
@@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn run_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn expectations_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.expect_consensus_liveness() // Assert blocks are produced continuously
.build()
}
@@ -6,7 +6,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn run_duration_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.with_run_duration(Duration::from_secs(120)) // Run for 120 seconds
.build()
}
@@ -3,6 +3,6 @@ use testing_framework_core::scenario::{Builder, ScenarioBuilder};
pub fn topology() -> Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology (all connect to seed node)
.validators(3) // Number of validator nodes
.nodes(3) // Number of nodes
})
}
@@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn transactions_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.wallets(50)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@@ -4,7 +4,7 @@ use testing_framework_workflows::ScenarioBuilderExt;
use crate::SnippetResult;

pub fn wallets_plan() -> SnippetResult<Scenario<()>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.wallets(50) // Seed 50 funded wallet accounts
.build()
}
@@ -7,7 +7,7 @@ use crate::SnippetResult;

pub fn chaos_plan()
-> SnippetResult<testing_framework_core::scenario::Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control() // Enable node control capability
.chaos_with(|c| {
c.restart() // Random restart chaos
@@ -4,7 +4,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn execution() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(1))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(1))
.expect_consensus_liveness()
.build()?;
@@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};

pub async fn aggressive_chaos_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.wallets(50)
.transactions_with(|txs| txs.rate(10).users(20))
@@ -9,7 +9,7 @@ pub async fn load_progression_test() -> Result<()> {
for rate in [5, 10, 20, 30] {
println!("Testing with rate: {}", rate);

let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(50)
.transactions_with(|txs| txs.rate(rate).users(20))
.expect_consensus_liveness()
@@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn sustained_load_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.wallets(100)
.transactions_with(|txs| txs.rate(15).users(50))
.expect_consensus_liveness()
@@ -6,7 +6,7 @@ use testing_framework_runner_compose::ComposeDeployer;
use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};

pub async fn chaos_resilience() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(4))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.wallets(20)
.transactions_with(|txs| txs.rate(3).users(10))
@@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn transactions_multi_node() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.wallets(30)
.transactions_with(|txs| txs.rate(5).users(15))
.expect_consensus_liveness()
@@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn simple_consensus() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(30))
.build()?;
@@ -6,7 +6,7 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn transaction_workload() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.wallets(20)
.transactions_with(|txs| txs.rate(5).users(10))
.expect_consensus_liveness()
@@ -13,7 +13,7 @@ impl<Caps> YourExpectationDslExt for testing_framework_core::scenario::Builder<C
}

pub fn use_in_examples() -> SnippetResult<()> {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.expect_your_condition()
.build()?;
Ok(())
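The `use_in_examples` snippet above relies on the extension-trait pattern: a trait implemented for the framework's `Builder<Caps>` adds chainable DSL methods such as `expect_your_condition()`. A minimal, self-contained sketch of that pattern, using stand-in types rather than the framework's real `Builder` and `SnippetResult`:

```rust
use std::marker::PhantomData;

// Stand-in for testing_framework_core::scenario::Builder<Caps>; only here to make
// the extension-trait pattern self-contained.
struct Builder<Caps> {
    expectations: Vec<String>,
    _caps: PhantomData<Caps>,
}

impl<Caps> Builder<Caps> {
    fn new() -> Self {
        Self { expectations: Vec::new(), _caps: PhantomData }
    }
}

// The extension trait adds a chainable DSL method without modifying the builder type.
trait YourExpectationDslExt {
    fn expect_your_condition(self) -> Self;
}

impl<Caps> YourExpectationDslExt for Builder<Caps> {
    fn expect_your_condition(mut self) -> Self {
        self.expectations.push("your-condition".to_owned());
        self
    }
}

fn main() {
    let plan = Builder::<()>::new().expect_your_condition();
    assert_eq!(plan.expectations, ["your-condition"]);
}
```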
@@ -27,7 +27,7 @@ impl<Caps> YourWorkloadDslExt for testing_framework_core::scenario::Builder<Caps
}

pub fn use_in_examples() -> SnippetResult<()> {
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.your_workload_with(|w| w.some_config())
.build()?;
Ok(())
@@ -11,8 +11,8 @@ impl Workload for RestartWorkload {

async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
if let Some(control) = ctx.node_control() {
// Restart the first validator (index 0) if supported.
control.restart_validator(0).await?;
// Restart the first node (index 0) if supported.
control.restart_node(0).await?;
}
Ok(())
}
@@ -3,5 +3,5 @@ use testing_framework_core::scenario::DynError;

#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
async fn restart_node(&self, index: usize) -> Result<(), DynError>;
}
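For context on the `restart_validator` to `restart_node` rename, here is a sketch of what an implementation of the updated trait might look like. `DynError` is assumed to be a boxed error alias (the real alias lives in `testing_framework_core::scenario`), and `NoopNodeControl` is a hypothetical stub, not something from the repository:

```rust
use async_trait::async_trait;

// Assumption: DynError is a boxed error alias, as is conventional for this kind of type.
type DynError = Box<dyn std::error::Error + Send + Sync>;

#[async_trait]
pub trait NodeControlHandle: Send + Sync {
    async fn restart_node(&self, index: usize) -> Result<(), DynError>;
}

// Hypothetical stub implementation, useful when node control is not exercised.
struct NoopNodeControl;

#[async_trait]
impl NodeControlHandle for NoopNodeControl {
    async fn restart_node(&self, index: usize) -> Result<(), DynError> {
        println!("pretending to restart node {index}");
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<(), DynError> {
    let control = NoopNodeControl;
    // Call sites now use the node-indexed method introduced by this commit.
    control.restart_node(0).await
}
```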
@@ -3,7 +3,7 @@ use testing_framework_core::scenario::{Deployer, ScenarioBuilder};
use testing_framework_runner_local::LocalDeployer;

pub async fn run_with_env_overrides() -> Result<()> {
// Uses NOMOS_DEMO_* env vars (or legacy *_DEMO_* vars)
// Uses NOMOS_DEMO_* env vars (for example NOMOS_DEMO_NODES)
let mut plan = ScenarioBuilder::with_node_counts(3)
.with_run_duration(std::time::Duration::from_secs(120))
.build()?;
@@ -6,8 +6,8 @@ use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;

pub async fn run_local_demo() -> Result<()> {
// Define the scenario (2 validators, tx workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
// Define the scenario (2 nodes, tx workload)
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.wallets(1_000)
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
@@ -3,6 +3,6 @@ use testing_framework_core::scenario::ScenarioBuilder;
pub fn step_1_topology() -> testing_framework_core::scenario::Builder<()> {
ScenarioBuilder::topology_with(|t| {
t.network_star() // Star topology: all nodes connect to seed
.validators(2) // 2 validator nodes
.nodes(2) // 2 nodes
})
}
@@ -5,7 +5,7 @@ use crate::SnippetResult;

pub fn declarative_over_imperative() -> SnippetResult<()> {
// Good: declarative
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@@ -13,7 +13,7 @@ pub fn declarative_over_imperative() -> SnippetResult<()> {
.build()?;

// Bad: imperative (framework doesn't work this way)
// spawn_validator();
// spawn_node();
// loop { submit_tx(); check_block(); }

Ok(())
@@ -7,7 +7,7 @@ use crate::SnippetResult;

pub fn determinism_first() -> SnippetResult<()> {
// Separate: functional test (deterministic)
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
@@ -15,7 +15,7 @@ pub fn determinism_first() -> SnippetResult<()> {
.build()?;

// Separate: chaos test (introduces randomness)
let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(3))
let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control()
.chaos_with(|c| {
c.restart()
@@ -7,7 +7,7 @@ use crate::SnippetResult;

pub fn protocol_time_not_wall_time() -> SnippetResult<()> {
// Good: protocol-oriented thinking
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let _plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.transactions_with(|txs| {
txs.rate(5) // 5 transactions per block
})
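To make the protocol-time framing concrete: the `run-examples.sh` environment documented later in this diff defines `CONSENSUS_SLOT_TIME` (default 2) and `CONSENSUS_ACTIVE_SLOT_COEFF` (default 0.9), with an expected block interval of roughly slot_time / coeff. A small sketch of that arithmetic, converting a wall-clock budget into an expected block count:

```rust
fn main() {
    // Defaults documented for run-examples.sh later in this diff.
    let slot_time_secs = 2.0_f64; // CONSENSUS_SLOT_TIME
    let active_slot_coeff = 0.9_f64; // CONSENSUS_ACTIVE_SLOT_COEFF

    // Expected block interval ≈ slot_time / coeff ≈ 2.2 s.
    let block_interval = slot_time_secs / active_slot_coeff;

    // A 120 s run budget therefore corresponds to roughly 54 blocks of protocol time.
    let run_secs = 120.0_f64;
    println!("expected blocks in {run_secs}s: {:.0}", run_secs / block_interval);
}
```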
@@ -24,27 +24,27 @@ async fn main() {

tracing_subscriber::fmt::init();

let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);

let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);

info!(validators, run_secs, "starting compose runner demo");
info!(nodes, run_secs, "starting compose runner demo");

if let Err(err) = run_compose_case(validators, Duration::from_secs(run_secs)).await {
if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await {
warn!("compose runner demo failed: {err:#}");
process::exit(1);
}
}

async fn run_compose_case(validators: usize, run_duration: Duration) -> Result<()> {
async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
nodes,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);

let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
.enable_node_control();
let scenario =
ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)).enable_node_control();

let scenario = if let Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown)) =
chaos_timings(run_duration)
@@ -17,24 +17,24 @@ const TRANSACTION_WALLETS: usize = 50;
async fn main() {
tracing_subscriber::fmt::init();

let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
info!(validators, run_secs, "starting k8s runner demo");
info!(nodes, run_secs, "starting k8s runner demo");

if let Err(err) = run_k8s_case(validators, Duration::from_secs(run_secs)).await {
if let Err(err) = run_k8s_case(nodes, Duration::from_secs(run_secs)).await {
warn!("k8s runner demo failed: {err:#}");
process::exit(1);
}
}

async fn run_k8s_case(validators: usize, run_duration: Duration) -> Result<()> {
async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
nodes,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);

let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
.with_capabilities(ObservabilityCapability::default())
.wallets(TOTAL_WALLETS)
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
@@ -22,25 +22,25 @@ async fn main() {
process::exit(1);
}

let validators = read_env_any(&["NOMOS_DEMO_VALIDATORS"], demo::DEFAULT_VALIDATORS);
let nodes = read_env_any(&["NOMOS_DEMO_NODES"], demo::DEFAULT_NODES);
let run_secs = read_env_any(&["NOMOS_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);

info!(validators, run_secs, "starting local runner demo");
info!(nodes, run_secs, "starting local runner demo");

if let Err(err) = run_local_case(validators, Duration::from_secs(run_secs)).await {
if let Err(err) = run_local_case(nodes, Duration::from_secs(run_secs)).await {
warn!("local runner demo failed: {err:#}");
process::exit(1);
}
}

async fn run_local_case(validators: usize, run_duration: Duration) -> Result<()> {
async fn run_local_case(nodes: usize, run_duration: Duration) -> Result<()> {
info!(
validators,
nodes,
duration_secs = run_duration.as_secs(),
"building scenario plan"
);

let scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(validators))
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
.wallets(TOTAL_WALLETS)
.with_run_duration(run_duration);
@@ -1,2 +1,2 @@
pub const DEFAULT_VALIDATORS: usize = 2;
pub const DEFAULT_NODES: usize = 2;
pub const DEFAULT_RUN_SECS: u64 = 60;
@@ -37,7 +37,7 @@ impl Workload for JoinNodeWorkload {

sleep(START_DELAY).await;

let node = handle.start_validator(&self.name).await?;
let node = handle.start_node(&self.name).await?;
let client = node.api;

timeout(READY_TIMEOUT, async {
@@ -86,7 +86,7 @@ impl Workload for JoinNodeWithPeersWorkload {
let options = StartNodeOptions {
peers: PeerSelection::Named(self.peers.clone()),
};
let node = handle.start_validator_with(&self.name, options).await?;
let node = handle.start_node_with(&self.name, options).await?;
let client = node.api;

timeout(READY_TIMEOUT, async {
@@ -110,7 +110,7 @@ impl Workload for JoinNodeWithPeersWorkload {
async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
let _ = try_init();

let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.expect_consensus_liveness()
@@ -127,11 +127,11 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().validators(2))
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["validator-0".to_string()],
vec!["node-0".to_string()],
))
.expect_consensus_liveness()
.with_run_duration(Duration::from_secs(60))
@@ -10,6 +10,8 @@ use tokio::time::sleep;
use tracing_subscriber::fmt::try_init;

const MAX_HEIGHT_DIFF: u64 = 5;
const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60);
const CONVERGENCE_POLL: Duration = Duration::from_secs(2);

#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"]
@@ -23,10 +25,10 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
let cluster = deployer.manual_cluster(config)?;
// Nodes are stopped automatically when the cluster is dropped.

println!("starting validator a");
println!("starting node a");

let validator_a = cluster
.start_validator_with(
let node_a = cluster
.start_node_with(
"a",
StartNodeOptions {
peers: PeerSelection::None,
@@ -38,12 +40,12 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
println!("waiting briefly before starting c");
sleep(Duration::from_secs(30)).await;

println!("starting validator c -> a");
let validator_c = cluster
.start_validator_with(
println!("starting node c -> a");
let node_c = cluster
.start_node_with(
"c",
StartNodeOptions {
peers: PeerSelection::Named(vec!["validator-a".to_owned()]),
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
},
)
.await?
@@ -52,21 +54,29 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
println!("waiting for network readiness: cluster a,c");
cluster.wait_network_ready().await?;

sleep(Duration::from_secs(5)).await;
let start = tokio::time::Instant::now();

let a_info = validator_a.consensus_info().await?;
let c_info = validator_c.consensus_info().await?;
let height_diff = a_info.height.abs_diff(c_info.height);
loop {
let a_info = node_a.consensus_info().await?;
let c_info = node_c.consensus_info().await?;
let a_height = a_info.height;
let c_height = c_info.height;
let diff = a_height.abs_diff(c_height);

println!(
"final heights: validator-a={}, validator-c={}, diff={}",
a_info.height, c_info.height, height_diff
);
if diff <= MAX_HEIGHT_DIFF {
println!(
"final heights: node-a={}, node-c={}, diff={}",
a_height, c_height, diff
);
return Ok(());
}

if height_diff > MAX_HEIGHT_DIFF {
return Err(anyhow::anyhow!(
"height diff too large: {height_diff} > {MAX_HEIGHT_DIFF}"
));
if start.elapsed() >= CONVERGENCE_TIMEOUT {
return Err(anyhow::anyhow!(
"height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})"
));
}

sleep(CONVERGENCE_POLL).await;
}
Ok(())
}
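The rewritten test above replaces a single height sample with a bounded poll loop. A self-contained sketch of that convergence pattern, generic over the height probe (the helper name and error handling here are illustrative, not part of the framework):

```rust
use std::future::Future;
use std::time::Duration;
use tokio::time::{sleep, Instant};

// Poll a height-difference probe until it drops to `max_diff` or `timeout` elapses,
// mirroring the MAX_HEIGHT_DIFF / CONVERGENCE_TIMEOUT / CONVERGENCE_POLL loop above.
async fn wait_for_convergence<F, Fut, E>(
    max_diff: u64,
    timeout: Duration,
    poll: Duration,
    mut height_diff: F,
) -> Result<(), String>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<u64, E>>,
    E: std::fmt::Display,
{
    let start = Instant::now();
    loop {
        let diff = height_diff().await.map_err(|e| e.to_string())?;
        if diff <= max_diff {
            return Ok(());
        }
        if start.elapsed() >= timeout {
            return Err(format!(
                "height diff too large after timeout: {diff} > {max_diff}"
            ));
        }
        sleep(poll).await;
    }
}

#[tokio::main]
async fn main() {
    // Toy probe that converges after a few polls.
    let mut samples = vec![9_u64, 7, 4].into_iter();
    let result = wait_for_convergence(
        5,
        Duration::from_secs(60),
        Duration::from_millis(10),
        move || {
            let next = samples.next().unwrap_or(0);
            async move { Ok::<u64, std::convert::Infallible>(next) }
        },
    )
    .await;
    assert!(result.is_ok());
}
```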
@@ -1,15 +1,6 @@
# Paths used by demo scripts and runners.
# Relative paths are resolved from the repo root.

# Directory containing the KZG test parameters on the host.
NOMOS_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params"

# The KZG parameters filename (repeated inside the directory).
NOMOS_KZG_FILE="kzgrs_test_params"

# Path to the KZG params inside containers.
NOMOS_KZG_CONTAINER_PATH="/kzgrs_test_params/kzgrs_test_params"

# Host-side circuit bundle locations used by helper scripts.
NOMOS_CIRCUITS_HOST_DIR_REL=".tmp/logos-blockchain-circuits-host"
NOMOS_CIRCUITS_LINUX_DIR_REL=".tmp/logos-blockchain-circuits-linux"
@@ -264,10 +264,6 @@ build_bundle::maybe_run_linux_build_in_docker() {
-e NOMOS_NODE_REV="${NOMOS_NODE_REV}" \
-e NOMOS_NODE_PATH="${node_path_env}" \
-e NOMOS_BUNDLE_DOCKER_PLATFORM="${DOCKER_PLATFORM}" \
-e NOMOS_CIRCUITS="/workspace/.tmp/logos-blockchain-circuits-linux" \
-e LOGOS_BLOCKCHAIN_CIRCUITS="/workspace/.tmp/logos-blockchain-circuits-linux" \
-e STACK_DIR="/workspace/.tmp/logos-blockchain-circuits-linux" \
-e HOST_DIR="/workspace/.tmp/logos-blockchain-circuits-linux" \
-e NOMOS_EXTRA_FEATURES="${NOMOS_EXTRA_FEATURES:-}" \
-e BUNDLE_IN_CONTAINER=1 \
-e CARGO_HOME=/workspace/.tmp/cargo-linux \
@@ -284,12 +280,10 @@ build_bundle::maybe_run_linux_build_in_docker() {
}

build_bundle::prepare_circuits() {
echo "==> Preparing circuits (version ${VERSION})"
echo "==> Preparing build workspace (version ${VERSION})"
if [ "${PLATFORM}" = "host" ]; then
CIRCUITS_DIR="${ROOT_DIR}/.tmp/logos-blockchain-circuits-host"
NODE_TARGET="${ROOT_DIR}/.tmp/logos-blockchain-node-host-target"
else
CIRCUITS_DIR="${ROOT_DIR}/.tmp/logos-blockchain-circuits-linux"
# When building Linux bundles in Docker, avoid reusing the same target dir
# across different container architectures (e.g. linux/arm64 vs linux/amd64),
# as the native-host `target/debug` layout would otherwise get mixed.
@@ -311,18 +305,7 @@ build_bundle::prepare_circuits() {
NODE_TARGET="${NODE_TARGET}-local"
fi

export NOMOS_CIRCUITS="${CIRCUITS_DIR}"
export LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}"
mkdir -p "${ROOT_DIR}/.tmp" "${CIRCUITS_DIR}"
if [ -f "${CIRCUITS_DIR}/${KZG_FILE:-kzgrs_test_params}" ]; then
echo "Circuits already present at ${CIRCUITS_DIR}; skipping download"
else
STACK_DIR="${CIRCUITS_DIR}" HOST_DIR="${CIRCUITS_DIR}" \
"${ROOT_DIR}/scripts/setup/setup-circuits-stack.sh" "${VERSION}" </dev/null
fi

NODE_BIN="${NODE_TARGET}/debug/logos-blockchain-node"
CLI_BIN="${NODE_TARGET}/debug/logos-blockchain-cli"
}

build_bundle::build_binaries() {
@@ -346,26 +329,17 @@ build_bundle::build_binaries() {
if [ -z "${NOMOS_NODE_PATH}" ]; then
build_bundle::apply_nomos_node_patches "${NODE_SRC}"
fi
if [ -f "${CIRCUITS_DIR}/zksign/verification_key.json" ] \
|| [ -f "${CIRCUITS_DIR}/pol/verification_key.json" ] \
|| [ -f "${CIRCUITS_DIR}/poq/verification_key.json" ] \
|| [ -f "${CIRCUITS_DIR}/poc/verification_key.json" ]; then
export CARGO_FEATURE_BUILD_VERIFICATION_KEY=1
else
unset CARGO_FEATURE_BUILD_VERIFICATION_KEY
fi
unset CARGO_FEATURE_BUILD_VERIFICATION_KEY
if [ -n "${BUNDLE_RUSTUP_TOOLCHAIN}" ]; then
RUSTFLAGS='--cfg feature="pol-dev-mode"' NOMOS_CIRCUITS="${CIRCUITS_DIR}" \
LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}" \
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
RUSTUP_TOOLCHAIN="${BUNDLE_RUSTUP_TOOLCHAIN}" \
cargo build --all-features \
-p logos-blockchain-node -p logos-blockchain-cli \
-p logos-blockchain-node \
--target-dir "${NODE_TARGET}"
else
RUSTFLAGS='--cfg feature="pol-dev-mode"' NOMOS_CIRCUITS="${CIRCUITS_DIR}" \
LOGOS_BLOCKCHAIN_CIRCUITS="${CIRCUITS_DIR}" \
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
cargo build --all-features \
-p logos-blockchain-node -p logos-blockchain-cli \
-p logos-blockchain-node \
--target-dir "${NODE_TARGET}"
fi
)
@@ -375,11 +349,8 @@ build_bundle::package_bundle() {
echo "==> Packaging bundle"
local bundle_dir="${ROOT_DIR}/.tmp/nomos-bundle"
rm -rf "${bundle_dir}"
mkdir -p "${bundle_dir}/artifacts/circuits"
cp -a "${CIRCUITS_DIR}/." "${bundle_dir}/artifacts/circuits/"
mkdir -p "${bundle_dir}/artifacts"
cp "${NODE_BIN}" "${bundle_dir}/artifacts/logos-blockchain-node"
cp "${CLI_BIN}" "${bundle_dir}/artifacts/logos-blockchain-cli"
{
echo "nomos_node_path=${NOMOS_NODE_PATH:-}"
echo "nomos_node_rev=${NOMOS_NODE_REV:-}"
@@ -14,7 +14,6 @@ Usage: scripts/build/build-linux-binaries.sh [options]

Builds a Linux bundle via scripts/build/build-bundle.sh, then stages artifacts into:
- testing-framework/assets/stack/bin
- testing-framework/assets/stack/kzgrs_test_params (or NOMOS_KZG_DIR_REL)

Options:
--rev REV logos-blockchain-node git revision to build (overrides NOMOS_NODE_REV)
@@ -26,10 +25,9 @@ Options:
-h, --help show help

Environment:
VERSION circuits version (default from versions.env)
VERSION bundle version (default from versions.env)
NOMOS_CIRCUITS_VERSION legacy alias for VERSION (supported)
NOMOS_NODE_REV default logos-blockchain-node revision (from versions.env)
NOMOS_KZG_DIR_REL host path for staged circuits dir (default: testing-framework/assets/stack/kzgrs_test_params)
EOF
}
@@ -134,28 +132,13 @@ build_linux_binaries::stage_from_bundle() {

local artifacts="${extract_dir}/artifacts"
[ -f "${artifacts}/logos-blockchain-node" ] || common::die "Missing logos-blockchain-node in bundle: ${tar_path}"
[ -f "${artifacts}/logos-blockchain-cli" ] || common::die "Missing logos-blockchain-cli in bundle: ${tar_path}"
[ -d "${artifacts}/circuits" ] || common::die "Missing circuits/ in bundle: ${tar_path}"

local bin_out="${ROOT_DIR}/testing-framework/assets/stack/bin"
local kzg_dir_rel="${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}"
local circuits_out="${ROOT_DIR}/${kzg_dir_rel}"

echo "==> Staging binaries to ${bin_out}"
mkdir -p "${bin_out}"
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${bin_out}/"

echo "==> Staging circuits to ${circuits_out}"
rm -rf "${circuits_out}"
mkdir -p "${circuits_out}"
if command -v rsync >/dev/null 2>&1; then
rsync -a --delete "${artifacts}/circuits/" "${circuits_out}/"
else
cp -a "${artifacts}/circuits/." "${circuits_out}/"
fi

cp "${artifacts}/logos-blockchain-node" "${bin_out}/"
# If the tarball was produced inside Docker, it might be root-owned on the host.
chown -R "$(id -u)":"$(id -g)" "${bin_out}" "${circuits_out}" 2>/dev/null || true
chown -R "$(id -u)":"$(id -g)" "${bin_out}" 2>/dev/null || true
}

build_linux_binaries::main() {
@@ -166,7 +149,6 @@ build_linux_binaries::main() {

echo
echo "Binaries staged in ${ROOT_DIR}/testing-framework/assets/stack/bin"
echo "Circuits staged in ${ROOT_DIR}/${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}"
echo "Bundle tarball: ${BUNDLE_TAR}"
}
@@ -12,23 +12,20 @@ build_test_image::usage() {
cat <<'USAGE'
Usage: scripts/build/build_test_image.sh [options]

Builds the compose/k8s test image (bakes in binaries + circuit assets).
Builds the compose/k8s test image (bakes in binaries).

Options:
--tag TAG Docker image tag (default: logos-blockchain-testing:local; or env IMAGE_TAG)
--version VERSION Circuits release tag (default: versions.env VERSION)
--version VERSION Bundle version tag (default: versions.env VERSION)
--dockerfile PATH Dockerfile path (default: testing-framework/assets/stack/Dockerfile.runtime)
--base-tag TAG Base image tag (default: logos-blockchain-testing:base)
--circuits-override PATH Relative path (within repo) to circuits dir/file to bake (default: testing-framework/assets/stack/kzgrs_test_params)
--circuits-platform NAME Circuits platform identifier for downloads (default: auto; linux-x86_64 or linux-aarch64)
--bundle-tar PATH Bundle tar containing artifacts/{nomos-*,circuits} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env NOMOS_BINARIES_TAR)
--no-restore Do not restore binaries/circuits from bundle tar (forces Dockerfile to build/download as needed)
--bundle-tar PATH Bundle tar containing artifacts/{nomos-*} (default: .tmp/nomos-binaries-linux-<version>.tar.gz; or env NOMOS_BINARIES_TAR)
--no-restore Do not restore binaries from bundle tar (forces Dockerfile to build/download as needed)
--print-config Print resolved configuration and exit
-h, --help Show this help and exit

Env (legacy/compatible):
IMAGE_TAG, VERSION, CIRCUITS_OVERRIDE, CIRCUITS_PLATFORM, COMPOSE_CIRCUITS_PLATFORM,
NOMOS_BINARIES_TAR, NOMOS_KZG_DIR_REL
IMAGE_TAG, VERSION, NOMOS_BINARIES_TAR
USAGE
}
@@ -58,23 +55,12 @@ build_test_image::load_env() {
NOMOS_NODE_REV="${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV in versions.env}"
}

build_test_image::detect_circuits_platform() {
case "$(uname -m)" in
x86_64) echo "linux-x86_64" ;;
arm64|aarch64) echo "linux-aarch64" ;;
*) echo "linux-x86_64" ;;
esac
}

build_test_image::parse_args() {
IMAGE_TAG="${IMAGE_TAG:-${IMAGE_TAG_DEFAULT}}"
VERSION_OVERRIDE=""
DOCKERFILE_PATH="${DOCKERFILE_PATH_DEFAULT}"
BASE_DOCKERFILE_PATH="${BASE_DOCKERFILE_PATH_DEFAULT}"
BASE_IMAGE_TAG="${BASE_IMAGE_TAG:-${BASE_IMAGE_TAG_DEFAULT}}"
KZG_DIR_REL_DEFAULT="${NOMOS_KZG_DIR_REL:-testing-framework/assets/stack/kzgrs_test_params}"
CIRCUITS_OVERRIDE="${CIRCUITS_OVERRIDE:-${KZG_DIR_REL_DEFAULT}}"
CIRCUITS_PLATFORM="${CIRCUITS_PLATFORM:-${COMPOSE_CIRCUITS_PLATFORM:-}}"
BUNDLE_TAR_PATH="${NOMOS_BINARIES_TAR:-}"
NO_RESTORE=0
PRINT_CONFIG=0
@@ -90,10 +76,6 @@ build_test_image::parse_args() {
--dockerfile) DOCKERFILE_PATH="${2:-}"; shift 2 ;;
--base-tag=*) BASE_IMAGE_TAG="${1#*=}"; shift ;;
--base-tag) BASE_IMAGE_TAG="${2:-}"; shift 2 ;;
--circuits-override=*) CIRCUITS_OVERRIDE="${1#*=}"; shift ;;
--circuits-override) CIRCUITS_OVERRIDE="${2:-}"; shift 2 ;;
--circuits-platform=*) CIRCUITS_PLATFORM="${1#*=}"; shift ;;
--circuits-platform) CIRCUITS_PLATFORM="${2:-}"; shift 2 ;;
--bundle-tar=*) BUNDLE_TAR_PATH="${1#*=}"; shift ;;
--bundle-tar) BUNDLE_TAR_PATH="${2:-}"; shift 2 ;;
--no-restore) NO_RESTORE=1; shift ;;
@@ -108,13 +90,7 @@ build_test_image::parse_args() {
VERSION="${VERSION_DEFAULT}"
fi

if [ -z "${CIRCUITS_PLATFORM}" ]; then
CIRCUITS_PLATFORM="$(build_test_image::detect_circuits_platform)"
fi

BIN_DST="${ROOT_DIR}/testing-framework/assets/stack/bin"
KZG_DIR_REL="${KZG_DIR_REL_DEFAULT}"
CIRCUITS_DIR_HOST="${ROOT_DIR}/${KZG_DIR_REL}"

DEFAULT_LINUX_TAR="${ROOT_DIR}/.tmp/nomos-binaries-linux-${VERSION}.tar.gz"
TAR_PATH="${BUNDLE_TAR_PATH:-${DEFAULT_LINUX_TAR}}"
@@ -127,10 +103,6 @@ build_test_image::print_config() {
echo "Base image tag: ${BASE_IMAGE_TAG}"
echo "Base Dockerfile: ${BASE_DOCKERFILE_PATH}"
echo "Logos node rev: ${NOMOS_NODE_REV}"
echo "Circuits override: ${CIRCUITS_OVERRIDE:-<none>}"
echo "Circuits version (download fallback): ${VERSION}"
echo "Circuits platform: ${CIRCUITS_PLATFORM}"
echo "Host circuits dir: ${CIRCUITS_DIR_HOST}"
echo "Binaries dir: ${BIN_DST}"
echo "Bundle tar (if used): ${TAR_PATH}"
echo "Restore from tar: $([ "${NO_RESTORE}" -eq 1 ] && echo "disabled" || echo "enabled")"
@@ -138,14 +110,13 @@ build_test_image::print_config() {

build_test_image::have_host_binaries() {
# Preserve existing behavior: only require node on the host.
# If logos-blockchain-cli is missing, the Dockerfile can still build it from source.
[ -x "${BIN_DST}/logos-blockchain-node" ]
}

build_test_image::restore_from_bundle() {
[ -f "${TAR_PATH}" ] || build_test_image::fail "Prebuilt binaries missing and bundle tar not found at ${TAR_PATH}"

echo "==> Restoring binaries/circuits from ${TAR_PATH}"
echo "==> Restoring binaries from ${TAR_PATH}"
local tmp_extract
tmp_extract="$(common::tmpdir nomos-bundle-extract.XXXXXX)"
trap "rm -rf -- '${tmp_extract}'" RETURN
@@ -153,22 +124,13 @@ build_test_image::restore_from_bundle() {
tar -xzf "${TAR_PATH}" -C "${tmp_extract}"
local artifacts="${tmp_extract}/artifacts"

for bin in logos-blockchain-node logos-blockchain-cli; do
[ -f "${artifacts}/${bin}" ] || build_test_image::fail "Bundle ${TAR_PATH} missing artifacts/${bin}"
done
[ -f "${artifacts}/logos-blockchain-node" ] || build_test_image::fail \
"Bundle ${TAR_PATH} missing artifacts/logos-blockchain-node"

mkdir -p "${BIN_DST}"
cp "${artifacts}/logos-blockchain-node" "${artifacts}/logos-blockchain-cli" "${BIN_DST}/"
chmod +x "${BIN_DST}/logos-blockchain-node" "${BIN_DST}/logos-blockchain-cli" || true
cp "${artifacts}/logos-blockchain-node" "${BIN_DST}/"
chmod +x "${BIN_DST}/logos-blockchain-node" || true

if [ -d "${artifacts}/circuits" ]; then
mkdir -p "${CIRCUITS_DIR_HOST}"
if command -v rsync >/dev/null 2>&1; then
rsync -a --delete "${artifacts}/circuits/" "${CIRCUITS_DIR_HOST}/"
else
cp -a "${artifacts}/circuits/." "${CIRCUITS_DIR_HOST}/"
fi
fi
}

build_test_image::maybe_restore_assets() {
@@ -193,26 +155,25 @@ build_test_image::docker_build() {
x86_64) host_platform="linux/amd64" ;;
arm64|aarch64) host_platform="linux/arm64" ;;
esac
case "${CIRCUITS_PLATFORM}" in
linux-x86_64) target_platform="linux/amd64" ;;
linux-aarch64) target_platform="linux/arm64" ;;
esac

if [ -n "${DOCKER_PLATFORM:-}" ]; then
target_platform="${DOCKER_PLATFORM}"
elif [ -n "${COMPOSE_CIRCUITS_PLATFORM:-}" ] || [ -n "${CIRCUITS_PLATFORM:-}" ]; then
case "${COMPOSE_CIRCUITS_PLATFORM:-${CIRCUITS_PLATFORM}}" in
linux-x86_64) target_platform="linux/amd64" ;;
linux-aarch64) target_platform="linux/arm64" ;;
esac
fi

local -a base_build_args=(
-f "${BASE_DOCKERFILE_PATH}"
-t "${BASE_IMAGE_TAG}"
--build-arg "NOMOS_NODE_REV=${NOMOS_NODE_REV}"
--build-arg "CIRCUITS_PLATFORM=${CIRCUITS_PLATFORM}"
--build-arg "VERSION=${VERSION}"
"${ROOT_DIR}"
)

if [ -n "${CIRCUITS_OVERRIDE}" ]; then
base_build_args+=(--build-arg "CIRCUITS_OVERRIDE=${CIRCUITS_OVERRIDE}")
fi
if [ -n "${host_platform}" ] && [ -n "${target_platform}" ] && [ "${host_platform}" != "${target_platform}" ]; then
base_build_args+=(--platform "${target_platform}")
base_build_args+=(--build-arg "RAPIDSNARK_FORCE_REBUILD=1")
fi

printf "Running:"
@@ -229,7 +190,6 @@ build_test_image::docker_build() {
if [ -n "${host_platform}" ] && [ -n "${target_platform}" ] && [ "${host_platform}" != "${target_platform}" ]; then
final_build_args+=(--platform "${target_platform}")
fi

printf "Running:"
printf " %q" docker build "${final_build_args[@]}"
echo
@@ -253,7 +213,6 @@ build_test_image::main() {

Build complete.
- Use this image in k8s/compose by exporting NOMOS_TESTNET_IMAGE=${IMAGE_TAG}
- Circuits source: ${CIRCUITS_OVERRIDE:-download ${VERSION}}
EOF
}
@@ -159,8 +159,6 @@ targets = [
"logos-blockchain-groth16",
"logos-blockchain-http-api-common",
"logos-blockchain-key-management-system-service",
"logos-blockchain-kzgrs",
"logos-blockchain-kzgrs-backend",
"logos-blockchain-ledger",
"logos-blockchain-libp2p",
"logos-blockchain-network-service",
@@ -80,26 +80,6 @@ checks::print_disk_space() {
fi
}

checks::print_kzg_params() {
checks::section "KZG Params"

local default_kzg_dir_rel="testing-framework/assets/stack/kzgrs_test_params"
local default_kzg_file="kzgrs_test_params"
local default_kzg_container_path="/kzgrs_test_params/kzgrs_test_params"

local kzg_dir_rel="${NOMOS_KZG_DIR_REL:-${default_kzg_dir_rel}}"
local kzg_file="${NOMOS_KZG_FILE:-${default_kzg_file}}"
local kzg_container_path="${NOMOS_KZG_CONTAINER_PATH:-${default_kzg_container_path}}"
local host_kzg_path="${ROOT_DIR}/${kzg_dir_rel}/${kzg_file}"

checks::say "host: ${host_kzg_path}"
checks::say "container: ${kzg_container_path}"
if [ -f "${host_kzg_path}" ]; then
checks::ok "KZG params file exists"
else
checks::warn "KZG params file missing (DA workloads will fail); run: scripts/run/run-examples.sh <mode> (auto) or scripts/setup/setup-logos-blockchain-circuits.sh"
fi
}

checks::print_rust_toolchain() {
checks::section "Rust Toolchain"
@@ -285,7 +265,6 @@ checks::main() {
checks::load_env
checks::print_workspace
checks::print_disk_space
checks::print_kzg_params
checks::print_rust_toolchain
checks::print_docker
checks::print_docker_compose
@@ -8,11 +8,6 @@ fi
# shellcheck disable=SC1091
. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../lib/common.sh"

readonly DEFAULT_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params"
readonly DEFAULT_KZG_FILE="kzgrs_test_params"
readonly DEFAULT_KZG_CONTAINER_PATH="/kzgrs_test_params/kzgrs_test_params"
readonly DEFAULT_KZG_IN_IMAGE_PARAMS_PATH="/opt/nomos/kzg-params/kzgrs_test_params"

readonly DEFAULT_LOCAL_IMAGE="logos-blockchain-testing:local"
readonly DEFAULT_PUBLIC_ECR_REGISTRY="public.ecr.aws/r4s5t9y4"
readonly DEFAULT_PUBLIC_ECR_REPO="logos/logos-blockchain"
@@ -44,7 +39,7 @@ Modes:

Options:
-t, --run-seconds N Duration to run the demo (required)
-v, --validators N Number of validators (required)
-n, --nodes N Number of nodes (required)
--bundle PATH Convenience alias for setting NOMOS_BINARIES_TAR=PATH
--metrics-query-url URL PromQL base URL the runner process can query (optional)
--metrics-otlp-ingest-url URL Full OTLP HTTP ingest URL for node metrics export (optional)
@@ -54,7 +49,7 @@ Options:
--no-image-build Skip rebuilding the compose/k8s image (sets NOMOS_SKIP_IMAGE_BUILD=1)

Environment:
VERSION Circuits version (default from versions.env)
VERSION Bundle version (default from versions.env)
CONSENSUS_SLOT_TIME Consensus slot duration in seconds (default 2)
CONSENSUS_ACTIVE_SLOT_COEFF Probability a slot is active (default 0.9); expected block interval ≈ slot_time / coeff
NOMOS_TESTNET_IMAGE Image reference (overridden by --local/--ecr selection)
@@ -63,7 +58,8 @@ Environment:
ECR_REPO Repository path for --ecr (default ${DEFAULT_PUBLIC_ECR_REPO})
TAG Tag for --ecr (default ${DEFAULT_ECR_TAG})
NOMOS_TESTNET_IMAGE_PULL_POLICY K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
NOMOS_BINARIES_TAR Path to prebuilt binaries/circuits tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
NOMOS_BINARIES_TAR Path to prebuilt binaries tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
LOGOS_BLOCKCHAIN_CIRCUITS Directory containing circuits assets (defaults to ~/.logos-blockchain-circuits)
NOMOS_SKIP_IMAGE_BUILD Set to 1 to skip rebuilding the compose/k8s image
NOMOS_FORCE_IMAGE_BUILD Set to 1 to force image rebuild even for k8s ECR mode
NOMOS_METRICS_QUERY_URL PromQL base URL for the runner process (optional)
@@ -96,11 +92,6 @@ run_examples::load_env() {
DEFAULT_VERSION="${VERSION:?Missing VERSION in versions.env}"
VERSION="${VERSION:-${DEFAULT_VERSION}}"

KZG_DIR_REL="${NOMOS_KZG_DIR_REL:-${DEFAULT_KZG_DIR_REL}}"
KZG_FILE="${NOMOS_KZG_FILE:-${DEFAULT_KZG_FILE}}"
KZG_CONTAINER_PATH="${NOMOS_KZG_CONTAINER_PATH:-${DEFAULT_KZG_CONTAINER_PATH}}"
HOST_KZG_DIR="${ROOT_DIR}/${KZG_DIR_REL}"
HOST_KZG_FILE="${HOST_KZG_DIR}/${KZG_FILE}"
}

run_examples::select_bin() {
@@ -115,7 +106,7 @@ run_examples::select_bin() {
run_examples::parse_args() {
MODE="compose"
RUN_SECS_RAW=""
DEMO_VALIDATORS=""
DEMO_NODES=""
IMAGE_SELECTION_MODE="auto"
METRICS_QUERY_URL=""
METRICS_OTLP_INGEST_URL=""
@@ -138,12 +129,12 @@ run_examples::parse_args() {
RUN_SECS_RAW="${1#*=}"
shift
;;
-v|--validators)
DEMO_VALIDATORS="${2:-}"
-n|--nodes)
DEMO_NODES="${2:-}"
shift 2
;;
--validators=*)
DEMO_VALIDATORS="${1#*=}"
--nodes=*)
DEMO_NODES="${1#*=}"
shift
;;
--bundle)
@@ -222,11 +213,11 @@ run_examples::parse_args() {
fi
RUN_SECS="${RUN_SECS_RAW}"

if [ -z "${DEMO_VALIDATORS}" ]; then
run_examples::fail_with_usage "validators must be provided via -v/--validators"
if [ -z "${DEMO_NODES}" ]; then
run_examples::fail_with_usage "nodes must be provided via -n/--nodes"
fi
if ! common::is_uint "${DEMO_VALIDATORS}" ; then
run_examples::fail_with_usage "validators must be a non-negative integer (pass -v/--validators)"
if ! common::is_uint "${DEMO_NODES}" ; then
run_examples::fail_with_usage "nodes must be a non-negative integer (pass -n/--nodes)"
fi
}
@@ -277,18 +268,13 @@ run_examples::select_image() {
export IMAGE_TAG="${IMAGE}"
export NOMOS_TESTNET_IMAGE="${IMAGE}"

if [ "${MODE}" = "k8s" ]; then
if [ "${selection}" = "ecr" ]; then
export NOMOS_KZG_MODE="${NOMOS_KZG_MODE:-inImage}"
# A locally built Docker image isn't visible to remote clusters (e.g. EKS). Default to
# skipping the local rebuild, unless the user explicitly set NOMOS_SKIP_IMAGE_BUILD or
# overrides via NOMOS_FORCE_IMAGE_BUILD=1.
if [ "${NOMOS_FORCE_IMAGE_BUILD:-0}" != "1" ]; then
NOMOS_SKIP_IMAGE_BUILD="${NOMOS_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}"
export NOMOS_SKIP_IMAGE_BUILD
fi
else
export NOMOS_KZG_MODE="${NOMOS_KZG_MODE:-hostPath}"
if [ "${MODE}" = "k8s" ] && [ "${selection}" = "ecr" ]; then
# A locally built Docker image isn't visible to remote clusters (e.g. EKS). Default to
# skipping the local rebuild, unless the user explicitly set NOMOS_SKIP_IMAGE_BUILD or
# overrides via NOMOS_FORCE_IMAGE_BUILD=1.
if [ "${NOMOS_FORCE_IMAGE_BUILD:-0}" != "1" ]; then
NOMOS_SKIP_IMAGE_BUILD="${NOMOS_SKIP_IMAGE_BUILD:-${DEFAULT_K8S_ECR_SKIP_IMAGE_BUILD}}"
export NOMOS_SKIP_IMAGE_BUILD
fi
fi
}
@@ -368,13 +354,10 @@ run_examples::restore_binaries_from_tar() {

local src="${extract_dir}/artifacts"
local bin_dst="${ROOT_DIR}/testing-framework/assets/stack/bin"
local circuits_src="${src}/circuits"
local circuits_dst="${HOST_KZG_DIR}"

RESTORED_BIN_DIR="${src}"
export RESTORED_BIN_DIR

if [ ! -f "${src}/logos-blockchain-node" ] || [ ! -f "${src}/logos-blockchain-cli" ]; then
if [ ! -f "${src}/logos-blockchain-node" ]; then
echo "Binaries missing in ${tar_path}; provide a prebuilt binaries tarball." >&2
return 1
fi
@@ -383,25 +366,11 @@ run_examples::restore_binaries_from_tar() {
if [ "${MODE}" != "host" ] && ! run_examples::host_bin_matches_arch "${src}/logos-blockchain-node"; then
echo "Bundled binaries do not match host arch; skipping copy so containers rebuild from source."
copy_bins=0
rm -f "${bin_dst}/logos-blockchain-node" "${bin_dst}/logos-blockchain-cli"
rm -f "${bin_dst}/logos-blockchain-node"
fi
if [ "${copy_bins}" -eq 1 ]; then
mkdir -p "${bin_dst}"
cp "${src}/logos-blockchain-node" "${src}/logos-blockchain-cli" "${bin_dst}/"
fi

if [ -d "${circuits_src}" ] && [ -f "${circuits_src}/${KZG_FILE}" ]; then
rm -rf "${circuits_dst}"
mkdir -p "${circuits_dst}"
if command -v rsync >/dev/null 2>&1; then
rsync -a --delete "${circuits_src}/" "${circuits_dst}/"
else
rm -rf "${circuits_dst:?}/"*
cp -a "${circuits_src}/." "${circuits_dst}/"
fi
else
echo "Circuits missing in ${tar_path}; provide a prebuilt binaries/circuits tarball." >&2
return 1
cp "${src}/logos-blockchain-node" "${bin_dst}/"
fi

RESTORED_BINARIES=1
@@ -467,8 +436,7 @@ run_examples::maybe_rebuild_image() {
fi

echo "==> Rebuilding testnet image (${IMAGE})"
IMAGE_TAG="${IMAGE}" COMPOSE_CIRCUITS_PLATFORM="${COMPOSE_CIRCUITS_PLATFORM:-}" \
bash "${ROOT_DIR}/scripts/build/build_test_image.sh"
IMAGE_TAG="${IMAGE}" bash "${ROOT_DIR}/scripts/build/build_test_image.sh"
}

run_examples::maybe_restore_host_after_image() {
@@ -484,21 +452,11 @@ run_examples::maybe_restore_host_after_image() {
}

run_examples::validate_restored_bundle() {
HOST_BUNDLE_PATH="${HOST_KZG_DIR}"
KZG_HOST_PATH="${HOST_BUNDLE_PATH}/${KZG_FILE}"

if [ ! -x "${HOST_BUNDLE_PATH}/zksign/witness_generator" ]; then
common::die "Missing zksign/witness_generator in restored bundle; ensure the tarball contains host-compatible circuits."
fi
if [ ! -f "${KZG_HOST_PATH}" ]; then
common::die "KZG params missing at ${KZG_HOST_PATH}; ensure the tarball contains circuits."
fi

if [ "${MODE}" = "host" ] && ! { [ -n "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ] && [ -x "${LOGOS_BLOCKCHAIN_NODE_BIN:-}" ]; }; then
local tar_node tar_exec
local tar_node
tar_node="${RESTORED_BIN_DIR:-${ROOT_DIR}/testing-framework/assets/stack/bin}/logos-blockchain-node"

[ -x "${tar_node}" ] && [ -x "${tar_exec}" ] || common::die \
[ -x "${tar_node}" ] || common::die \
"Restored tarball missing host executables; provide a host-compatible binaries tarball."
run_examples::host_bin_matches_arch "${tar_node}" || common::die \
"Restored executables do not match host architecture; provide a host-compatible binaries tarball."
@@ -509,53 +467,30 @@ run_examples::validate_restored_bundle() {
fi
}

run_examples::kzg_path_for_mode() {
if [ "${MODE}" = "compose" ] || [ "${MODE}" = "k8s" ]; then
if [ "${MODE}" = "k8s" ] && [ "${NOMOS_KZG_MODE:-hostPath}" = "inImage" ]; then
echo "${NOMOS_KZG_IN_IMAGE_PARAMS_PATH:-${DEFAULT_KZG_IN_IMAGE_PARAMS_PATH}}"
else
echo "${KZG_CONTAINER_PATH}"
run_examples::ensure_circuits() {
if [ -n "${LOGOS_BLOCKCHAIN_CIRCUITS:-}" ]; then
if [ -d "${LOGOS_BLOCKCHAIN_CIRCUITS}" ]; then
return 0
fi
else
echo "${KZG_HOST_PATH}"
common::die "LOGOS_BLOCKCHAIN_CIRCUITS is set to '${LOGOS_BLOCKCHAIN_CIRCUITS}', but the directory does not exist"
fi
}

run_examples::ensure_compose_circuits_platform_default() {
if [ "${MODE}" != "compose" ] || [ -n "${COMPOSE_CIRCUITS_PLATFORM:-}" ]; then
local default_dir="${HOME}/.logos-blockchain-circuits"
if [ -d "${default_dir}" ]; then
LOGOS_BLOCKCHAIN_CIRCUITS="${default_dir}"
export LOGOS_BLOCKCHAIN_CIRCUITS
return 0
fi

local arch
arch="$(uname -m)"
case "${arch}" in
x86_64) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;;
arm64|aarch64) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;;
*) COMPOSE_CIRCUITS_PLATFORM="linux-x86_64" ;;
esac
export COMPOSE_CIRCUITS_PLATFORM
}

run_examples::maybe_set_docker_platform() {
if [ "${MODE}" != "compose" ] || [ -n "${DOCKER_DEFAULT_PLATFORM:-}" ]; then
return 0
fi

case "${COMPOSE_CIRCUITS_PLATFORM:-}" in
linux-x86_64) DOCKER_DEFAULT_PLATFORM="linux/amd64" ;;
linux-aarch64) DOCKER_DEFAULT_PLATFORM="linux/arm64" ;;
*) return 0 ;;
esac

export DOCKER_DEFAULT_PLATFORM
echo "==> Circuits not found; installing to ${default_dir}"
bash "${ROOT_DIR}/scripts/setup/setup-logos-blockchain-circuits.sh" "${VERSION}" "${default_dir}"
LOGOS_BLOCKCHAIN_CIRCUITS="${default_dir}"
export LOGOS_BLOCKCHAIN_CIRCUITS
}

run_examples::run() {
local kzg_path
kzg_path="$(run_examples::kzg_path_for_mode)"

export NOMOS_DEMO_RUN_SECS="${RUN_SECS}"
export NOMOS_DEMO_VALIDATORS="${DEMO_VALIDATORS}"
export NOMOS_DEMO_NODES="${DEMO_NODES}"

if [ -n "${METRICS_QUERY_URL}" ]; then
export NOMOS_METRICS_QUERY_URL="${METRICS_QUERY_URL}"
@ -564,17 +499,17 @@ run_examples::run() {
|
||||
export NOMOS_METRICS_OTLP_INGEST_URL="${METRICS_OTLP_INGEST_URL}"
|
||||
fi
|
||||
|
||||
if [ "${MODE}" = "host" ]; then
|
||||
run_examples::ensure_circuits
|
||||
fi
|
||||
|
||||
echo "==> Running ${BIN} for ${RUN_SECS}s (mode=${MODE}, image=${IMAGE})"
|
||||
cd "${ROOT_DIR}"
|
||||
|
||||
POL_PROOF_DEV_MODE=true \
|
||||
TESTNET_PRINT_ENDPOINTS=1 \
|
||||
NOMOS_TESTNET_IMAGE="${IMAGE}" \
|
||||
NOMOS_CIRCUITS="${HOST_BUNDLE_PATH}" \
|
||||
LOGOS_BLOCKCHAIN_CIRCUITS="${HOST_BUNDLE_PATH}" \
|
||||
LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH="${kzg_path}" \
|
||||
LOGOS_BLOCKCHAIN_NODE_BIN="${LOGOS_BLOCKCHAIN_NODE_BIN:-}" \
|
||||
COMPOSE_CIRCUITS_PLATFORM="${COMPOSE_CIRCUITS_PLATFORM:-}" \
|
||||
cargo run -p runner-examples --bin "${BIN}"
|
||||
}
|
||||
|
||||
@ -585,12 +520,10 @@ run_examples::main() {
|
||||
run_examples::select_image
|
||||
|
||||
run_examples::prepare_bundles
|
||||
echo "==> Using restored circuits/binaries bundle"
|
||||
echo "==> Using restored binaries bundle"
|
||||
|
||||
SETUP_OUT="$(common::tmpfile nomos-setup-output.XXXXXX)"
|
||||
|
||||
run_examples::ensure_compose_circuits_platform_default
|
||||
run_examples::maybe_set_docker_platform
|
||||
run_examples::maybe_rebuild_image
|
||||
run_examples::maybe_restore_host_after_image
|
||||
run_examples::validate_restored_bundle
|
||||
|
||||
@ -17,7 +17,7 @@ image rebuilds (where it makes sense), after cleaning and rebuilding bundles.
|
||||
|
||||
Options:
|
||||
-t, --run-seconds N Demo duration for each run (default: 120)
|
||||
-v, --validators N Validators (default: 1)
|
||||
-n, --nodes N Nodes (default: 1)
|
||||
--modes LIST Comma-separated: host,compose,k8s (default: host,compose,k8s)
|
||||
--no-clean Skip scripts/ops/clean.sh step
|
||||
--no-bundles Skip scripts/build/build-bundle.sh (uses existing .tmp tarballs)
|
||||
@ -44,7 +44,7 @@ matrix::have() { command -v "$1" >/dev/null 2>&1; }
|
||||
|
||||
matrix::parse_args() {
|
||||
RUN_SECS=120
|
||||
VALIDATORS=1
|
||||
NODES=1
|
||||
MODES_RAW="host,compose,k8s"
|
||||
DO_CLEAN=1
|
||||
DO_BUNDLES=1
|
||||
@ -59,8 +59,8 @@ matrix::parse_args() {
|
||||
-h|--help) matrix::usage; exit 0 ;;
|
||||
-t|--run-seconds) RUN_SECS="${2:-}"; shift 2 ;;
|
||||
--run-seconds=*) RUN_SECS="${1#*=}"; shift ;;
|
||||
-v|--validators) VALIDATORS="${2:-}"; shift 2 ;;
|
||||
--validators=*) VALIDATORS="${1#*=}"; shift ;;
|
||||
-n|--nodes) NODES="${2:-}"; shift 2 ;;
|
||||
--nodes=*) NODES="${1#*=}"; shift ;;
|
||||
--modes) MODES_RAW="${2:-}"; shift 2 ;;
|
||||
--modes=*) MODES_RAW="${1#*=}"; shift ;;
|
||||
--no-clean) DO_CLEAN=0; shift ;;
|
||||
@ -78,7 +78,7 @@ matrix::parse_args() {
|
||||
|
||||
common::is_uint "${RUN_SECS}" || matrix::die "--run-seconds must be an integer"
|
||||
[ "${RUN_SECS}" -gt 0 ] || matrix::die "--run-seconds must be > 0"
|
||||
common::is_uint "${VALIDATORS}" || matrix::die "--validators must be an integer"
|
||||
common::is_uint "${NODES}" || matrix::die "--nodes must be an integer"
|
||||
}
|
||||
|
||||
matrix::split_modes() {
|
||||
@ -215,7 +215,7 @@ matrix::main() {
|
||||
host)
|
||||
matrix::run_case "host" \
|
||||
"${ROOT_DIR}/scripts/run/run-examples.sh" \
|
||||
-t "${RUN_SECS}" -v "${VALIDATORS}" \
|
||||
-t "${RUN_SECS}" -n "${NODES}" \
|
||||
"${forward[@]}" \
|
||||
host
|
||||
;;
|
||||
@ -223,7 +223,7 @@ matrix::main() {
|
||||
if [ "${SKIP_IMAGE_BUILD_VARIANTS}" -eq 0 ]; then
|
||||
matrix::run_case "compose.image_build" \
|
||||
"${ROOT_DIR}/scripts/run/run-examples.sh" \
|
||||
-t "${RUN_SECS}" -v "${VALIDATORS}" \
|
||||
-t "${RUN_SECS}" -n "${NODES}" \
|
||||
"${forward[@]}" \
|
||||
compose
|
||||
else
|
||||
@ -233,7 +233,7 @@ matrix::main() {
|
||||
matrix::run_case "compose.skip_image_build" \
|
||||
"${ROOT_DIR}/scripts/run/run-examples.sh" \
|
||||
--no-image-build \
|
||||
-t "${RUN_SECS}" -v "${VALIDATORS}" \
|
||||
-t "${RUN_SECS}" -n "${NODES}" \
|
||||
"${forward[@]}" \
|
||||
compose
|
||||
;;
|
||||
@ -254,7 +254,7 @@ matrix::main() {
|
||||
fi
|
||||
matrix::run_case "k8s.image_build" \
|
||||
"${ROOT_DIR}/scripts/run/run-examples.sh" \
|
||||
-t "${RUN_SECS}" -v "${VALIDATORS}" \
|
||||
-t "${RUN_SECS}" -n "${NODES}" \
|
||||
"${forward[@]}" \
|
||||
k8s
|
||||
unset NOMOS_FORCE_IMAGE_BUILD || true
|
||||
@ -268,7 +268,7 @@ matrix::main() {
|
||||
matrix::run_case "k8s.skip_image_build" \
|
||||
"${ROOT_DIR}/scripts/run/run-examples.sh" \
|
||||
--no-image-build \
|
||||
-t "${RUN_SECS}" -v "${VALIDATORS}" \
|
||||
-t "${RUN_SECS}" -n "${NODES}" \
|
||||
"${forward[@]}" \
|
||||
k8s
|
||||
;;
|
||||
|
||||
@ -1,186 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [ -z "${BASH_VERSION:-}" ]; then
|
||||
exec bash "$0" "$@"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../lib/common.sh"
|
||||
|
||||
readonly DEFAULT_CIRCUITS_VERSION="v0.3.2"
|
||||
readonly DEFAULT_LINUX_PLATFORM="linux-x86_64"
|
||||
|
||||
readonly DEFAULT_KZG_DIR_REL="testing-framework/assets/stack/kzgrs_test_params"
|
||||
readonly DEFAULT_KZG_FILE="kzgrs_test_params"
|
||||
readonly DEFAULT_KZG_PARAMS_RELPATH="tests/kzgrs/kzgrs_test_params"
|
||||
readonly RAW_GITHUB_BASE_URL="https://raw.githubusercontent.com"
|
||||
|
||||
setup_circuits_stack::usage() {
|
||||
cat <<'EOF'
|
||||
Usage: scripts/setup/setup-circuits-stack.sh [VERSION]
|
||||
|
||||
Prepares circuits for both the Docker image (Linux/x86_64) and the host (for
|
||||
witness generators).
|
||||
|
||||
Env overrides:
|
||||
STACK_DIR Where to place the Linux bundle (default: testing-framework/assets/stack/kzgrs_test_params)
|
||||
HOST_DIR Where to place the host bundle (default: .tmp/logos-blockchain-circuits-host)
|
||||
LINUX_STAGE_DIR Optional staging dir for the Linux bundle (default: .tmp/logos-blockchain-circuits-linux)
|
||||
NOMOS_CIRCUITS_PLATFORM Force host platform (e.g., macos-aarch64)
|
||||
NOMOS_CIRCUITS_REBUILD_RAPIDSNARK Set to 1 to force rebuild (host bundle only)
|
||||
EOF
|
||||
}
|
||||
|
||||
setup_circuits_stack::fail_with_usage() {
|
||||
echo "$1" >&2
|
||||
setup_circuits_stack::usage
|
||||
exit 1
|
||||
}
|
||||
|
||||
setup_circuits_stack::realpath_py() {
|
||||
python3 - "$1" <<'PY'
|
||||
import os, sys
|
||||
print(os.path.realpath(sys.argv[1]))
|
||||
PY
|
||||
}
|
||||
|
||||
setup_circuits_stack::detect_platform() {
|
||||
local os arch
|
||||
case "$(uname -s)" in
|
||||
Linux*) os="linux" ;;
|
||||
Darwin*) os="macos" ;;
|
||||
MINGW*|MSYS*|CYGWIN*) os="windows" ;;
|
||||
*) common::die "Unsupported OS: $(uname -s)" ;;
|
||||
esac
|
||||
|
||||
case "$(uname -m)" in
|
||||
x86_64) arch="x86_64" ;;
|
||||
aarch64|arm64) arch="aarch64" ;;
|
||||
*) common::die "Unsupported arch: $(uname -m)" ;;
|
||||
esac
|
||||
|
||||
echo "${os}-${arch}"
|
||||
}
|
||||
|
||||
setup_circuits_stack::fetch_bundle() {
|
||||
local platform="$1"
|
||||
local dest="$2"
|
||||
local rebuild="${3:-0}"
|
||||
|
||||
# Install into a temporary directory first and only replace `${dest}` once we
|
||||
# have a complete bundle. This avoids deleting a working install if GitHub
|
||||
# returns transient errors (e.g. 502/504).
|
||||
local temp_dest
|
||||
temp_dest="$(mktemp -d)"
|
||||
|
||||
if ! NOMOS_CIRCUITS_PLATFORM="${platform}" \
|
||||
NOMOS_CIRCUITS_REBUILD_RAPIDSNARK="${rebuild}" \
|
||||
"${ROOT_DIR}/scripts/setup/setup-logos-blockchain-circuits.sh" "${VERSION}" "${temp_dest}"; then
|
||||
rm -rf "${temp_dest}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm -rf "${dest}"
|
||||
mkdir -p "$(dirname "${dest}")"
|
||||
mv "${temp_dest}" "${dest}"
|
||||
}
|
||||
|
||||
setup_circuits_stack::fetch_kzg_params() {
|
||||
local dest_dir="$1"
|
||||
local dest_file="${dest_dir}/${KZG_FILE}"
|
||||
local url="${RAW_GITHUB_BASE_URL}/logos-co/nomos-node/${NOMOS_NODE_REV}/${DEFAULT_KZG_PARAMS_RELPATH}"
|
||||
|
||||
echo "Fetching KZG parameters from ${url}"
|
||||
curl -fsSL "${url}" -o "${dest_file}"
|
||||
}
|
||||
|
||||
setup_circuits_stack::load_env() {
|
||||
ROOT_DIR="$(common::repo_root)"
|
||||
export ROOT_DIR
|
||||
|
||||
common::require_file "${ROOT_DIR}/versions.env"
|
||||
# shellcheck disable=SC1091
|
||||
. "${ROOT_DIR}/versions.env"
|
||||
common::maybe_source "${ROOT_DIR}/paths.env"
|
||||
|
||||
KZG_DIR_REL="${NOMOS_KZG_DIR_REL:-${DEFAULT_KZG_DIR_REL}}"
|
||||
KZG_FILE="${NOMOS_KZG_FILE:-${DEFAULT_KZG_FILE}}"
|
||||
HOST_DIR_REL_DEFAULT="${NOMOS_CIRCUITS_HOST_DIR_REL:-.tmp/logos-blockchain-circuits-host}"
|
||||
LINUX_DIR_REL_DEFAULT="${NOMOS_CIRCUITS_LINUX_DIR_REL:-.tmp/logos-blockchain-circuits-linux}"
|
||||
|
||||
VERSION="${VERSION:-${DEFAULT_CIRCUITS_VERSION}}"
|
||||
STACK_DIR="${STACK_DIR:-${ROOT_DIR}/${KZG_DIR_REL}}"
|
||||
HOST_DIR="${HOST_DIR:-${ROOT_DIR}/${HOST_DIR_REL_DEFAULT}}"
|
||||
LINUX_STAGE_DIR="${LINUX_STAGE_DIR:-${ROOT_DIR}/${LINUX_DIR_REL_DEFAULT}}"
|
||||
|
||||
NOMOS_NODE_REV="${NOMOS_NODE_REV:?Missing NOMOS_NODE_REV in versions.env or env}"
|
||||
|
||||
# Force non-interactive installs so repeated runs do not prompt.
|
||||
export NOMOS_CIRCUITS_NONINTERACTIVE=1
|
||||
}
|
||||
|
||||
setup_circuits_stack::main() {
|
||||
if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then
|
||||
setup_circuits_stack::usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
setup_circuits_stack::load_env
|
||||
if [ -n "${1:-}" ]; then
|
||||
VERSION="$1"
|
||||
fi
|
||||
|
||||
echo "Preparing circuits (version ${VERSION})"
|
||||
echo "Workspace: ${ROOT_DIR}"
|
||||
|
||||
local linux_platform="${DEFAULT_LINUX_PLATFORM}"
|
||||
|
||||
echo "Installing Linux bundle for Docker image into ${STACK_DIR}"
|
||||
local stage_real stack_real
|
||||
stage_real="$(setup_circuits_stack::realpath_py "${LINUX_STAGE_DIR}")"
|
||||
stack_real="$(setup_circuits_stack::realpath_py "${STACK_DIR}")"
|
||||
|
||||
if [ "${stage_real}" = "${stack_real}" ]; then
|
||||
rm -rf "${STACK_DIR}"
|
||||
setup_circuits_stack::fetch_bundle "${linux_platform}" "${STACK_DIR}" 0
|
||||
setup_circuits_stack::fetch_kzg_params "${STACK_DIR}"
|
||||
else
|
||||
rm -rf "${LINUX_STAGE_DIR}"
|
||||
mkdir -p "${LINUX_STAGE_DIR}"
|
||||
setup_circuits_stack::fetch_bundle "${linux_platform}" "${LINUX_STAGE_DIR}" 0
|
||||
rm -rf "${STACK_DIR}"
|
||||
mkdir -p "${STACK_DIR}"
|
||||
cp -R "${LINUX_STAGE_DIR}/." "${STACK_DIR}/"
|
||||
setup_circuits_stack::fetch_kzg_params "${STACK_DIR}"
|
||||
fi
|
||||
echo "Linux bundle ready at ${STACK_DIR}"
|
||||
|
||||
local host_platform
|
||||
host_platform="${NOMOS_CIRCUITS_PLATFORM:-$(setup_circuits_stack::detect_platform)}"
|
||||
if [[ "${host_platform}" == "${linux_platform}" ]]; then
|
||||
echo "Host platform ${host_platform} matches Linux bundle; host can reuse ${STACK_DIR}"
|
||||
echo "Export if you want to be explicit:"
|
||||
echo " export NOMOS_CIRCUITS=\"${STACK_DIR}\""
|
||||
else
|
||||
echo "Host platform detected: ${host_platform}; installing host-native bundle into ${HOST_DIR}"
|
||||
setup_circuits_stack::fetch_bundle "${host_platform}" "${HOST_DIR}" "${NOMOS_CIRCUITS_REBUILD_RAPIDSNARK:-0}"
|
||||
setup_circuits_stack::fetch_kzg_params "${HOST_DIR}"
|
||||
echo "Host bundle ready at ${HOST_DIR}"
|
||||
echo
|
||||
echo "Set for host runs:"
|
||||
echo " export NOMOS_CIRCUITS=\"${HOST_DIR}\""
|
||||
fi
|
||||
|
||||
cat <<'EOF'
|
||||
|
||||
Done.
|
||||
- For Docker/compose: rebuild the image to bake the Linux bundle:
|
||||
scripts/build/build_test_image.sh
|
||||
- For host runs (e.g., compose_runner): ensure NOMOS_CIRCUITS points to the host bundle above.
|
||||
EOF
|
||||
}
|
||||
|
||||
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
|
||||
setup_circuits_stack::main "$@"
|
||||
fi
|
||||
@ -3,10 +3,7 @@
|
||||
# Ignore warnings about sensitive information as this is test data.
|
||||
|
||||
ARG VERSION
|
||||
ARG CIRCUITS_OVERRIDE
|
||||
ARG NOMOS_NODE_REV
|
||||
ARG CIRCUITS_PLATFORM
|
||||
ARG RAPIDSNARK_FORCE_REBUILD
|
||||
|
||||
# ===========================
|
||||
# BUILD IMAGE
|
||||
@ -15,10 +12,7 @@ ARG RAPIDSNARK_FORCE_REBUILD
|
||||
FROM rust:1.91.0-slim-bookworm AS builder
|
||||
|
||||
ARG VERSION
|
||||
ARG CIRCUITS_OVERRIDE
|
||||
ARG NOMOS_NODE_REV
|
||||
ARG CIRCUITS_PLATFORM
|
||||
ARG RAPIDSNARK_FORCE_REBUILD
|
||||
|
||||
LABEL maintainer="augustinas@status.im" \
|
||||
source="https://github.com/logos-co/nomos-node" \
|
||||
@ -30,7 +24,6 @@ COPY . .
|
||||
# Reduce debug artifact size.
|
||||
ENV CARGO_PROFILE_DEV_DEBUG=0
|
||||
ENV NOMOS_NODE_REV=${NOMOS_NODE_REV}
|
||||
ENV RAPIDSNARK_FORCE_REBUILD=${RAPIDSNARK_FORCE_REBUILD}
|
||||
|
||||
# Install dependencies needed for building RocksDB and for circuit tooling.
|
||||
RUN apt-get update && apt-get install -yq \
|
||||
@ -38,16 +31,13 @@ RUN apt-get update && apt-get install -yq \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN chmod +x \
|
||||
/workspace/testing-framework/assets/stack/scripts/docker/prepare_circuits.sh \
|
||||
/workspace/testing-framework/assets/stack/scripts/docker/prepare_binaries.sh \
|
||||
/workspace/testing-framework/assets/stack/scripts/docker/build_cfgsync.sh \
|
||||
/workspace/scripts/build/build-rapidsnark.sh \
|
||||
/workspace/scripts/setup/setup-logos-blockchain-circuits.sh \
|
||||
/workspace/testing-framework/assets/stack/scripts/setup-logos-blockchain-circuits.sh \
|
||||
|| true
|
||||
|
||||
RUN /workspace/testing-framework/assets/stack/scripts/docker/prepare_circuits.sh
|
||||
RUN /workspace/testing-framework/assets/stack/scripts/setup-logos-blockchain-circuits.sh "${VERSION}" /opt/circuits
|
||||
|
||||
ENV NOMOS_CIRCUITS=/opt/circuits
|
||||
ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits
|
||||
|
||||
RUN /workspace/testing-framework/assets/stack/scripts/docker/prepare_binaries.sh
|
||||
@ -75,18 +65,8 @@ RUN apt-get update && apt-get install -yq \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /opt/circuits /opt/circuits
|
||||
|
||||
# Provide a stable in-image location for the KZG test parameters so EKS runs do
|
||||
# not rely on hostPath volumes.
|
||||
COPY --from=builder /workspace/testing-framework/assets/stack/kzgrs_test_params/kzgrs_test_params /opt/nomos/kzg-params/kzgrs_test_params
|
||||
|
||||
COPY --from=builder /workspace/artifacts/logos-blockchain-node /usr/bin/logos-blockchain-node
|
||||
COPY --from=builder /workspace/artifacts/logos-blockchain-cli /usr/bin/logos-blockchain-cli
|
||||
COPY --from=builder /workspace/artifacts/cfgsync-server /usr/bin/cfgsync-server
|
||||
COPY --from=builder /workspace/artifacts/cfgsync-client /usr/bin/cfgsync-client
|
||||
|
||||
ENV NOMOS_CIRCUITS=/opt/circuits
|
||||
ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits
|
||||
|
||||
EXPOSE 3000 8080 9000 60000
|
||||
|
||||
@ -15,8 +15,6 @@ num_samples: 1
|
||||
num_subnets: 2
|
||||
old_blobs_check_interval: "5.0"
|
||||
blobs_validity_duration: "60.0"
|
||||
# KZG parameters are mounted into the stack as /kzgrs_test_params.
|
||||
global_params_path: "/kzgrs_test_params/kzgrs_test_params"
|
||||
min_dispersal_peers: 1
|
||||
min_replication_peers: 1
|
||||
monitor_failure_time_window: "5.0"
|
||||
|
||||
@ -9,7 +9,7 @@ TARGET_ARCH="$(uname -m)"
|
||||
|
||||
have_prebuilt() {
|
||||
[ -f testing-framework/assets/stack/bin/logos-blockchain-node ] && \
|
||||
[ -f testing-framework/assets/stack/bin/logos-blockchain-cli ]
|
||||
[ -f testing-framework/assets/stack/bin/logos-blockchain-node ]
|
||||
}
|
||||
|
||||
bin_matches_arch() {
|
||||
@ -33,7 +33,6 @@ bin_matches_arch() {
|
||||
if have_prebuilt && bin_matches_arch; then
|
||||
echo "Using prebuilt logos-blockchain binaries from testing-framework/assets/stack/bin"
|
||||
cp testing-framework/assets/stack/bin/logos-blockchain-node /workspace/artifacts/logos-blockchain-node
|
||||
cp testing-framework/assets/stack/bin/logos-blockchain-cli /workspace/artifacts/logos-blockchain-cli
|
||||
exit 0
|
||||
fi
|
||||
|
||||
@ -51,23 +50,10 @@ git checkout "${NOMOS_NODE_REV}"
|
||||
git reset --hard
|
||||
git clean -fdx
|
||||
|
||||
# Enable real verification keys when available.
|
||||
if [ -f "/opt/circuits/zksign/verification_key.json" ] \
|
||||
|| [ -f "/opt/circuits/pol/verification_key.json" ] \
|
||||
|| [ -f "/opt/circuits/poq/verification_key.json" ] \
|
||||
|| [ -f "/opt/circuits/poc/verification_key.json" ]; then
|
||||
export CARGO_FEATURE_BUILD_VERIFICATION_KEY=1
|
||||
else
|
||||
unset CARGO_FEATURE_BUILD_VERIFICATION_KEY
|
||||
fi
|
||||
|
||||
# Enable pol-dev-mode via cfg to let POL_PROOF_DEV_MODE short-circuit proofs in tests.
|
||||
RUSTFLAGS='--cfg feature="pol-dev-mode"' NOMOS_CIRCUITS=/opt/circuits \
|
||||
LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits \
|
||||
cargo build --features "testing" \
|
||||
-p logos-blockchain-node -p logos-blockchain-cli
|
||||
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
|
||||
cargo build --features "testing" -p logos-blockchain-node
|
||||
|
||||
cp /tmp/nomos-node/target/debug/logos-blockchain-node /workspace/artifacts/logos-blockchain-node
|
||||
cp /tmp/nomos-node/target/debug/logos-blockchain-cli /workspace/artifacts/logos-blockchain-cli
|
||||
|
||||
rm -rf /tmp/nomos-node/target/debug/incremental
|
||||
|
||||
@ -1,93 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
VERSION="${VERSION:?VERSION build arg missing}"
|
||||
CIRCUITS_PLATFORM="${CIRCUITS_PLATFORM:?CIRCUITS_PLATFORM build arg missing}"
|
||||
CIRCUITS_OVERRIDE="${CIRCUITS_OVERRIDE:-}"
|
||||
|
||||
mkdir -p /opt/circuits
|
||||
|
||||
select_circuits_source() {
|
||||
if [ -n "${CIRCUITS_OVERRIDE}" ] && [ -e "/workspace/${CIRCUITS_OVERRIDE}" ]; then
|
||||
echo "/workspace/${CIRCUITS_OVERRIDE}"
|
||||
return 0
|
||||
fi
|
||||
if [ -e "/workspace/tests/kzgrs/kzgrs_test_params" ]; then
|
||||
echo "/workspace/tests/kzgrs/kzgrs_test_params"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
if CIRCUITS_PATH="$(select_circuits_source)"; then
|
||||
echo "Using prebuilt circuits bundle from ${CIRCUITS_PATH#/workspace/}"
|
||||
if [ -d "${CIRCUITS_PATH}" ]; then
|
||||
cp -R "${CIRCUITS_PATH}/." /opt/circuits
|
||||
else
|
||||
cp "${CIRCUITS_PATH}" /opt/circuits/
|
||||
fi
|
||||
fi
|
||||
|
||||
TARGET_ARCH="$(uname -m)"
|
||||
|
||||
expect_arch_pattern() {
|
||||
case "$1" in
|
||||
x86_64) echo "x86-64|x86_64" ;;
|
||||
aarch64|arm64) echo "arm64|aarch64" ;;
|
||||
*) echo "$1" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
require_linux_execs=0
|
||||
|
||||
check_linux_exec() {
|
||||
local path="$1"
|
||||
if [ ! -f "${path}" ]; then
|
||||
return 0
|
||||
fi
|
||||
local info
|
||||
info="$(file -b "${path}" 2>/dev/null || true)"
|
||||
case "${info}" in
|
||||
*ELF*) : ;;
|
||||
*)
|
||||
echo "Circuits executable is not ELF: ${path} (${info}); forcing circuits download"
|
||||
require_linux_execs=1
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
local pattern
|
||||
pattern="$(expect_arch_pattern "${TARGET_ARCH}")"
|
||||
if [ -n "${pattern}" ] && ! echo "${info}" | grep -Eqi "${pattern}"; then
|
||||
echo "Circuits executable arch mismatch: ${path} (${info}); forcing circuits download"
|
||||
require_linux_execs=1
|
||||
fi
|
||||
}
|
||||
|
||||
check_linux_exec /opt/circuits/zksign/witness_generator
|
||||
check_linux_exec /opt/circuits/pol/witness_generator
|
||||
|
||||
if [ "${RAPIDSNARK_FORCE_REBUILD:-0}" = "1" ]; then
|
||||
echo "Forcing rapidsnark rebuild for /opt/circuits"
|
||||
scripts/build/build-rapidsnark.sh /opt/circuits
|
||||
elif [ -f "/opt/circuits/prover" ]; then
|
||||
PROVER_INFO="$(file -b /opt/circuits/prover || true)"
|
||||
case "${TARGET_ARCH}" in
|
||||
x86_64) EXPECT_ARCH="x86-64" ;;
|
||||
aarch64|arm64) EXPECT_ARCH="aarch64" ;;
|
||||
*) EXPECT_ARCH="${TARGET_ARCH}" ;;
|
||||
esac
|
||||
if [ -n "${PROVER_INFO}" ] && ! echo "${PROVER_INFO}" | grep -qi "${EXPECT_ARCH}"; then
|
||||
echo "Circuits prover architecture (${PROVER_INFO}) does not match target ${TARGET_ARCH}; rebuilding rapidsnark binaries"
|
||||
RAPIDSNARK_FORCE_REBUILD=1 \
|
||||
scripts/build/build-rapidsnark.sh /opt/circuits
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "${require_linux_execs}" -eq 1 ] || [ ! -f "/opt/circuits/pol/verification_key.json" ]; then
|
||||
echo "Downloading ${VERSION} circuits bundle for ${CIRCUITS_PLATFORM}"
|
||||
NOMOS_CIRCUITS_PLATFORM="${CIRCUITS_PLATFORM}" \
|
||||
NOMOS_CIRCUITS_REBUILD_RAPIDSNARK=1 \
|
||||
RAPIDSNARK_BUILD_GMP=1 \
|
||||
scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "/opt/circuits"
|
||||
fi
|
||||
@ -2,11 +2,11 @@
|
||||
|
||||
set -e
|
||||
|
||||
role="${1:-validator}"
|
||||
role="${1:-node}"
|
||||
|
||||
bin_for_role() {
|
||||
case "$1" in
|
||||
validator) echo "/usr/bin/logos-blockchain-node" ;;
|
||||
node) echo "/usr/bin/logos-blockchain-node" ;;
|
||||
*) echo "Unknown role: $1" >&2; exit 2 ;;
|
||||
esac
|
||||
}
|
||||
@ -39,7 +39,6 @@ check_binary_arch() {
|
||||
bin_path="$(bin_for_role "$role")"
|
||||
check_binary_arch "$bin_path" "logos-blockchain-${role}"
|
||||
|
||||
KZG_CONTAINER_PATH="${NOMOS_KZG_CONTAINER_PATH:-/kzgrs_test_params/kzgrs_test_params}"
|
||||
host_identifier_default="${role}-$(hostname -i)"
|
||||
|
||||
export CFG_FILE_PATH="/config.yaml" \
|
||||
@ -47,7 +46,6 @@ export CFG_FILE_PATH="/config.yaml" \
|
||||
CFG_HOST_IP=$(hostname -i) \
|
||||
CFG_HOST_KIND="${CFG_HOST_KIND:-$role}" \
|
||||
CFG_HOST_IDENTIFIER="${CFG_HOST_IDENTIFIER:-$host_identifier_default}" \
|
||||
LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH="${LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH:-${KZG_CONTAINER_PATH}}" \
|
||||
NOMOS_TIME_BACKEND="${NOMOS_TIME_BACKEND:-monotonic}" \
|
||||
LOG_LEVEL="${LOG_LEVEL:-INFO}" \
|
||||
POL_PROOF_DEV_MODE="${POL_PROOF_DEV_MODE:-true}"
|
||||
|
||||
@ -1,2 +1,2 @@
|
||||
#!/bin/sh
|
||||
exec /etc/nomos/scripts/run_nomos.sh validator
|
||||
exec /etc/nomos/scripts/run_nomos.sh node
|
||||
|
||||
@ -10,7 +10,6 @@ repository.workspace = true
|
||||
version = "0.1.0"
|
||||
|
||||
[dependencies]
|
||||
blst = "0.3.11"
|
||||
chain-leader = { workspace = true }
|
||||
chain-network = { workspace = true }
|
||||
chain-service = { workspace = true }
|
||||
|
||||
@ -37,18 +37,5 @@ pub fn cfgsync_port() -> u16 {
|
||||
tf_env::nomos_cfgsync_port().unwrap_or(DEFAULT_CFGSYNC_PORT)
|
||||
}
|
||||
|
||||
/// Default KZG parameters file name.
|
||||
pub const KZG_PARAMS_FILENAME: &str = "kzgrs_test_params";
|
||||
/// Default container path for KZG parameters (compose/k8s mount point).
|
||||
pub const DEFAULT_KZG_CONTAINER_PATH: &str = "/kzgrs_test_params/kzgrs_test_params";
|
||||
|
||||
/// Resolve container KZG path from `NOMOS_KZG_CONTAINER_PATH`, falling back to
|
||||
/// the default.
|
||||
pub fn kzg_container_path() -> String {
|
||||
tf_env::nomos_kzg_container_path().unwrap_or_else(|| DEFAULT_KZG_CONTAINER_PATH.to_string())
|
||||
}
|
||||
|
||||
/// Default stack assets directory.
|
||||
pub const DEFAULT_ASSETS_STACK_DIR: &str = "testing-framework/assets/stack";
|
||||
/// Default host-relative directory for KZG assets.
|
||||
pub const DEFAULT_KZG_HOST_DIR: &str = "testing-framework/assets/stack/kzgrs_test_params";
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
pub(crate) mod blend;
|
||||
pub(crate) mod common;
|
||||
pub mod kms;
|
||||
pub mod validator;
|
||||
pub mod node;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
use nomos_node::{
|
||||
Config as ValidatorConfig, RocksBackendSettings, config::deployment::DeploymentSettings,
|
||||
Config as NodeConfig, RocksBackendSettings, config::deployment::DeploymentSettings,
|
||||
};
|
||||
use nomos_sdp::SdpSettings;
|
||||
|
||||
@ -16,15 +16,15 @@ use crate::{
|
||||
};
|
||||
|
||||
#[must_use]
|
||||
pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig {
|
||||
pub fn create_node_config(config: GeneralConfig) -> NodeConfig {
|
||||
let network_config = config.network_config.clone();
|
||||
let (blend_user_config, blend_deployment, network_deployment) =
|
||||
build_blend_service_config(&config.blend_config);
|
||||
|
||||
let deployment_settings =
|
||||
build_validator_deployment_settings(&config, blend_deployment, network_deployment);
|
||||
build_node_deployment_settings(&config, blend_deployment, network_deployment);
|
||||
|
||||
ValidatorConfig {
|
||||
NodeConfig {
|
||||
network: network_config,
|
||||
blend: blend_user_config,
|
||||
deployment: deployment_settings,
|
||||
@ -41,7 +41,7 @@ pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig {
|
||||
}
|
||||
}
|
||||
|
||||
fn build_validator_deployment_settings(
|
||||
fn build_node_deployment_settings(
|
||||
config: &GeneralConfig,
|
||||
blend_deployment: nomos_node::config::blend::deployment::Settings,
|
||||
network_deployment: nomos_node::config::network::deployment::Settings,
|
||||
@ -1,85 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use testing_framework_config::constants::{DEFAULT_KZG_CONTAINER_PATH, DEFAULT_KZG_HOST_DIR};
|
||||
use testing_framework_env as tf_env;
|
||||
|
||||
/// Default in-image path for KZG params used by testnet images.
|
||||
pub const DEFAULT_IN_IMAGE_KZG_PARAMS_PATH: &str = "/opt/nomos/kzg-params/kzgrs_test_params";
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum KzgMode {
|
||||
HostPath,
|
||||
InImage,
|
||||
}
|
||||
|
||||
impl KzgMode {
|
||||
#[must_use]
|
||||
pub fn from_env_or_default() -> Self {
|
||||
match tf_env::nomos_kzg_mode().as_deref() {
|
||||
Some("hostPath") => Self::HostPath,
|
||||
Some("inImage") => Self::InImage,
|
||||
None => Self::InImage,
|
||||
Some(other) => {
|
||||
tracing::warn!(
|
||||
value = other,
|
||||
"unknown NOMOS_KZG_MODE; defaulting to inImage"
|
||||
);
|
||||
Self::InImage
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Canonical KZG parameters model used by runners and config distribution.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct KzgParamsSpec {
|
||||
pub mode: KzgMode,
|
||||
/// Value written into node configs (cfgsync `global_params_path`) and,
|
||||
/// where applicable, exported as `LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH` for
|
||||
/// node processes.
|
||||
pub node_params_path: String,
|
||||
/// Host directory that must exist when running in `HostPath` mode.
|
||||
pub host_params_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl KzgParamsSpec {
|
||||
#[must_use]
|
||||
pub fn for_compose(use_kzg_mount: bool) -> Self {
|
||||
let node_params_path = tf_env::nomos_kzgrs_params_path().unwrap_or_else(|| {
|
||||
if use_kzg_mount {
|
||||
DEFAULT_KZG_CONTAINER_PATH.to_string()
|
||||
} else {
|
||||
DEFAULT_IN_IMAGE_KZG_PARAMS_PATH.to_string()
|
||||
}
|
||||
});
|
||||
Self {
|
||||
mode: if use_kzg_mount {
|
||||
KzgMode::HostPath
|
||||
} else {
|
||||
KzgMode::InImage
|
||||
},
|
||||
node_params_path,
|
||||
host_params_dir: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn for_k8s(root: &Path) -> Self {
|
||||
let mode = KzgMode::from_env_or_default();
|
||||
match mode {
|
||||
KzgMode::HostPath => Self {
|
||||
mode,
|
||||
node_params_path: DEFAULT_KZG_CONTAINER_PATH.to_string(),
|
||||
host_params_dir: Some(root.join(
|
||||
tf_env::nomos_kzg_dir_rel().unwrap_or_else(|| DEFAULT_KZG_HOST_DIR.to_string()),
|
||||
)),
|
||||
},
|
||||
KzgMode::InImage => Self {
|
||||
mode,
|
||||
node_params_path: tf_env::nomos_kzgrs_params_path()
|
||||
.unwrap_or_else(|| DEFAULT_IN_IMAGE_KZG_PARAMS_PATH.to_string()),
|
||||
host_params_dir: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,4 +1,3 @@
|
||||
pub mod kzg;
|
||||
pub mod manual;
|
||||
pub mod nodes;
|
||||
pub mod scenario;
|
||||
|
||||
@ -5,7 +5,7 @@ use crate::scenario::{DynError, StartNodeOptions, StartedNode};
|
||||
/// Interface for imperative, deployer-backed manual clusters.
|
||||
#[async_trait]
|
||||
pub trait ManualClusterHandle: Send + Sync {
|
||||
async fn start_validator_with(
|
||||
async fn start_node_with(
|
||||
&self,
|
||||
name: &str,
|
||||
options: StartNodeOptions,
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
mod api_client;
|
||||
pub mod common;
|
||||
pub mod validator;
|
||||
pub mod node;
|
||||
|
||||
use std::sync::LazyLock;
|
||||
|
||||
|
||||
@ -2,7 +2,7 @@ use std::{ops::Deref, path::PathBuf, time::Duration};
|
||||
|
||||
use nomos_node::Config;
|
||||
use nomos_tracing_service::LoggerLayer;
|
||||
pub use testing_framework_config::nodes::validator::create_validator_config;
|
||||
pub use testing_framework_config::nodes::node::create_node_config;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use super::{persist_tempdir, should_persist_tempdir};
|
||||
@ -30,16 +30,11 @@ fn binary_path() -> PathBuf {
|
||||
BinaryResolver::resolve_path(&cfg)
|
||||
}
|
||||
|
||||
pub enum Pool {
|
||||
Da,
|
||||
Mantle,
|
||||
}
|
||||
|
||||
pub struct Validator {
|
||||
pub struct Node {
|
||||
handle: NodeHandle<Config>,
|
||||
}
|
||||
|
||||
impl Deref for Validator {
|
||||
impl Deref for Node {
|
||||
type Target = NodeHandle<Config>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
@ -47,26 +42,26 @@ impl Deref for Validator {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Validator {
|
||||
impl Drop for Node {
|
||||
fn drop(&mut self) {
|
||||
if should_persist_tempdir()
|
||||
&& let Err(e) = persist_tempdir(&mut self.handle.tempdir, "logos-blockchain-node")
|
||||
{
|
||||
debug!(error = ?e, "failed to persist validator tempdir");
|
||||
debug!(error = ?e, "failed to persist node tempdir");
|
||||
}
|
||||
|
||||
debug!("stopping validator process");
|
||||
debug!("stopping node process");
|
||||
kill_child(&mut self.handle.child);
|
||||
}
|
||||
}
|
||||
|
||||
impl Validator {
|
||||
/// Check if the validator process is still running
|
||||
impl Node {
|
||||
/// Check if the node process is still running
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
is_running(&mut self.handle.child)
|
||||
}
|
||||
|
||||
/// Wait for the validator process to exit, with a timeout
|
||||
/// Wait for the node process to exit, with a timeout
|
||||
/// Returns true if the process exited within the timeout, false otherwise
|
||||
pub async fn wait_for_exit(&mut self, timeout: Duration) -> bool {
|
||||
self.handle.wait_for_exit(timeout).await
|
||||
@ -77,13 +72,13 @@ impl Validator {
|
||||
let handle = spawn_node(
|
||||
config,
|
||||
&log_prefix,
|
||||
"validator.yaml",
|
||||
"node.yaml",
|
||||
binary_path(),
|
||||
!*IS_DEBUG_TRACING,
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("validator spawned and ready");
|
||||
info!("node spawned and ready");
|
||||
|
||||
Ok(Self { handle })
|
||||
}
|
||||
@ -2,7 +2,7 @@ use async_trait::async_trait;
|
||||
use reqwest::Url;
|
||||
|
||||
use super::DynError;
|
||||
use crate::{nodes::ApiClient, topology::generation::NodeRole};
|
||||
use crate::{nodes::ApiClient, topology::generation::NodeKind};
|
||||
|
||||
/// Marker type used by scenario builders to request node control support.
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
@ -69,18 +69,18 @@ impl RequiresNodeControl for ObservabilityCapability {
|
||||
/// Interface exposed by runners that can restart nodes at runtime.
|
||||
#[async_trait]
|
||||
pub trait NodeControlHandle: Send + Sync {
|
||||
async fn restart_validator(&self, index: usize) -> Result<(), DynError>;
|
||||
async fn restart_node(&self, index: usize) -> Result<(), DynError>;
|
||||
|
||||
async fn start_validator(&self, _name: &str) -> Result<StartedNode, DynError> {
|
||||
Err("start_validator not supported by this deployer".into())
|
||||
async fn start_node(&self, _name: &str) -> Result<StartedNode, DynError> {
|
||||
Err("start_node not supported by this deployer".into())
|
||||
}
|
||||
|
||||
async fn start_validator_with(
|
||||
async fn start_node_with(
|
||||
&self,
|
||||
_name: &str,
|
||||
_options: StartNodeOptions,
|
||||
) -> Result<StartedNode, DynError> {
|
||||
Err("start_validator_with not supported by this deployer".into())
|
||||
Err("start_node_with not supported by this deployer".into())
|
||||
}
|
||||
|
||||
fn node_client(&self, _name: &str) -> Option<ApiClient> {
|
||||
@ -91,6 +91,6 @@ pub trait NodeControlHandle: Send + Sync {
|
||||
#[derive(Clone)]
|
||||
pub struct StartedNode {
|
||||
pub name: String,
|
||||
pub role: NodeRole,
|
||||
pub kind: NodeKind,
|
||||
pub api: ApiClient,
|
||||
}
|
||||
|
||||
@ -5,7 +5,6 @@ use nomos_tracing_service::TracingSettings;
|
||||
use nomos_utils::bounded_duration::{MinimalBoundedDuration, SECOND};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::serde_as;
|
||||
use testing_framework_config::constants::kzg_container_path;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::topology::{configs::wallet::WalletConfig, generation::GeneratedTopology};
|
||||
@ -32,7 +31,6 @@ pub struct CfgSyncConfig {
|
||||
pub old_blobs_check_interval: Duration,
|
||||
#[serde_as(as = "MinimalBoundedDuration<0, SECOND>")]
|
||||
pub blobs_validity_duration: Duration,
|
||||
pub global_params_path: String,
|
||||
pub min_dispersal_peers: usize,
|
||||
pub min_replication_peers: usize,
|
||||
#[serde_as(as = "MinimalBoundedDuration<0, SECOND>")]
|
||||
@ -65,16 +63,12 @@ pub fn render_cfgsync_yaml(cfg: &CfgSyncConfig) -> Result<String> {
|
||||
serde_yaml::to_string(&serializable).context("rendering cfgsync yaml")
|
||||
}
|
||||
|
||||
pub fn apply_topology_overrides(
|
||||
cfg: &mut CfgSyncConfig,
|
||||
topology: &GeneratedTopology,
|
||||
use_kzg_mount: bool,
|
||||
) {
|
||||
pub fn apply_topology_overrides(cfg: &mut CfgSyncConfig, topology: &GeneratedTopology) {
|
||||
debug!(
|
||||
validators = topology.validators().len(),
|
||||
use_kzg_mount, "applying topology overrides to cfgsync config"
|
||||
nodes = topology.nodes().len(),
|
||||
"applying topology overrides to cfgsync config"
|
||||
);
|
||||
let hosts = topology.validators().len();
|
||||
let hosts = topology.nodes().len();
|
||||
cfg.n_hosts = hosts;
|
||||
|
||||
let consensus = &topology.config().consensus_params;
|
||||
@ -83,14 +77,14 @@ pub fn apply_topology_overrides(
|
||||
|
||||
let config = topology.config();
|
||||
cfg.wallet = config.wallet_config.clone();
|
||||
cfg.ids = Some(topology.nodes().map(|node| node.id).collect());
|
||||
cfg.blend_ports = Some(topology.nodes().map(|node| node.blend_port).collect());
|
||||
|
||||
if use_kzg_mount {
|
||||
// Compose mounts the bundle at /kzgrs_test_params; the proving key lives under
|
||||
// pol/.
|
||||
cfg.global_params_path = kzg_container_path()
|
||||
};
|
||||
cfg.ids = Some(topology.nodes().iter().map(|node| node.id).collect());
|
||||
cfg.blend_ports = Some(
|
||||
topology
|
||||
.nodes()
|
||||
.iter()
|
||||
.map(|node| node.blend_port)
|
||||
.collect(),
|
||||
);
|
||||
}
|
||||
|
||||
#[serde_as]
|
||||
@ -114,7 +108,6 @@ struct SerializableCfgSyncConfig {
|
||||
old_blobs_check_interval: Duration,
|
||||
#[serde_as(as = "MinimalBoundedDuration<0, SECOND>")]
|
||||
blobs_validity_duration: Duration,
|
||||
global_params_path: String,
|
||||
min_dispersal_peers: usize,
|
||||
min_replication_peers: usize,
|
||||
#[serde_as(as = "MinimalBoundedDuration<0, SECOND>")]
|
||||
@ -143,7 +136,6 @@ impl From<&CfgSyncConfig> for SerializableCfgSyncConfig {
|
||||
num_subnets: cfg.num_subnets,
|
||||
old_blobs_check_interval: cfg.old_blobs_check_interval,
|
||||
blobs_validity_duration: cfg.blobs_validity_duration,
|
||||
global_params_path: cfg.global_params_path.clone(),
|
||||
min_dispersal_peers: cfg.min_dispersal_peers,
|
||||
min_replication_peers: cfg.min_replication_peers,
|
||||
monitor_failure_time_window: cfg.monitor_failure_time_window,
|
||||
|
||||
@ -104,7 +104,7 @@ pub type ScenarioBuilder = Builder<()>;
|
||||
/// Builder for shaping the scenario topology.
|
||||
pub struct TopologyConfigurator<Caps> {
|
||||
builder: Builder<Caps>,
|
||||
validators: usize,
|
||||
nodes: usize,
|
||||
network_star: bool,
|
||||
}
|
||||
|
||||
@ -123,14 +123,14 @@ impl<Caps: Default> Builder<Caps> {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_node_counts(validators: usize) -> Self {
|
||||
pub fn with_node_counts(nodes: usize) -> Self {
|
||||
Self::new(TopologyBuilder::new(TopologyConfig::with_node_numbers(
|
||||
validators,
|
||||
nodes,
|
||||
)))
|
||||
}
|
||||
|
||||
/// Convenience constructor that immediately enters topology configuration,
|
||||
/// letting callers set counts via `validators`.
|
||||
/// letting callers set counts via `nodes`.
|
||||
pub fn topology() -> TopologyConfigurator<Caps> {
|
||||
TopologyConfigurator::new(Self::new(TopologyBuilder::new(TopologyConfig::empty())))
|
||||
}
|
||||
@ -262,7 +262,7 @@ impl<Caps> Builder<Caps> {
|
||||
let workloads: Vec<Arc<dyn Workload>> = workloads.into_iter().map(Arc::from).collect();
|
||||
|
||||
info!(
|
||||
validators = generated.validators().len(),
|
||||
nodes = generated.nodes().len(),
|
||||
duration_secs = duration.as_secs(),
|
||||
workloads = workloads.len(),
|
||||
expectations = expectations.len(),
|
||||
@ -283,18 +283,25 @@ impl<Caps> TopologyConfigurator<Caps> {
|
||||
const fn new(builder: Builder<Caps>) -> Self {
|
||||
Self {
|
||||
builder,
|
||||
validators: 0,
|
||||
nodes: 0,
|
||||
network_star: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the number of validator nodes.
|
||||
/// Set the number of nodes.
|
||||
#[must_use]
|
||||
pub fn validators(mut self, count: usize) -> Self {
|
||||
self.validators = count;
|
||||
pub fn nodes(mut self, count: usize) -> Self {
|
||||
self.nodes = count;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Legacy alias for node count (kept for downstream compatibility; remove
|
||||
/// later).
|
||||
pub fn validators(self, count: usize) -> Self {
|
||||
self.nodes(count)
|
||||
}
|
||||
|
||||
/// Use a star libp2p network layout.
|
||||
#[must_use]
|
||||
pub fn network_star(mut self) -> Self {
|
||||
@ -305,7 +312,7 @@ impl<Caps> TopologyConfigurator<Caps> {
|
||||
/// Finalize and return the underlying scenario builder.
|
||||
#[must_use]
|
||||
pub fn apply(self) -> Builder<Caps> {
|
||||
let mut config = TopologyConfig::with_node_numbers(self.validators);
|
||||
let mut config = TopologyConfig::with_node_numbers(self.nodes);
|
||||
if self.network_star {
|
||||
config.network_params.libp2p_network_layout = Libp2pNetworkLayout::Star;
|
||||
}
|
||||
|
||||
@ -7,17 +7,17 @@ use thiserror::Error;
|
||||
use tokio::time::{Instant, sleep};
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// Role used for labelling readiness probes.
|
||||
/// Kind used for labelling readiness probes.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub enum NodeRole {
|
||||
Validator,
|
||||
pub enum NodeKind {
|
||||
Node,
|
||||
}
|
||||
|
||||
impl NodeRole {
|
||||
impl NodeKind {
|
||||
#[must_use]
|
||||
pub const fn label(self) -> &'static str {
|
||||
match self {
|
||||
Self::Validator => "validator",
|
||||
Self::Node => "node",
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -26,14 +26,14 @@ impl NodeRole {
|
||||
#[derive(Clone, Copy, Debug, Error)]
|
||||
#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}", role = role.label())]
|
||||
pub struct HttpReadinessError {
|
||||
role: NodeRole,
|
||||
role: NodeKind,
|
||||
port: u16,
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
impl HttpReadinessError {
|
||||
#[must_use]
|
||||
pub const fn new(role: NodeRole, port: u16, timeout: Duration) -> Self {
|
||||
pub const fn new(role: NodeKind, port: u16, timeout: Duration) -> Self {
|
||||
Self {
|
||||
role,
|
||||
port,
|
||||
@ -42,7 +42,7 @@ impl HttpReadinessError {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub const fn role(&self) -> NodeRole {
|
||||
pub const fn role(&self) -> NodeKind {
|
||||
self.role
|
||||
}
|
||||
|
||||
@ -60,7 +60,7 @@ impl HttpReadinessError {
|
||||
/// Wait for HTTP readiness on the provided ports against localhost.
|
||||
pub async fn wait_for_http_ports(
|
||||
ports: &[u16],
|
||||
role: NodeRole,
|
||||
role: NodeKind,
|
||||
timeout_duration: Duration,
|
||||
poll_interval: Duration,
|
||||
) -> Result<(), HttpReadinessError> {
|
||||
@ -70,7 +70,7 @@ pub async fn wait_for_http_ports(
|
||||
/// Wait for HTTP readiness on the provided ports against a specific host.
|
||||
pub async fn wait_for_http_ports_with_host(
|
||||
ports: &[u16],
|
||||
role: NodeRole,
|
||||
role: NodeKind,
|
||||
host: &str,
|
||||
timeout_duration: Duration,
|
||||
poll_interval: Duration,
|
||||
@ -106,7 +106,7 @@ pub async fn wait_for_http_ports_with_host(
|
||||
async fn wait_for_single_port(
|
||||
client: ReqwestClient,
|
||||
port: u16,
|
||||
role: NodeRole,
|
||||
role: NodeKind,
|
||||
host: &str,
|
||||
timeout_duration: Duration,
|
||||
poll_interval: Duration,
|
||||
|
||||
@ -6,8 +6,8 @@ use tracing::warn;
|
||||
|
||||
pub const CONSENSUS_PROCESSED_BLOCKS: &str = "consensus_processed_blocks";
|
||||
pub const CONSENSUS_TRANSACTIONS_TOTAL: &str = "consensus_transactions_total";
|
||||
const CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY: &str =
|
||||
r#"sum(consensus_transactions_total{job=~"validator-.*"})"#;
|
||||
const CONSENSUS_TRANSACTIONS_NODE_QUERY: &str =
|
||||
r#"sum(consensus_transactions_total{job=~"node-.*"})"#;
|
||||
|
||||
/// Telemetry handles available during a run.
|
||||
#[derive(Clone, Default)]
|
||||
@ -71,21 +71,21 @@ impl Metrics {
|
||||
.prometheus()
|
||||
.ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?;
|
||||
|
||||
match handle.instant_samples(CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY) {
|
||||
match handle.instant_samples(CONSENSUS_TRANSACTIONS_NODE_QUERY) {
|
||||
Ok(samples) if !samples.is_empty() => {
|
||||
return Ok(samples.into_iter().map(|sample| sample.value).sum());
|
||||
}
|
||||
Ok(_) => {
|
||||
warn!(
|
||||
query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY,
|
||||
"validator-specific consensus transaction metric returned no samples; falling back to aggregate counter"
|
||||
query = CONSENSUS_TRANSACTIONS_NODE_QUERY,
|
||||
"node-specific consensus transaction metric returned no samples; falling back to aggregate counter"
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY,
|
||||
query = CONSENSUS_TRANSACTIONS_NODE_QUERY,
|
||||
error = %err,
|
||||
"failed to query validator-specific consensus transaction metric; falling back to aggregate counter"
|
||||
"failed to query node-specific consensus transaction metric; falling back to aggregate counter"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -11,7 +11,7 @@ use crate::{
|
||||
topology::{deployment::Topology, generation::GeneratedTopology},
|
||||
};
|
||||
|
||||
/// Collection of API clients for the validatorset.
|
||||
/// Collection of API clients for the node set.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct NodeClients {
|
||||
inner: Arc<RwLock<NodeClientsInner>>,
|
||||
@ -19,49 +19,49 @@ pub struct NodeClients {
|
||||
|
||||
#[derive(Default)]
|
||||
struct NodeClientsInner {
|
||||
validators: Vec<ApiClient>,
|
||||
nodes: Vec<ApiClient>,
|
||||
}
|
||||
|
||||
impl NodeClients {
|
||||
#[must_use]
|
||||
/// Build clients from preconstructed vectors.
|
||||
pub fn new(validators: Vec<ApiClient>) -> Self {
|
||||
pub fn new(nodes: Vec<ApiClient>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(RwLock::new(NodeClientsInner { validators })),
|
||||
inner: Arc::new(RwLock::new(NodeClientsInner { nodes })),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Derive clients from a spawned topology.
|
||||
pub fn from_topology(_descriptors: &GeneratedTopology, topology: &Topology) -> Self {
|
||||
let validator_clients = topology.validators().iter().map(|node| {
|
||||
let node_clients = topology.nodes().iter().map(|node| {
|
||||
let testing = node.testing_url();
|
||||
ApiClient::from_urls(node.url(), testing)
|
||||
});
|
||||
|
||||
Self::new(validator_clients.collect())
|
||||
Self::new(node_clients.collect())
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Validator API clients.
|
||||
pub fn validator_clients(&self) -> Vec<ApiClient> {
|
||||
/// Node API clients.
|
||||
pub fn node_clients(&self) -> Vec<ApiClient> {
|
||||
self.inner
|
||||
.read()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
.validators
|
||||
.nodes
|
||||
.clone()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Choose a random validator client if present.
|
||||
pub fn random_validator(&self) -> Option<ApiClient> {
|
||||
let validators = self.validator_clients();
|
||||
if validators.is_empty() {
|
||||
/// Choose a random node client if present.
|
||||
pub fn random_node(&self) -> Option<ApiClient> {
|
||||
let nodes = self.node_clients();
|
||||
if nodes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let idx = rng.gen_range(0..validators.len());
|
||||
validators.get(idx).cloned()
|
||||
let idx = rng.gen_range(0..nodes.len());
|
||||
nodes.get(idx).cloned()
|
||||
}
|
||||
|
||||
/// Iterator over all clients.
|
||||
@ -71,25 +71,24 @@ impl NodeClients {
|
||||
.read()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
|
||||
guard.validators.iter().cloned().collect()
|
||||
guard.nodes.iter().cloned().collect()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Choose any random client from validators.
|
||||
/// Choose any random client from nodes.
|
||||
pub fn any_client(&self) -> Option<ApiClient> {
|
||||
let guard = self
|
||||
.inner
|
||||
.read()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
|
||||
let validator_count = guard.validators.len();
|
||||
let total = validator_count;
|
||||
let total = guard.nodes.len();
|
||||
if total == 0 {
|
||||
return None;
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let choice = rng.gen_range(0..total);
|
||||
guard.validators.get(choice).cloned()
|
||||
guard.nodes.get(choice).cloned()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
@ -98,13 +97,13 @@ impl NodeClients {
|
||||
ClusterClient::new(self)
|
||||
}
|
||||
|
||||
pub fn add_validator(&self, client: ApiClient) {
|
||||
pub fn add_node(&self, client: ApiClient) {
|
||||
let mut guard = self
|
||||
.inner
|
||||
.write()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
|
||||
guard.validators.push(client);
|
||||
guard.nodes.push(client);
|
||||
}
|
||||
|
||||
pub fn clear(&self) {
|
||||
@ -113,7 +112,7 @@ impl NodeClients {
|
||||
.write()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
|
||||
guard.validators.clear();
|
||||
guard.nodes.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -20,7 +20,7 @@ use thiserror::Error;
|
||||
|
||||
use crate::topology::{
|
||||
configs::{GeneralConfig, time::default_time_config},
|
||||
generation::{GeneratedNodeConfig, GeneratedTopology, NodeRole},
|
||||
generation::{GeneratedNodeConfig, GeneratedTopology, NodeKind},
|
||||
utils::{TopologyResolveError, create_kms_configs, resolve_ids, resolve_ports},
|
||||
};
|
||||
|
||||
@ -51,7 +51,7 @@ pub enum TopologyBuildError {
|
||||
/// High-level topology settings used to generate node configs for a scenario.
|
||||
#[derive(Clone)]
|
||||
pub struct TopologyConfig {
|
||||
pub n_validators: usize,
|
||||
pub n_nodes: usize,
|
||||
pub consensus_params: ConsensusParams,
|
||||
pub network_params: NetworkParams,
|
||||
pub wallet_config: WalletConfig,
|
||||
@ -62,7 +62,7 @@ impl TopologyConfig {
|
||||
#[must_use]
|
||||
pub fn empty() -> Self {
|
||||
Self {
|
||||
n_validators: 0,
|
||||
n_nodes: 0,
|
||||
consensus_params: ConsensusParams::default_for_participants(1),
|
||||
network_params: NetworkParams::default(),
|
||||
wallet_config: WalletConfig::default(),
|
||||
@ -70,10 +70,10 @@ impl TopologyConfig {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Convenience config with two validators for consensus-only scenarios.
|
||||
pub fn two_validators() -> Self {
|
||||
/// Convenience config with two nodes for consensus-only scenarios.
|
||||
pub fn two_nodes() -> Self {
|
||||
Self {
|
||||
n_validators: 2,
|
||||
n_nodes: 2,
|
||||
consensus_params: ConsensusParams::default_for_participants(2),
|
||||
network_params: NetworkParams::default(),
|
||||
wallet_config: WalletConfig::default(),
|
||||
@ -81,12 +81,12 @@ impl TopologyConfig {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Build a topology with explicit validator counts.
|
||||
pub fn with_node_numbers(validators: usize) -> Self {
|
||||
let participants = validators;
|
||||
/// Build a topology with explicit node counts.
|
||||
pub fn with_node_numbers(nodes: usize) -> Self {
|
||||
let participants = nodes;
|
||||
|
||||
Self {
|
||||
n_validators: validators,
|
||||
n_nodes: nodes,
|
||||
consensus_params: ConsensusParams::default_for_participants(participants),
|
||||
network_params: NetworkParams::default(),
|
||||
wallet_config: WalletConfig::default(),
|
||||
@ -133,15 +133,9 @@ impl TopologyBuilder {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub const fn with_validator_count(mut self, validators: usize) -> Self {
|
||||
self.config.n_validators = validators;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Set validator counts.
|
||||
pub const fn with_node_counts(mut self, validators: usize) -> Self {
|
||||
self.config.n_validators = validators;
|
||||
/// Set node counts.
|
||||
pub const fn with_node_count(mut self, nodes: usize) -> Self {
|
||||
self.config.n_nodes = nodes;
|
||||
self
|
||||
}
|
||||
|
||||
@ -197,7 +191,7 @@ impl TopologyBuilder {
|
||||
|
||||
let kms_configs = create_kms_configs(&blend_configs, &config.wallet_config.accounts);
|
||||
|
||||
let validators = build_node_descriptors(
|
||||
let nodes = build_node_descriptors(
|
||||
&config,
|
||||
n_participants,
|
||||
&ids,
|
||||
@ -212,7 +206,7 @@ impl TopologyBuilder {
|
||||
&time_config,
|
||||
)?;
|
||||
|
||||
Ok(GeneratedTopology { config, validators })
|
||||
Ok(GeneratedTopology { config, nodes })
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
@ -222,7 +216,7 @@ impl TopologyBuilder {
|
||||
}
|
||||
|
||||
fn participant_count(config: &TopologyConfig) -> Result<usize, TopologyBuildError> {
|
||||
let n_participants = config.n_validators;
|
||||
let n_participants = config.n_nodes;
|
||||
if n_participants == 0 {
|
||||
return Err(TopologyBuildError::EmptyParticipants);
|
||||
}
|
||||
@ -298,7 +292,7 @@ fn build_node_descriptors(
|
||||
kms_configs: &[key_management_system_service::backend::preload::PreloadKMSBackendSettings],
|
||||
time_config: &testing_framework_config::topology::configs::time::GeneralTimeConfig,
|
||||
) -> Result<Vec<GeneratedNodeConfig>, TopologyBuildError> {
|
||||
let mut validators = Vec::with_capacity(config.n_validators);
|
||||
let mut nodes = Vec::with_capacity(config.n_nodes);
|
||||
|
||||
for i in 0..n_participants {
|
||||
let consensus_config =
|
||||
@ -325,21 +319,19 @@ fn build_node_descriptors(
|
||||
kms_config,
|
||||
};
|
||||
|
||||
let (role, index) = (NodeRole::Validator, i);
|
||||
let (kind, index) = (NodeKind::Node, i);
|
||||
let descriptor = GeneratedNodeConfig {
|
||||
role,
|
||||
kind,
|
||||
index,
|
||||
id,
|
||||
general,
|
||||
blend_port,
|
||||
};
|
||||
|
||||
match role {
|
||||
NodeRole::Validator => validators.push(descriptor),
|
||||
}
|
||||
nodes.push(descriptor);
|
||||
}
|
||||
|
||||
Ok(validators)
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
fn get_cloned<T: Clone>(
|
||||
|
||||
@ -5,7 +5,7 @@ use thiserror::Error;
|
||||
use crate::{
|
||||
nodes::{
|
||||
common::node::SpawnNodeError,
|
||||
validator::{Validator, create_validator_config},
|
||||
node::{Node, create_node_config},
|
||||
},
|
||||
topology::{
|
||||
config::{TopologyBuildError, TopologyBuilder, TopologyConfig},
|
||||
@ -18,10 +18,10 @@ use crate::{
|
||||
|
||||
/// Runtime representation of a spawned topology with running nodes.
|
||||
pub struct Topology {
|
||||
pub(crate) validators: Vec<Validator>,
|
||||
pub(crate) nodes: Vec<Node>,
|
||||
}
|
||||
|
||||
pub type DeployedNodes = Vec<Validator>;
|
||||
pub type DeployedNodes = Vec<Node>;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum SpawnTopologyError {
|
||||
@ -34,15 +34,15 @@ pub enum SpawnTopologyError {
|
||||
impl Topology {
|
||||
pub async fn spawn(config: TopologyConfig) -> Result<Self, SpawnTopologyError> {
|
||||
let generated = TopologyBuilder::new(config.clone()).build()?;
|
||||
let n_validators = config.n_validators;
|
||||
let n_nodes = config.n_nodes;
|
||||
let node_configs = generated
|
||||
.nodes()
|
||||
.iter()
|
||||
.map(|node| node.general.clone())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let validators = Self::spawn_validators(node_configs, n_validators).await?;
|
||||
let nodes = Self::spawn_nodes(node_configs, n_nodes).await?;
|
||||
|
||||
Ok(Self { validators })
|
||||
Ok(Self { nodes })
|
||||
}
|
||||
|
||||
pub async fn spawn_with_empty_membership(
|
||||
@ -56,32 +56,32 @@ impl Topology {
|
||||
.build()?;
|
||||
|
||||
let node_configs = generated
|
||||
.nodes()
|
||||
.iter()
|
||||
.map(|node| node.general.clone())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let validators = Self::spawn_validators(node_configs, config.n_validators).await?;
|
||||
let nodes = Self::spawn_nodes(node_configs, config.n_nodes).await?;
|
||||
|
||||
Ok(Self { validators })
|
||||
Ok(Self { nodes })
|
||||
}
|
||||
|
||||
pub(crate) async fn spawn_validators(
|
||||
pub(crate) async fn spawn_nodes(
|
||||
config: Vec<GeneralConfig>,
|
||||
n_validators: usize,
|
||||
n_nodes: usize,
|
||||
) -> Result<DeployedNodes, SpawnTopologyError> {
|
||||
let mut validators = Vec::new();
|
||||
for i in 0..n_validators {
|
||||
let config = create_validator_config(config[i].clone());
|
||||
let label = format!("validator-{i}");
|
||||
validators.push(Validator::spawn(config, &label).await?);
|
||||
let mut nodes = Vec::new();
|
||||
for i in 0..n_nodes {
|
||||
let config = create_node_config(config[i].clone());
|
||||
let label = format!("node-{i}");
|
||||
nodes.push(Node::spawn(config, &label).await?);
|
||||
}
|
||||
|
||||
Ok(validators)
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn validators(&self) -> &[Validator] {
|
||||
&self.validators
|
||||
pub fn nodes(&self) -> &[Node] {
|
||||
&self.nodes
|
||||
}
|
||||
|
||||
pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> {
|
||||
@ -105,14 +105,14 @@ impl Topology {
|
||||
}
|
||||
|
||||
fn node_listen_ports(&self) -> Vec<u16> {
|
||||
self.validators
|
||||
self.nodes
|
||||
.iter()
|
||||
.map(|node| node.config().network.backend.swarm.port)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn node_initial_peer_ports(&self) -> Vec<HashSet<u16>> {
|
||||
self.validators
|
||||
self.nodes
|
||||
.iter()
|
||||
.map(|node| {
|
||||
node.config()
|
||||
@ -127,15 +127,10 @@ impl Topology {
|
||||
}
|
||||
|
||||
fn node_labels(&self) -> Vec<String> {
|
||||
self.validators
|
||||
self.nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, node)| {
|
||||
format!(
|
||||
"validator#{idx}@{}",
|
||||
node.config().network.backend.swarm.port
|
||||
)
|
||||
})
|
||||
.map(|(idx, node)| format!("node#{idx}@{}", node.config().network.backend.swarm.port))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
@ -9,16 +9,16 @@ use crate::topology::{
readiness::{HttpNetworkReadiness, ReadinessCheck, ReadinessError},
};

/// Node role within the generated topology.
/// Node kind within the generated topology.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum NodeRole {
Validator,
pub enum NodeKind {
Node,
}

/// Fully generated configuration for an individual node.
#[derive(Clone)]
pub struct GeneratedNodeConfig {
pub role: NodeRole,
pub kind: NodeKind,
pub index: usize,
pub id: [u8; 32],
pub general: GeneralConfig,
@ -27,9 +27,9 @@ pub struct GeneratedNodeConfig {

impl GeneratedNodeConfig {
#[must_use]
/// Logical role of the node.
pub const fn role(&self) -> NodeRole {
self.role
/// Logical kind of the node.
pub const fn kind(&self) -> NodeKind {
self.kind
}

#[must_use]
@ -59,7 +59,7 @@ impl GeneratedNodeConfig {
#[derive(Clone)]
pub struct GeneratedTopology {
pub(crate) config: TopologyConfig,
pub(crate) validators: Vec<GeneratedNodeConfig>,
pub(crate) nodes: Vec<GeneratedNodeConfig>,
}

impl GeneratedTopology {
@ -70,20 +70,20 @@ impl GeneratedTopology {
}

#[must_use]
/// All validator configs.
pub fn validators(&self) -> &[GeneratedNodeConfig] {
&self.validators
/// All node configs.
pub fn nodes(&self) -> &[GeneratedNodeConfig] {
&self.nodes
}

/// Iterator over all node configs in role order.
pub fn nodes(&self) -> impl Iterator<Item = &GeneratedNodeConfig> {
self.validators.iter()
pub fn iter(&self) -> impl Iterator<Item = &GeneratedNodeConfig> {
self.nodes.iter()
}

#[must_use]
/// Slot duration from the first node (assumes homogeneous configs).
pub fn slot_duration(&self) -> Option<Duration> {
self.validators
self.nodes
.first()
.map(|node| node.general.time_config.slot_duration)
}
@ -96,21 +96,21 @@ impl GeneratedTopology {

pub async fn spawn_local(&self) -> Result<Topology, SpawnTopologyError> {
let configs = self
.nodes()
.iter()
.map(|node| node.general.clone())
.collect::<Vec<_>>();

let validators = Topology::spawn_validators(configs, self.config.n_validators).await?;
let nodes = Topology::spawn_nodes(configs, self.config.n_nodes).await?;

Ok(Topology { validators })
Ok(Topology { nodes })
}

pub async fn wait_remote_readiness(
&self,
// Node endpoints
validator_endpoints: &[Url],
node_endpoints: &[Url],
) -> Result<(), ReadinessError> {
let total_nodes = self.validators.len();
let total_nodes = self.nodes.len();
if total_nodes == 0 {
return Ok(());
}
@ -118,20 +118,20 @@ impl GeneratedTopology {
let labels = self.labels();
let client = Client::new();

let endpoints = collect_node_endpoints(self, validator_endpoints, total_nodes);
let endpoints = collect_node_endpoints(self, node_endpoints, total_nodes);

wait_for_network_readiness(self, &client, &endpoints, &labels).await
}

fn listen_ports(&self) -> Vec<u16> {
self.validators
self.nodes
.iter()
.map(|node| node.general.network_config.backend.swarm.port)
.collect()
}

fn initial_peer_ports(&self) -> Vec<HashSet<u16>> {
self.validators
self.nodes
.iter()
.map(|node| {
node.general
@ -146,12 +146,12 @@ impl GeneratedTopology {
}

fn labels(&self) -> Vec<String> {
self.validators
self.nodes
.iter()
.enumerate()
.map(|(idx, node)| {
format!(
"validator#{idx}@{}",
"node#{idx}@{}",
node.general.network_config.backend.swarm.port
)
})
@ -161,17 +161,17 @@ impl GeneratedTopology {

fn collect_node_endpoints(
topology: &GeneratedTopology,
validator_endpoints: &[Url],
node_endpoints: &[Url],
total_nodes: usize,
) -> Vec<Url> {
assert_eq!(
topology.validators.len(),
validator_endpoints.len(),
"validator endpoints must match topology"
topology.nodes.len(),
node_endpoints.len(),
"node endpoints must match topology"
);

let mut endpoints = Vec::with_capacity(total_nodes);
endpoints.extend_from_slice(validator_endpoints);
endpoints.extend_from_slice(node_endpoints);
endpoints
}

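A hedged sketch of the renamed `GeneratedTopology` accessors: `nodes()` now returns the full slice and `iter()` replaces the old role-ordered iterator. The types are the ones renamed above; everything printed is illustrative.

```rust
// Sketch only: `GeneratedTopology`, `GeneratedNodeConfig`, and `NodeKind` are
// the types renamed in the hunks above; the output shown here is illustrative.
use testing_framework_core::topology::generation::{GeneratedTopology, NodeKind};

fn describe(generated: &GeneratedTopology) {
    println!("total nodes: {}", generated.nodes().len());
    for node in generated.iter() {
        // Every generated config now carries the single `NodeKind::Node` kind.
        assert_eq!(node.kind(), NodeKind::Node);
        println!("node {} id prefix {:02x?}", node.index(), &node.id[..4]);
    }
}
```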
@ -37,7 +37,7 @@ impl<'a> ReadinessCheck<'a> for NetworkReadiness<'a> {
type Data = Vec<NodeNetworkStatus>;

async fn collect(&'a self) -> Self::Data {
collect_validator_statuses(self).await
collect_node_statuses(self).await
}

fn is_ready(&self, data: &Self::Data) -> bool {
@ -101,10 +101,10 @@ impl<'a> ReadinessCheck<'a> for HttpNetworkReadiness<'a> {
}
}

async fn collect_validator_statuses(readiness: &NetworkReadiness<'_>) -> Vec<NodeNetworkStatus> {
let validator_futures = readiness
async fn collect_node_statuses(readiness: &NetworkReadiness<'_>) -> Vec<NodeNetworkStatus> {
let node_futures = readiness
.topology
.validators
.nodes
.iter()
.enumerate()
.map(|(idx, node)| {
@ -112,7 +112,7 @@ async fn collect_validator_statuses(readiness: &NetworkReadiness<'_>) -> Vec<Nod
.labels
.get(idx)
.cloned()
.unwrap_or_else(|| format!("validator#{idx}"));
.unwrap_or_else(|| format!("node#{idx}"));
let expected_peers = readiness.expected_peer_counts.get(idx).copied();
async move {
let result = node
@ -128,7 +128,7 @@ async fn collect_validator_statuses(readiness: &NetworkReadiness<'_>) -> Vec<Nod
}
});

futures::future::join_all(validator_futures).await
futures::future::join_all(node_futures).await
}

pub async fn try_fetch_network_info(

@ -36,5 +36,4 @@ groth16 = { workspace = true }
key-management-system-service = { workspace = true }
nomos-core = { workspace = true }
nomos-ledger = { workspace = true }
tests = { workspace = true }
zksign = { workspace = true }

@ -1,5 +1,5 @@
services:
{% for node in validators %}
{% for node in nodes %}
{{ node.name }}:
image: {{ node.image }}
{% if node.platform %} platform: {{ node.platform }}

@ -59,7 +59,7 @@ impl ClientBuilder {
.await);
}
};
info!("block feed connected to validator");
info!("block feed connected to node");
Ok(pair)
}
}

@ -102,14 +102,14 @@ mod tests {
use testing_framework_core::{
scenario::ScenarioBuilder,
topology::{
generation::{GeneratedNodeConfig, GeneratedTopology, NodeRole as TopologyNodeRole},
generation::{GeneratedNodeConfig, GeneratedTopology},
utils::multiaddr_port,
},
};

#[test]
fn cfgsync_prebuilt_configs_preserve_genesis() {
let scenario = ScenarioBuilder::topology_with(|t| t.validators(1))
let scenario = ScenarioBuilder::topology_with(|t| t.nodes(1))
.build()
.expect("scenario build should succeed");
let topology = scenario.topology().clone();
@ -120,8 +120,14 @@ mod tests {
&topology.config().consensus_params,
&tracing_settings,
&topology.config().wallet_config,
Some(topology.nodes().map(|node| node.id).collect()),
Some(topology.nodes().map(|node| node.blend_port).collect()),
Some(topology.nodes().iter().map(|node| node.id).collect()),
Some(
topology
.nodes()
.iter()
.map(|node| node.blend_port)
.collect(),
),
hosts,
)
.expect("cfgsync config generation should succeed");
@ -131,7 +137,7 @@ mod tests {
.collect();

for node in topology.nodes() {
let identifier = identifier_for(node.role(), node.index());
let identifier = identifier_for(node.index());
let cfgsync_config = configs_by_identifier
.get(&identifier)
.unwrap_or_else(|| panic!("missing cfgsync config for {identifier}"));
@ -162,7 +168,7 @@ mod tests {

#[test]
fn cfgsync_genesis_proofs_verify_against_ledger() {
let scenario = ScenarioBuilder::topology_with(|t| t.validators(1))
let scenario = ScenarioBuilder::topology_with(|t| t.nodes(1))
.build()
.expect("scenario build should succeed");
let topology = scenario.topology().clone();
@ -173,8 +179,14 @@ mod tests {
&topology.config().consensus_params,
&tracing_settings,
&topology.config().wallet_config,
Some(topology.nodes().map(|node| node.id).collect()),
Some(topology.nodes().map(|node| node.blend_port).collect()),
Some(topology.nodes().iter().map(|node| node.id).collect()),
Some(
topology
.nodes()
.iter()
.map(|node| node.blend_port)
.collect(),
),
hosts,
)
.expect("cfgsync config generation should succeed");
@ -184,7 +196,7 @@ mod tests {
.collect();

for node in topology.nodes() {
let identifier = identifier_for(node.role(), node.index());
let identifier = identifier_for(node.index());
let cfgsync_config = configs_by_identifier
.get(&identifier)
.unwrap_or_else(|| panic!("missing cfgsync config for {identifier}"));
@ -199,7 +211,7 @@ mod tests {

#[test]
fn cfgsync_docker_overrides_produce_valid_genesis() {
let scenario = ScenarioBuilder::topology_with(|t| t.validators(3))
let scenario = ScenarioBuilder::topology_with(|t| t.nodes(3))
.build()
.expect("scenario build should succeed");
let topology = scenario.topology().clone();
@ -210,8 +222,14 @@ mod tests {
&topology.config().consensus_params,
&tracing_settings,
&topology.config().wallet_config,
Some(topology.nodes().map(|node| node.id).collect()),
Some(topology.nodes().map(|node| node.blend_port).collect()),
Some(topology.nodes().iter().map(|node| node.id).collect()),
Some(
topology
.nodes()
.iter()
.map(|node| node.blend_port)
.collect(),
),
hosts,
)
.expect("cfgsync config generation should succeed");
@ -231,7 +249,7 @@ mod tests {

#[test]
fn cfgsync_configs_match_topology_ports_and_genesis() {
let scenario = ScenarioBuilder::topology_with(|t| t.validators(2))
let scenario = ScenarioBuilder::topology_with(|t| t.nodes(2))
.build()
.expect("scenario build should succeed");
let topology = scenario.topology().clone();
@ -242,8 +260,14 @@ mod tests {
&topology.config().consensus_params,
&tracing_settings,
&topology.config().wallet_config,
Some(topology.nodes().map(|node| node.id).collect()),
Some(topology.nodes().map(|node| node.blend_port).collect()),
Some(topology.nodes().iter().map(|node| node.id).collect()),
Some(
topology
.nodes()
.iter()
.map(|node| node.blend_port)
.collect(),
),
hosts,
)
.expect("cfgsync config generation should succeed");
@ -253,7 +277,7 @@ mod tests {
.collect();

for node in topology.nodes() {
let identifier = identifier_for(node.role(), node.index());
let identifier = identifier_for(node.index());
let cfg = configs_by_identifier
.get(&identifier)
.unwrap_or_else(|| panic!("missing cfgsync config for {identifier}"));
@ -290,29 +314,30 @@ mod tests {
}

fn hosts_from_topology(topology: &GeneratedTopology) -> Vec<Host> {
topology.nodes().map(host_from_node).collect()
topology.nodes().iter().map(host_from_node).collect()
}

fn docker_style_hosts(topology: &GeneratedTopology) -> Vec<Host> {
topology
.nodes()
.iter()
.map(|node| docker_host(node, 10 + node.index() as u8))
.collect()
}

fn host_from_node(node: &GeneratedNodeConfig) -> Host {
let identifier = identifier_for(node.role(), node.index());
let identifier = identifier_for(node.index());
let ip = Ipv4Addr::LOCALHOST;
let mut host = make_host(node.role(), ip, identifier);
let mut host = make_host(ip, identifier);
host.network_port = node.network_port();
host.blend_port = node.blend_port;
host
}

fn docker_host(node: &GeneratedNodeConfig, octet: u8) -> Host {
let identifier = identifier_for(node.role(), node.index());
let identifier = identifier_for(node.index());
let ip = Ipv4Addr::new(172, 23, 0, octet);
let mut host = make_host(node.role(), ip, identifier);
let mut host = make_host(ip, identifier);
host.network_port = node.network_port().saturating_add(1000);
host.blend_port = node.blend_port.saturating_add(1000);
host
@ -320,7 +345,7 @@ mod tests {

fn tracing_settings(topology: &GeneratedTopology) -> TracingSettings {
topology
.validators()
.nodes()
.first()
.expect("topology must contain at least one node")
.general
@ -329,22 +354,18 @@ mod tests {
.clone()
}

fn identifier_for(role: TopologyNodeRole, index: usize) -> String {
match role {
TopologyNodeRole::Validator => format!("validator-{index}"),
}
fn identifier_for(index: usize) -> String {
format!("node-{index}")
}

fn make_host(role: TopologyNodeRole, ip: Ipv4Addr, identifier: String) -> Host {
fn make_host(ip: Ipv4Addr, identifier: String) -> Host {
let ports = PortOverrides {
network_port: None,
blend_port: None,
api_port: None,
testing_http_port: None,
};
match role {
TopologyNodeRole::Validator => Host::validator_from_ip(ip, identifier, ports),
}
Host::node_from_ip(ip, identifier, ports)
}

fn declaration_fingerprint<G>(

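The tests above now request topologies purely by node count. A condensed, hedged sketch of that entry point; `ScenarioBuilder::topology_with` and `t.nodes(..)` are the calls used by the tests, while the assertion is added only for illustration.

```rust
// Sketch only: mirrors the builder calls used by the cfgsync tests above.
use testing_framework_core::scenario::ScenarioBuilder;

fn two_node_scenario() {
    let scenario = ScenarioBuilder::topology_with(|t| t.nodes(2))
        .build()
        .expect("scenario build should succeed");
    // Illustrative check: the generated topology carries one config per node.
    assert_eq!(scenario.topology().nodes().len(), 2);
}
```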
@ -50,7 +50,7 @@ impl DeploymentOrchestrator {
} = setup.prepare_workspace(&observability).await?;

tracing::info!(
validators = descriptors.validators().len(),
nodes = descriptors.nodes().len(),
duration_secs = scenario.duration().as_secs(),
readiness_checks = self.deployer.readiness_checks,
metrics_query_url = observability.metrics_query_url.as_ref().map(|u| u.as_str()),
@ -62,7 +62,7 @@ impl DeploymentOrchestrator {
"compose deployment starting"
);

let validator_count = descriptors.validators().len();
let node_count = descriptors.nodes().len();
let host_ports = PortManager::prepare(&mut environment, &descriptors).await?;

wait_for_readiness_or_grace_period(
@ -102,7 +102,7 @@ impl DeploymentOrchestrator {
);

info!(
validators = validator_count,
nodes = node_count,
duration_secs = scenario.duration().as_secs(),
readiness_checks = self.deployer.readiness_checks,
host,
@ -195,22 +195,22 @@ fn maybe_print_endpoints(observability: &ObservabilityInputs, host: &str, ports:
}

fn log_profiling_urls(host: &str, ports: &HostPortMapping) {
for (idx, node) in ports.validators.iter().enumerate() {
for (idx, node) in ports.nodes.iter().enumerate() {
tracing::info!(
validator = idx,
node = idx,
profiling_url = %format!(
"http://{}:{}/debug/pprof/profile?seconds=15&format=proto",
host, node.api
),
"validator profiling endpoint (profiling feature required)"
"node profiling endpoint (profiling feature required)"
);
}
}

fn print_profiling_urls(host: &str, ports: &HostPortMapping) {
for (idx, node) in ports.validators.iter().enumerate() {
for (idx, node) in ports.nodes.iter().enumerate() {
println!(
"TESTNET_PPROF validator_{}=http://{}:{}/debug/pprof/profile?seconds=15&format=proto",
"TESTNET_PPROF node_{}=http://{}:{}/debug/pprof/profile?seconds=15&format=proto",
idx, host, node.api
);
}

@ -17,13 +17,13 @@ impl PortManager {
descriptors: &GeneratedTopology,
) -> Result<HostPortMapping, ComposeRunnerError> {
debug!(
validators = descriptors.validators().len(),
nodes = descriptors.nodes().len(),
"resolving host ports for compose services"
);
match discover_host_ports(environment, descriptors).await {
Ok(mapping) => {
info!(
validator_ports = ?mapping.validator_api_ports(),
node_ports = ?mapping.node_api_ports(),
"resolved container host ports"
);
Ok(mapping)

@ -7,7 +7,7 @@ use crate::{
environment::StackEnvironment,
ports::{HostPortMapping, ensure_remote_readiness_with_ports},
},
lifecycle::readiness::ensure_validators_ready_with_ports,
lifecycle::readiness::ensure_nodes_ready_with_ports,
};

pub struct ReadinessChecker;
@ -18,13 +18,13 @@ impl ReadinessChecker {
host_ports: &HostPortMapping,
environment: &mut StackEnvironment,
) -> Result<(), ComposeRunnerError> {
let validator_ports = host_ports.validator_api_ports();
info!(ports = ?validator_ports, "waiting for validator HTTP endpoints");
if let Err(err) = ensure_validators_ready_with_ports(&validator_ports).await {
let node_ports = host_ports.node_api_ports();
info!(ports = ?node_ports, "waiting for node HTTP endpoints");
if let Err(err) = ensure_nodes_ready_with_ports(&node_ports).await {
return fail_readiness_step(
environment,
"validator readiness failed",
"validator readiness failed",
"node readiness failed",
"node readiness failed",
err,
)
.await;

@ -32,7 +32,7 @@ impl DeploymentSetup {
ensure_supported_topology(&self.descriptors)?;

info!(
validators = self.descriptors.validators().len(),
nodes = self.descriptors.nodes().len(),
"starting compose deployment"
);

@ -4,10 +4,7 @@ use std::{
};

use serde::Serialize;
use testing_framework_core::{
kzg::KzgParamsSpec,
topology::generation::{GeneratedNodeConfig, GeneratedTopology},
};
use testing_framework_core::topology::generation::{GeneratedNodeConfig, GeneratedTopology};
use testing_framework_env as tf_env;

use crate::docker::platform::{host_gateway_entry, resolve_image};
@ -20,7 +17,7 @@ use testing_framework_config::constants::DEFAULT_CFGSYNC_PORT;
/// Top-level docker-compose descriptor built from a GeneratedTopology.
#[derive(Clone, Debug, Serialize)]
pub struct ComposeDescriptor {
validators: Vec<NodeDescriptor>,
nodes: Vec<NodeDescriptor>,
}

impl ComposeDescriptor {
@ -31,8 +28,8 @@ impl ComposeDescriptor {
}

#[cfg(test)]
pub fn validators(&self) -> &[NodeDescriptor] {
&self.validators
pub fn nodes(&self) -> &[NodeDescriptor] {
&self.nodes
}
}

@ -40,7 +37,6 @@ impl ComposeDescriptor {
/// template.
pub struct ComposeDescriptorBuilder<'a> {
topology: &'a GeneratedTopology,
use_kzg_mount: bool,
cfgsync_port: Option<u16>,
}

@ -48,18 +44,10 @@ impl<'a> ComposeDescriptorBuilder<'a> {
const fn new(topology: &'a GeneratedTopology) -> Self {
Self {
topology,
use_kzg_mount: false,
cfgsync_port: None,
}
}

#[must_use]
/// Mount KZG parameters into nodes when enabled.
pub const fn with_kzg_mount(mut self, enabled: bool) -> Self {
self.use_kzg_mount = enabled;
self
}

#[must_use]
/// Set cfgsync port for nodes.
pub const fn with_cfgsync_port(mut self, port: u16) -> Self {
@ -74,34 +62,33 @@ impl<'a> ComposeDescriptorBuilder<'a> {

let (image, platform) = resolve_image();

let validators = build_nodes(
self.topology.validators(),
ComposeNodeKind::Validator,
let nodes = build_nodes(
self.topology.nodes(),
ComposeNodeKind::Node,
&image,
platform.as_deref(),
self.use_kzg_mount,
cfgsync_port,
);

ComposeDescriptor { validators }
ComposeDescriptor { nodes }
}
}

#[derive(Clone, Copy)]
pub(crate) enum ComposeNodeKind {
Validator,
Node,
}

impl ComposeNodeKind {
fn instance_name(self, index: usize) -> String {
match self {
Self::Validator => format!("validator-{index}"),
Self::Node => format!("node-{index}"),
}
}

const fn entrypoint(self) -> &'static str {
match self {
Self::Validator => "/etc/nomos/scripts/run_nomos_node.sh",
Self::Node => "/etc/nomos/scripts/run_nomos_node.sh",
}
}
}
@ -111,31 +98,19 @@ fn build_nodes(
kind: ComposeNodeKind,
image: &str,
platform: Option<&str>,
use_kzg_mount: bool,
cfgsync_port: u16,
) -> Vec<NodeDescriptor> {
nodes
.iter()
.enumerate()
.map(|(index, node)| {
NodeDescriptor::from_node(
kind,
index,
node,
image,
platform,
use_kzg_mount,
cfgsync_port,
)
NodeDescriptor::from_node(kind, index, node, image, platform, cfgsync_port)
})
.collect()
}

fn base_volumes(use_kzg_mount: bool) -> Vec<String> {
fn base_volumes() -> Vec<String> {
let mut volumes = vec!["./stack:/etc/nomos".into()];
if use_kzg_mount {
volumes.push("./kzgrs_test_params:/kzgrs_test_params:z".into());
}
if let Some(host_log_dir) = repo_root()
.map(|root| root.join("tmp").join("node-logs"))
.map(|dir| dir.display().to_string())
@ -160,18 +135,16 @@ fn default_extra_hosts() -> Vec<String> {
host_gateway_entry().into_iter().collect()
}

fn base_environment(cfgsync_port: u16, use_kzg_mount: bool) -> Vec<EnvEntry> {
fn base_environment(cfgsync_port: u16) -> Vec<EnvEntry> {
let pol_mode = tf_env::pol_proof_dev_mode().unwrap_or_else(|| "true".to_string());
let rust_log = tf_env::rust_log().unwrap_or_else(|| "info".to_string());
let nomos_log_level = tf_env::nomos_log_level().unwrap_or_else(|| "info".to_string());
let time_backend = tf_env::nomos_time_backend().unwrap_or_else(|| "monotonic".into());
let kzg_path = KzgParamsSpec::for_compose(use_kzg_mount).node_params_path;
vec![
EnvEntry::new("POL_PROOF_DEV_MODE", pol_mode),
EnvEntry::new("RUST_LOG", rust_log),
EnvEntry::new("NOMOS_LOG_LEVEL", nomos_log_level),
EnvEntry::new("NOMOS_TIME_BACKEND", time_backend),
EnvEntry::new("LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH", kzg_path),
EnvEntry::new(
"CFG_SERVER_ADDR",
format!("http://host.docker.internal:{cfgsync_port}"),

@ -3,7 +3,7 @@ use testing_framework_core::topology::generation::GeneratedNodeConfig;

use super::{ComposeNodeKind, base_environment, base_volumes, default_extra_hosts};

/// Describes a validator container in the compose stack.
/// Describes a node container in the compose stack.
#[derive(Clone, Debug, Serialize)]
pub struct NodeDescriptor {
name: String,
@ -50,10 +50,9 @@ impl NodeDescriptor {
node: &GeneratedNodeConfig,
image: &str,
platform: Option<&str>,
use_kzg_mount: bool,
cfgsync_port: u16,
) -> Self {
let mut environment = base_environment(cfgsync_port, use_kzg_mount);
let mut environment = base_environment(cfgsync_port);
let identifier = kind.instance_name(index);
let api_port = node.general.api_config.address.port();
let testing_port = node.general.api_config.testing_http_address.port();
@ -80,7 +79,7 @@ impl NodeDescriptor {
name: kind.instance_name(index),
image: image.to_owned(),
entrypoint: kind.entrypoint().to_owned(),
volumes: base_volumes(use_kzg_mount),
volumes: base_volumes(),
extra_hosts: default_extra_hosts(),
ports,
environment,

@ -45,13 +45,13 @@ pub struct ComposeNodeControl {

#[async_trait::async_trait]
impl NodeControlHandle for ComposeNodeControl {
async fn restart_validator(&self, index: usize) -> Result<(), DynError> {
async fn restart_node(&self, index: usize) -> Result<(), DynError> {
restart_compose_service(
&self.compose_file,
&self.project_name,
&format!("validator-{index}"),
&format!("node-{index}"),
)
.await
.map_err(|err| format!("validator restart failed: {err}").into())
.map_err(|err| format!("node restart failed: {err}").into())
}
}

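A hedged sketch of driving the renamed control handle; `NodeControlHandle`, `restart_node`, and `DynError` are crate-local names from the hunk above, and the caller is hypothetical.

```rust
// Sketch only: restarts the compose service named `node-0` through the trait
// object; assumes the crate-local `NodeControlHandle` trait and `DynError`
// alias shown in the diff above are in scope.
async fn bounce_first_node(control: &dyn NodeControlHandle) -> Result<(), DynError> {
    control.restart_node(0).await
}
```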
@ -5,10 +5,7 @@ use std::{

use anyhow::{Context as _, Result};
use tempfile::TempDir;
use testing_framework_config::constants::{
DEFAULT_ASSETS_STACK_DIR, DEFAULT_KZG_HOST_DIR, KZG_PARAMS_FILENAME,
};
use testing_framework_env;
use testing_framework_config::constants::DEFAULT_ASSETS_STACK_DIR;
use tracing::{debug, info};

/// Copy the repository stack assets into a scenario-specific temp dir.
@ -54,40 +51,6 @@ impl ComposeWorkspace {
copy_dir_recursive(&scripts_source, &temp.path().join("stack/scripts"))?;
}

let kzg_source = repo_root.join(
testing_framework_env::nomos_kzg_dir_rel()
.unwrap_or_else(|| DEFAULT_KZG_HOST_DIR.to_string()),
);
let target = temp.path().join(KZG_PARAMS_FILENAME);
if kzg_source.exists() {
if kzg_source.is_dir() {
copy_dir_recursive(&kzg_source, &target)?;
} else {
fs::copy(&kzg_source, &target).with_context(|| {
format!("copying {} -> {}", kzg_source.display(), target.display())
})?;
}
}
// Fail fast if the KZG bundle is missing or empty; DA verifier will panic
// otherwise.
if !target.exists()
|| fs::read_dir(&target)
.ok()
.map(|mut it| it.next().is_none())
.unwrap_or(true)
{
anyhow::bail!(
"\nKZG params missing in stack assets (expected files in {})\
\nrepo_root: {}\
\ntarget: {}\
\nnomos_kzg_dir_rel(): {:?}\n",
kzg_source.display(),
repo_root.display(),
target.display(),
testing_framework_env::nomos_kzg_dir_rel(),
);
}

info!(root = %temp.path().display(), "compose workspace created");
Ok(Self { root: temp })
}

@ -3,7 +3,7 @@ use std::path::PathBuf;
use testing_framework_core::{
scenario::{
MetricsError,
http_probe::{HttpReadinessError, NodeRole},
http_probe::{HttpReadinessError, NodeKind},
},
topology::readiness::ReadinessError,
};
@ -14,8 +14,8 @@ use crate::{docker::commands::ComposeCommandError, infrastructure::template::Tem
#[derive(Debug, thiserror::Error)]
/// Top-level compose runner errors.
pub enum ComposeRunnerError {
#[error("compose runner requires at least one validator (validators={validators})")]
MissingValidator { validators: usize },
#[error("compose runner requires at least one node (nodes={nodes})")]
MissingNode { nodes: usize },
#[error("docker does not appear to be available on this host")]
DockerUnavailable,
#[error("failed to resolve host port for {service} container port {container_port}: {source}")]
@ -37,7 +37,7 @@ pub enum ComposeRunnerError {
NodeClients(#[from] NodeClientError),
#[error(transparent)]
Telemetry(#[from] MetricsError),
#[error("block feed requires at least one validator client")]
#[error("block feed requires at least one node client")]
BlockFeedMissing,
#[error("failed to start block feed: {source}")]
BlockFeed {
@ -105,7 +105,7 @@ pub enum StackReadinessError {
Http(#[from] HttpReadinessError),
#[error("failed to build readiness URL for {role} port {port}: {source}", role = role.label())]
Endpoint {
role: NodeRole,
role: NodeKind,
port: u16,
#[source]
source: ParseError,
@ -125,7 +125,7 @@ pub enum NodeClientError {
role = role.label()
)]
Endpoint {
role: NodeRole,
role: NodeKind,
endpoint: &'static str,
port: u16,
#[source]

@ -4,7 +4,6 @@ use nomos_tracing::metrics::otlp::OtlpMetricsConfig;
use nomos_tracing_service::MetricsLayer;
use reqwest::Url;
use testing_framework_core::{
kzg::KzgParamsSpec,
scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, write_cfgsync_template},
topology::generation::GeneratedTopology,
};
@ -62,21 +61,18 @@ impl Drop for CfgsyncServerHandle {
pub fn update_cfgsync_config(
path: &Path,
topology: &GeneratedTopology,
use_kzg_mount: bool,
port: u16,
metrics_otlp_ingest_url: Option<&Url>,
) -> anyhow::Result<()> {
debug!(
path = %path.display(),
use_kzg_mount,
port,
validators = topology.validators().len(),
nodes = topology.nodes().len(),
"updating cfgsync template"
);
let mut cfg = load_cfgsync_template(path)?;
cfg.port = port;
apply_topology_overrides(&mut cfg, topology, use_kzg_mount);
cfg.global_params_path = KzgParamsSpec::for_compose(use_kzg_mount).node_params_path;
apply_topology_overrides(&mut cfg, topology);
if let Some(endpoint) = metrics_otlp_ingest_url.cloned() {
cfg.tracing_settings.metrics = MetricsLayer::Otlp(OtlpMetricsConfig {
endpoint,

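After this change the compose runner no longer threads a KZG flag into cfgsync. A hedged sketch of the new call shape; `update_cfgsync_config` is the crate-local function above, and the path and port below are placeholders.

```rust
// Sketch only: the template path and port are hypothetical, and no OTLP
// metrics ingest URL is configured.
use std::path::Path;
use testing_framework_core::topology::generation::GeneratedTopology;

fn configure(topology: &GeneratedTopology) -> anyhow::Result<()> {
    update_cfgsync_config(
        Path::new("stack/cfgsync.yaml"), // hypothetical template location
        topology,
        4400, // hypothetical cfgsync port
        None, // no metrics ingest URL
    )
}
```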
@ -6,7 +6,6 @@ use std::{
|
||||
|
||||
use anyhow::anyhow;
|
||||
use reqwest::Url;
|
||||
use testing_framework_config::constants::KZG_PARAMS_FILENAME;
|
||||
use testing_framework_core::{
|
||||
adjust_timeout, scenario::CleanupGuard, topology::generation::GeneratedTopology,
|
||||
};
|
||||
@ -37,7 +36,6 @@ pub struct WorkspaceState {
|
||||
pub workspace: ComposeWorkspace,
|
||||
pub root: PathBuf,
|
||||
pub cfgsync_path: PathBuf,
|
||||
pub use_kzg: bool,
|
||||
}
|
||||
|
||||
/// Holds paths and handles for a running docker-compose stack.
|
||||
@ -133,13 +131,13 @@ impl StackEnvironment {
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies the topology has at least one validator so compose can start.
|
||||
/// Verifies the topology has at least one node so compose can start.
|
||||
pub fn ensure_supported_topology(
|
||||
descriptors: &GeneratedTopology,
|
||||
) -> Result<(), ComposeRunnerError> {
|
||||
let validators = descriptors.validators().len();
|
||||
if validators == 0 {
|
||||
return Err(ComposeRunnerError::MissingValidator { validators });
|
||||
let nodes = descriptors.nodes().len();
|
||||
if nodes == 0 {
|
||||
return Err(ComposeRunnerError::MissingNode { nodes });
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -149,19 +147,15 @@ pub fn prepare_workspace_state() -> Result<WorkspaceState, WorkspaceError> {
|
||||
let workspace = ComposeWorkspace::create().map_err(WorkspaceError::new)?;
|
||||
let root = workspace.root_path().to_path_buf();
|
||||
let cfgsync_path = workspace.stack_dir().join("cfgsync.yaml");
|
||||
let use_kzg = workspace.root_path().join(KZG_PARAMS_FILENAME).exists();
|
||||
|
||||
let state = WorkspaceState {
|
||||
workspace,
|
||||
root,
|
||||
cfgsync_path,
|
||||
use_kzg,
|
||||
};
|
||||
|
||||
debug!(
|
||||
root = %state.root.display(),
|
||||
cfgsync = %state.cfgsync_path.display(),
|
||||
use_kzg = state.use_kzg,
|
||||
"prepared compose workspace state"
|
||||
);
|
||||
|
||||
@ -215,7 +209,6 @@ pub fn configure_cfgsync(
|
||||
update_cfgsync_config(
|
||||
&workspace.cfgsync_path,
|
||||
descriptors,
|
||||
workspace.use_kzg,
|
||||
cfgsync_port,
|
||||
metrics_otlp_ingest_url,
|
||||
)
|
||||
@ -315,7 +308,6 @@ pub fn write_compose_artifacts(
|
||||
"building compose descriptor"
|
||||
);
|
||||
let descriptor = ComposeDescriptor::builder(descriptors)
|
||||
.with_kzg_mount(workspace.use_kzg)
|
||||
.with_cfgsync_port(cfgsync_port)
|
||||
.build();
|
||||
|
||||
|
||||
@ -3,9 +3,8 @@ use std::time::Duration;
|
||||
use anyhow::{Context as _, anyhow};
|
||||
use reqwest::Url;
|
||||
use testing_framework_core::{
|
||||
adjust_timeout,
|
||||
scenario::http_probe::NodeRole as HttpNodeRole,
|
||||
topology::generation::{GeneratedTopology, NodeRole as TopologyNodeRole},
|
||||
adjust_timeout, scenario::http_probe::NodeKind as HttpNodeKind,
|
||||
topology::generation::GeneratedTopology,
|
||||
};
|
||||
use tokio::{process::Command, time::timeout};
|
||||
use tracing::{debug, info};
|
||||
@ -25,16 +24,16 @@ pub struct NodeHostPorts {
|
||||
pub testing: u16,
|
||||
}
|
||||
|
||||
/// All host port mappings for validators.
|
||||
/// All host port mappings for nodes.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct HostPortMapping {
|
||||
pub validators: Vec<NodeHostPorts>,
|
||||
pub nodes: Vec<NodeHostPorts>,
|
||||
}
|
||||
|
||||
impl HostPortMapping {
|
||||
/// Returns API ports for all validators.
|
||||
pub fn validator_api_ports(&self) -> Vec<u16> {
|
||||
self.validators.iter().map(|ports| ports.api).collect()
|
||||
/// Returns API ports for all nodes.
|
||||
pub fn node_api_ports(&self) -> Vec<u16> {
|
||||
self.nodes.iter().map(|ports| ports.api).collect()
|
||||
}
|
||||
}
|
||||
|
||||
@ -46,21 +45,21 @@ pub async fn discover_host_ports(
|
||||
debug!(
|
||||
compose_file = %environment.compose_path().display(),
|
||||
project = environment.project_name(),
|
||||
validators = descriptors.validators().len(),
|
||||
nodes = descriptors.nodes().len(),
|
||||
"resolving compose host ports"
|
||||
);
|
||||
let mut validators = Vec::new();
|
||||
for node in descriptors.validators() {
|
||||
let service = node_identifier(TopologyNodeRole::Validator, node.index());
|
||||
let mut nodes = Vec::new();
|
||||
for node in descriptors.nodes() {
|
||||
let service = node_identifier(node.index());
|
||||
let api = resolve_service_port(environment, &service, node.api_port()).await?;
|
||||
let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?;
|
||||
validators.push(NodeHostPorts { api, testing });
|
||||
nodes.push(NodeHostPorts { api, testing });
|
||||
}
|
||||
|
||||
let mapping = HostPortMapping { validators };
|
||||
let mapping = HostPortMapping { nodes };
|
||||
|
||||
info!(
|
||||
validator_ports = ?mapping.validators,
|
||||
node_ports = ?mapping.nodes,
|
||||
"compose host ports resolved"
|
||||
);
|
||||
|
||||
@ -130,19 +129,19 @@ pub async fn ensure_remote_readiness_with_ports(
|
||||
descriptors: &GeneratedTopology,
|
||||
mapping: &HostPortMapping,
|
||||
) -> Result<(), StackReadinessError> {
|
||||
let validator_urls = mapping
|
||||
.validators
|
||||
let node_urls = mapping
|
||||
.nodes
|
||||
.iter()
|
||||
.map(|ports| readiness_url(HttpNodeRole::Validator, ports.api))
|
||||
.map(|ports| readiness_url(HttpNodeKind::Node, ports.api))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
descriptors
|
||||
.wait_remote_readiness(&validator_urls)
|
||||
.wait_remote_readiness(&node_urls)
|
||||
.await
|
||||
.map_err(|source| StackReadinessError::Remote { source })
|
||||
}
|
||||
|
||||
fn readiness_url(role: HttpNodeRole, port: u16) -> Result<Url, StackReadinessError> {
|
||||
fn readiness_url(role: HttpNodeKind, port: u16) -> Result<Url, StackReadinessError> {
|
||||
localhost_url(port).map_err(|source| StackReadinessError::Endpoint { role, port, source })
|
||||
}
|
||||
|
||||
@ -150,10 +149,8 @@ fn localhost_url(port: u16) -> Result<Url, ParseError> {
|
||||
Url::parse(&format!("http://{}:{port}/", compose_runner_host()))
|
||||
}
|
||||
|
||||
fn node_identifier(role: TopologyNodeRole, index: usize) -> String {
|
||||
match role {
|
||||
TopologyNodeRole::Validator => format!("validator-{index}"),
|
||||
}
|
||||
fn node_identifier(index: usize) -> String {
|
||||
format!("node-{index}")
|
||||
}
|
||||
|
||||
pub(crate) fn compose_runner_host() -> String {
|
||||
|
||||
@ -13,12 +13,12 @@ async fn spawn_block_feed_with(
|
||||
node_clients: &NodeClients,
|
||||
) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> {
|
||||
debug!(
|
||||
validators = node_clients.validator_clients().len(),
|
||||
"selecting validator client for block feed"
|
||||
nodes = node_clients.node_clients().len(),
|
||||
"selecting node client for block feed"
|
||||
);
|
||||
|
||||
let block_source_client = node_clients
|
||||
.random_validator()
|
||||
.random_node()
|
||||
.ok_or(ComposeRunnerError::BlockFeedMissing)?;
|
||||
|
||||
spawn_block_feed(block_source_client)
|
||||
|
||||
@ -3,26 +3,26 @@ use std::time::Duration;
|
||||
use reqwest::Url;
|
||||
use testing_framework_core::{
|
||||
nodes::ApiClient,
|
||||
scenario::{NodeClients, http_probe::NodeRole as HttpNodeRole},
|
||||
topology::generation::{GeneratedTopology, NodeRole as TopologyNodeRole},
|
||||
scenario::{NodeClients, http_probe::NodeKind as HttpNodeKind},
|
||||
topology::generation::GeneratedTopology,
|
||||
};
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::{
|
||||
errors::{NodeClientError, StackReadinessError},
|
||||
infrastructure::ports::{HostPortMapping, NodeHostPorts},
|
||||
lifecycle::wait::wait_for_validators,
|
||||
lifecycle::wait::wait_for_nodes,
|
||||
};
|
||||
|
||||
const DISABLED_READINESS_SLEEP: Duration = Duration::from_secs(5);
|
||||
|
||||
/// Wait until all validators respond on their API ports.
|
||||
pub async fn ensure_validators_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> {
|
||||
/// Wait until all nodes respond on their API ports.
|
||||
pub async fn ensure_nodes_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> {
|
||||
if ports.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
wait_for_validators(ports).await.map_err(Into::into)
|
||||
wait_for_nodes(ports).await.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Allow a brief pause when readiness probes are disabled.
|
||||
@ -38,18 +38,18 @@ pub fn build_node_clients_with_ports(
|
||||
mapping: &HostPortMapping,
|
||||
host: &str,
|
||||
) -> Result<NodeClients, NodeClientError> {
|
||||
let validators = descriptors
|
||||
.validators()
|
||||
let nodes = descriptors
|
||||
.nodes()
|
||||
.iter()
|
||||
.zip(mapping.validators.iter())
|
||||
.map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports, host))
|
||||
.zip(mapping.nodes.iter())
|
||||
.map(|(_node, ports)| api_client_from_host_ports(HttpNodeKind::Node, ports, host))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(NodeClients::new(validators))
|
||||
Ok(NodeClients::new(nodes))
|
||||
}
|
||||
|
||||
fn api_client_from_host_ports(
|
||||
role: HttpNodeRole,
|
||||
role: HttpNodeKind,
|
||||
ports: &NodeHostPorts,
|
||||
host: &str,
|
||||
) -> Result<ApiClient, NodeClientError> {
|
||||
@ -73,12 +73,6 @@ fn api_client_from_host_ports(
|
||||
Ok(ApiClient::from_urls(base_url, testing_url))
|
||||
}
|
||||
|
||||
fn to_http_role(role: TopologyNodeRole) -> testing_framework_core::scenario::http_probe::NodeRole {
|
||||
match role {
|
||||
TopologyNodeRole::Validator => HttpNodeRole::Validator,
|
||||
}
|
||||
}
|
||||
|
||||
fn localhost_url(port: u16, host: &str) -> Result<Url, url::ParseError> {
|
||||
Url::parse(&format!("http://{host}:{port}/"))
|
||||
}
|
||||
|
||||
@ -2,7 +2,7 @@ use std::{env, time::Duration};
|
||||
|
||||
use testing_framework_core::{
|
||||
adjust_timeout,
|
||||
scenario::http_probe::{self, HttpReadinessError, NodeRole},
|
||||
scenario::http_probe::{self, HttpReadinessError, NodeKind},
|
||||
};
|
||||
use tracing::{debug, info};
|
||||
|
||||
@ -12,11 +12,11 @@ const POLL_INTERVAL_MILLIS: u64 = 250;
|
||||
const DEFAULT_WAIT: Duration = Duration::from_secs(DEFAULT_WAIT_TIMEOUT_SECS);
|
||||
const POLL_INTERVAL: Duration = Duration::from_millis(POLL_INTERVAL_MILLIS);
|
||||
|
||||
pub async fn wait_for_validators(ports: &[u16]) -> Result<(), HttpReadinessError> {
|
||||
wait_for_ports(ports, NodeRole::Validator).await
|
||||
pub async fn wait_for_nodes(ports: &[u16]) -> Result<(), HttpReadinessError> {
|
||||
wait_for_ports(ports, NodeKind::Node).await
|
||||
}
|
||||
|
||||
async fn wait_for_ports(ports: &[u16], role: NodeRole) -> Result<(), HttpReadinessError> {
|
||||
async fn wait_for_ports(ports: &[u16], role: NodeKind) -> Result<(), HttpReadinessError> {
|
||||
let host = compose_runner_host();
|
||||
let timeout = compose_http_timeout();
|
||||
|
||||
|
||||
@ -20,12 +20,11 @@ app.kubernetes.io/name: {{ include "nomos-runner.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "nomos-runner.validatorLabels" -}}
|
||||
{{- define "nomos-runner.nodeLabels" -}}
|
||||
{{- $root := index . "root" -}}
|
||||
{{- $index := index . "index" -}}
|
||||
app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }}
|
||||
app.kubernetes.io/instance: {{ $root.Release.Name }}
|
||||
nomos/logical-role: validator
|
||||
nomos/validator-index: "{{ $index }}"
|
||||
nomos/logical-role: node
|
||||
nomos/node-index: "{{ $index }}"
|
||||
{{- end -}}
|
||||
|
||||
|
||||
@ -1,25 +1,25 @@
|
||||
{{- $root := . -}}
|
||||
{{- $nodes := default (list) .Values.validators.nodes }}
|
||||
{{- $nodes := default (list) .Values.nodes.nodes }}
|
||||
{{- range $i, $node := $nodes }}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }}
|
||||
name: {{ include "nomos-runner.fullname" $root }}-node-{{ $i }}
|
||||
labels:
|
||||
{{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
{{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 6 }}
|
||||
{{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 8 }}
|
||||
{{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: validator
|
||||
- name: node
|
||||
image: {{ $root.Values.image }}
|
||||
imagePullPolicy: {{ $root.Values.imagePullPolicy }}
|
||||
command: ["/etc/nomos/scripts/run_nomos_node.sh"]
|
||||
@ -33,8 +33,6 @@ spec:
|
||||
value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }}
|
||||
- name: NOMOS_TIME_BACKEND
|
||||
value: {{ $root.Values.timeBackend | default "monotonic" | quote }}
|
||||
- name: LOGOS_BLOCKCHAIN_KZGRS_PARAMS_PATH
|
||||
value: '{{ if eq $root.Values.kzg.mode "inImage" }}{{ $root.Values.kzg.inImageParamsPath }}{{ else }}{{ $root.Values.kzg.hostPathParamsPath }}{{ end }}'
|
||||
{{- range $key, $value := $node.env }}
|
||||
- name: {{ $key }}
|
||||
value: "{{ $value }}"
|
||||
@ -43,11 +41,6 @@ spec:
|
||||
- name: assets
|
||||
mountPath: /etc/nomos
|
||||
readOnly: true
|
||||
{{- if eq $root.Values.kzg.mode "hostPath" }}
|
||||
- name: kzg-params
|
||||
mountPath: /kzgrs_test_params
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: assets
|
||||
configMap:
|
||||
@ -62,10 +55,4 @@ spec:
|
||||
path: scripts/run_nomos.sh
|
||||
- key: run_nomos_node.sh
|
||||
path: scripts/run_nomos_node.sh
|
||||
{{- if eq $root.Values.kzg.mode "hostPath" }}
|
||||
- name: kzg-params
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "nomos-runner.fullname" $root }}-kzg
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@ -1,17 +1,17 @@
|
||||
{{- $root := . -}}
|
||||
{{- $nodes := default (list) .Values.validators.nodes }}
|
||||
{{- $nodes := default (list) .Values.nodes.nodes }}
|
||||
{{- range $i, $node := $nodes }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }}
|
||||
name: {{ include "nomos-runner.fullname" $root }}-node-{{ $i }}
|
||||
labels:
|
||||
{{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
{{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
||||
{{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
{{- include "nomos-runner.nodeLabels" (dict "root" $root "index" $i) | nindent 4 }}
|
||||
ports:
|
||||
- name: http
|
||||
port: {{ default 18080 $node.apiPort }}
|
||||
|
||||
@ -14,14 +14,6 @@ scripts:
|
||||
runNomosSh: ""
|
||||
runNomosNodeSh: ""
|
||||
|
||||
validators:
|
||||
nodes:
|
||||
count: 1
|
||||
nodes: []
|
||||
|
||||
kzg:
|
||||
mode: "hostPath"
|
||||
hostPathParamsPath: "/kzgrs_test_params/kzgrs_test_params"
|
||||
inImageParamsPath: "/opt/nomos/kzg-params/kzgrs_test_params"
|
||||
hostPath: "/var/lib/nomos/kzgrs_test_params"
|
||||
hostPathType: "Directory"
|
||||
storageSize: "1Gi"
|
||||
|
||||
@ -56,8 +56,8 @@ impl K8sDeployer {
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
/// High-level runner failures returned to the scenario harness.
|
||||
pub enum K8sRunnerError {
|
||||
#[error("kubernetes runner requires at least one validator (validators={validators})")]
|
||||
UnsupportedTopology { validators: usize },
|
||||
#[error("kubernetes runner requires at least one node (nodes={nodes})")]
|
||||
UnsupportedTopology { nodes: usize },
|
||||
#[error("failed to initialise kubernetes client: {source}")]
|
||||
ClientInit {
|
||||
#[source]
|
||||
@ -122,9 +122,9 @@ impl From<ClusterWaitError> for K8sRunnerError {
|
||||
}
|
||||
|
||||
fn ensure_supported_topology(descriptors: &GeneratedTopology) -> Result<(), K8sRunnerError> {
|
||||
let validators = descriptors.validators().len();
|
||||
if validators == 0 {
|
||||
return Err(K8sRunnerError::UnsupportedTopology { validators });
|
||||
let nodes = descriptors.nodes().len();
|
||||
if nodes == 0 {
|
||||
return Err(K8sRunnerError::UnsupportedTopology { nodes });
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -137,13 +137,13 @@ async fn deploy_with_observability<Caps>(
|
||||
let observability = resolve_observability_inputs(observability)?;
|
||||
|
||||
let descriptors = scenario.topology().clone();
|
||||
let validator_count = descriptors.validators().len();
|
||||
let node_count = descriptors.nodes().len();
|
||||
ensure_supported_topology(&descriptors)?;
|
||||
|
||||
let client = init_kube_client().await?;
|
||||
|
||||
info!(
|
||||
validators = validator_count,
|
||||
nodes = node_count,
|
||||
duration_secs = scenario.duration().as_secs(),
|
||||
readiness_checks = deployer.readiness_checks,
|
||||
metrics_query_url = observability.metrics_query_url.as_ref().map(|u| u.as_str()),
|
||||
@ -195,7 +195,7 @@ async fn deploy_with_observability<Caps>(
|
||||
telemetry,
|
||||
block_feed,
|
||||
block_feed_guard,
|
||||
validator_count,
|
||||
node_count,
|
||||
)
|
||||
}
|
||||
|
||||
@ -207,13 +207,13 @@ async fn setup_cluster(
|
||||
observability: &ObservabilityInputs,
|
||||
) -> Result<ClusterEnvironment, K8sRunnerError> {
|
||||
let assets = prepare_assets(descriptors, observability.metrics_otlp_ingest_url.as_ref())?;
|
||||
let validators = descriptors.validators().len();
|
||||
let nodes = descriptors.nodes().len();
|
||||
|
||||
let (namespace, release) = cluster_identifiers();
|
||||
info!(%namespace, %release, validators, "preparing k8s assets and namespace");
|
||||
info!(%namespace, %release, nodes, "preparing k8s assets and namespace");
|
||||
|
||||
let mut cleanup_guard =
|
||||
Some(install_stack(client, &assets, &namespace, &release, validators).await?);
|
||||
Some(install_stack(client, &assets, &namespace, &release, nodes).await?);
|
||||
|
||||
info!("waiting for helm-managed services to become ready");
|
||||
let cluster_ready =
|
||||
@ -328,10 +328,10 @@ fn maybe_print_endpoints(
|
||||
.unwrap_or_else(|| "<disabled>".to_string())
|
||||
);
|
||||
|
||||
let validator_clients = node_clients.validator_clients();
|
||||
for (idx, client) in validator_clients.iter().enumerate() {
|
||||
let nodes = node_clients.node_clients();
|
||||
for (idx, client) in nodes.iter().enumerate() {
|
||||
println!(
|
||||
"TESTNET_PPROF validator_{}={}/debug/pprof/profile?seconds=15&format=proto",
|
||||
"TESTNET_PPROF node_{}={}/debug/pprof/profile?seconds=15&format=proto",
|
||||
idx,
|
||||
client.base_url()
|
||||
);
|
||||
@ -347,7 +347,7 @@ fn finalize_runner(
|
||||
telemetry: testing_framework_core::scenario::Metrics,
|
||||
block_feed: testing_framework_core::scenario::BlockFeed,
|
||||
block_feed_guard: BlockFeedTask,
|
||||
validator_count: usize,
|
||||
node_count: usize,
|
||||
) -> Result<Runner, K8sRunnerError> {
|
||||
let environment = cluster
|
||||
.take()
|
||||
@ -373,7 +373,7 @@ fn finalize_runner(
|
||||
);
|
||||
|
||||
info!(
|
||||
validators = validator_count,
|
||||
nodes = node_count,
|
||||
duration_secs = duration.as_secs(),
|
||||
"k8s deployment ready; handing control to scenario runner"
|
||||
);
|
||||
|
||||
@ -11,9 +11,7 @@ use reqwest::Url;
|
||||
use serde::Serialize;
|
||||
use tempfile::TempDir;
|
||||
use testing_framework_config::constants::{DEFAULT_ASSETS_STACK_DIR, cfgsync_port};
|
||||
pub use testing_framework_core::kzg::KzgMode;
|
||||
use testing_framework_core::{
|
||||
kzg::KzgParamsSpec,
|
||||
scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, render_cfgsync_yaml},
|
||||
topology::generation::GeneratedTopology,
|
||||
};
|
||||
@ -24,8 +22,6 @@ use tracing::{debug, info};
|
||||
/// Paths and image metadata required to deploy the Helm chart.
|
||||
pub struct RunnerAssets {
|
||||
pub image: String,
|
||||
pub kzg_mode: KzgMode,
|
||||
pub kzg_path: Option<PathBuf>,
|
||||
pub chart_path: PathBuf,
|
||||
pub cfgsync_file: PathBuf,
|
||||
pub run_cfgsync_script: PathBuf,
|
||||
@ -54,8 +50,6 @@ pub enum AssetsError {
|
||||
},
|
||||
#[error("missing required script at {path}")]
|
||||
MissingScript { path: PathBuf },
|
||||
#[error("missing KZG parameters at {path}; build them with `make kzgrs_test_params`")]
|
||||
MissingKzg { path: PathBuf },
|
||||
#[error("missing Helm chart at {path}; ensure the repository is up-to-date")]
|
||||
MissingChart { path: PathBuf },
|
||||
#[error("failed to create temporary directory for rendered assets: {source}")]
|
||||
@ -76,53 +70,37 @@ pub enum AssetsError {
|
||||
},
|
||||
}
|
||||
|
||||
/// Render cfgsync config, Helm values, and locate scripts/KZG assets for a
|
||||
/// Render cfgsync config, Helm values, and locate scripts for a
|
||||
/// topology.
|
||||
pub fn prepare_assets(
|
||||
topology: &GeneratedTopology,
|
||||
metrics_otlp_ingest_url: Option<&Url>,
|
||||
) -> Result<RunnerAssets, AssetsError> {
|
||||
info!(
|
||||
validators = topology.validators().len(),
|
||||
nodes = topology.nodes().len(),
|
||||
"preparing k8s runner assets"
|
||||
);
|
||||
|
||||
let root = workspace_root().map_err(|source| AssetsError::WorkspaceRoot { source })?;
|
||||
let kzg_spec = KzgParamsSpec::for_k8s(&root);
|
||||
|
||||
let tempdir = create_assets_tempdir()?;
|
||||
|
||||
let cfgsync_file = render_and_write_cfgsync(
|
||||
&root,
|
||||
topology,
|
||||
&kzg_spec,
|
||||
metrics_otlp_ingest_url,
|
||||
&tempdir,
|
||||
)?;
|
||||
let cfgsync_file =
|
||||
render_and_write_cfgsync(&root, topology, metrics_otlp_ingest_url, &tempdir)?;
|
||||
let scripts = validate_scripts(&root)?;
|
||||
let kzg_path = resolve_kzg_path(&root, &kzg_spec)?;
|
||||
let chart_path = helm_chart_path()?;
|
||||
let values_file = render_and_write_values(topology, &tempdir)?;
|
||||
let image = testnet_image();
|
||||
|
||||
let kzg_display = kzg_path
|
||||
.as_ref()
|
||||
.map(|path| path.display().to_string())
|
||||
.unwrap_or_else(|| "<in-image>".to_string());
debug!(
cfgsync = %cfgsync_file.display(),
values = %values_file.display(),
image,
kzg_mode = ?kzg_spec.mode,
kzg = %kzg_display,
chart = %chart_path.display(),
"k8s runner assets prepared"
);

Ok(RunnerAssets {
image,
kzg_mode: kzg_spec.mode,
kzg_path,
chart_path,
cfgsync_file,
run_nomos_script: scripts.run_shared,
@@ -143,21 +121,13 @@ fn create_assets_tempdir() -> Result<TempDir, AssetsError> {
fn render_and_write_cfgsync(
root: &Path,
topology: &GeneratedTopology,
kzg_spec: &KzgParamsSpec,
metrics_otlp_ingest_url: Option<&Url>,
tempdir: &TempDir,
) -> Result<PathBuf, AssetsError> {
let cfgsync_yaml = render_cfgsync_config(root, topology, kzg_spec, metrics_otlp_ingest_url)?;
let cfgsync_yaml = render_cfgsync_config(root, topology, metrics_otlp_ingest_url)?;
write_temp_file(tempdir.path(), "cfgsync.yaml", cfgsync_yaml)
}

fn resolve_kzg_path(root: &Path, kzg_spec: &KzgParamsSpec) -> Result<Option<PathBuf>, AssetsError> {
match kzg_spec.mode {
KzgMode::HostPath => Ok(Some(validate_kzg_params(root, kzg_spec)?)),
KzgMode::InImage => Ok(None),
}
}

fn render_and_write_values(
topology: &GeneratedTopology,
tempdir: &TempDir,
@@ -176,7 +146,6 @@ const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300;
fn render_cfgsync_config(
root: &Path,
topology: &GeneratedTopology,
kzg_spec: &KzgParamsSpec,
metrics_otlp_ingest_url: Option<&Url>,
) -> Result<String, AssetsError> {
let cfgsync_template_path = stack_assets_root(root).join("cfgsync.yaml");
@@ -185,8 +154,7 @@ fn render_cfgsync_config(
let mut cfg = load_cfgsync_template(&cfgsync_template_path)
.map_err(|source| AssetsError::Cfgsync { source })?;

apply_topology_overrides(&mut cfg, topology, kzg_spec.mode == KzgMode::HostPath);
cfg.global_params_path = kzg_spec.node_params_path.clone();
apply_topology_overrides(&mut cfg, topology);

if let Some(endpoint) = metrics_otlp_ingest_url.cloned() {
cfg.tracing_settings.metrics = MetricsLayer::Otlp(OtlpMetricsConfig {
@@ -232,19 +200,6 @@ fn validate_scripts(root: &Path) -> Result<ScriptPaths, AssetsError> {
})
}

fn validate_kzg_params(root: &Path, spec: &KzgParamsSpec) -> Result<PathBuf, AssetsError> {
let Some(path) = spec.host_params_dir.clone() else {
return Err(AssetsError::MissingKzg {
path: root.join(testing_framework_config::constants::DEFAULT_KZG_HOST_DIR),
});
};
if path.exists() {
Ok(path)
} else {
Err(AssetsError::MissingKzg { path })
}
}

fn helm_chart_path() -> Result<PathBuf, AssetsError> {
let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("helm/nomos-runner");
if path.exists() {
@@ -309,7 +264,7 @@ struct HelmValues {
#[serde(rename = "imagePullPolicy")]
image_pull_policy: String,
cfgsync: CfgsyncValues,
validators: NodeGroup,
nodes: NodeGroup,
}

#[derive(Serialize)]
@@ -340,12 +295,12 @@ fn build_values(topology: &GeneratedTopology) -> HelmValues {
let image_pull_policy =
tf_env::nomos_testnet_image_pull_policy().unwrap_or_else(|| "IfNotPresent".into());
debug!(pol_mode, "rendering Helm values for k8s stack");
let validators = build_node_group("validator", topology.validators(), &pol_mode);
let nodes = build_node_group("node", topology.nodes(), &pol_mode);

HelmValues {
image_pull_policy,
cfgsync,
validators,
nodes,
}
}

@@ -4,7 +4,7 @@ use kube::Client;
use reqwest::Url;
use testing_framework_core::{
nodes::ApiClient,
scenario::{CleanupGuard, NodeClients, http_probe::NodeRole},
scenario::{CleanupGuard, NodeClients, http_probe::NodeKind},
topology::{generation::GeneratedTopology, readiness::ReadinessError},
};
use tracing::{debug, info};
@@ -21,7 +21,7 @@ use crate::{

#[derive(Default)]
pub struct PortSpecs {
pub validators: Vec<NodeConfigPorts>,
pub nodes: Vec<NodeConfigPorts>,
}

/// Holds k8s namespace, Helm release, port forwards, and cleanup guard.
@@ -30,9 +30,9 @@ pub struct ClusterEnvironment {
namespace: String,
release: String,
cleanup: Option<RunnerCleanup>,
validator_host: String,
validator_api_ports: Vec<u16>,
validator_testing_ports: Vec<u16>,
node_host: String,
node_api_ports: Vec<u16>,
node_testing_ports: Vec<u16>,
port_forwards: Vec<PortForwardHandle>,
}

@@ -51,17 +51,17 @@ impl ClusterEnvironment {
ports: &ClusterPorts,
port_forwards: Vec<PortForwardHandle>,
) -> Self {
let validator_api_ports = ports.validators.iter().map(|ports| ports.api).collect();
let validator_testing_ports = ports.validators.iter().map(|ports| ports.testing).collect();
let node_api_ports = ports.nodes.iter().map(|ports| ports.api).collect();
let node_testing_ports = ports.nodes.iter().map(|ports| ports.testing).collect();

Self {
client,
namespace,
release,
cleanup: Some(cleanup),
validator_host: ports.validator_host.clone(),
validator_api_ports,
validator_testing_ports,
node_host: ports.node_host.clone(),
node_api_ports,
node_testing_ports,
port_forwards,
}
}
@@ -99,8 +99,8 @@ impl ClusterEnvironment {
&self.release
}

pub fn validator_ports(&self) -> (&[u16], &[u16]) {
(&self.validator_api_ports, &self.validator_testing_ports)
pub fn node_ports(&self) -> (&[u16], &[u16]) {
(&self.node_api_ports, &self.node_testing_ports)
}
}

@@ -112,7 +112,7 @@ pub enum NodeClientError {
role = role.label()
)]
Endpoint {
role: NodeRole,
role: NodeKind,
endpoint: &'static str,
port: u16,
#[source]
@@ -128,7 +128,7 @@ pub enum RemoteReadinessError {
role = role.label()
)]
Endpoint {
role: NodeRole,
role: NodeKind,
port: u16,
#[source]
source: ParseError,
@@ -141,8 +141,8 @@ pub enum RemoteReadinessError {
}

pub fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs {
let validators = descriptors
.validators()
let nodes = descriptors
.nodes()
.iter()
.map(|node| NodeConfigPorts {
api: node.general.api_config.address.port(),
@@ -150,35 +150,27 @@ pub fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs {
})
.collect();

let specs = PortSpecs { validators };
let specs = PortSpecs { nodes };

debug!(
validators = specs.validators.len(),
"collected k8s port specs"
);
debug!(nodes = specs.nodes.len(), "collected k8s port specs");

specs
}

pub fn build_node_clients(cluster: &ClusterEnvironment) -> Result<NodeClients, NodeClientError> {
let validators = cluster
.validator_api_ports
let nodes = cluster
.node_api_ports
.iter()
.copied()
.zip(cluster.validator_testing_ports.iter().copied())
.zip(cluster.node_testing_ports.iter().copied())
.map(|(api_port, testing_port)| {
api_client_from_ports(
&cluster.validator_host,
NodeRole::Validator,
api_port,
testing_port,
)
api_client_from_ports(&cluster.node_host, NodeKind::Node, api_port, testing_port)
})
.collect::<Result<Vec<_>, _>>()?;

debug!(validators = validators.len(), "built k8s node clients");
debug!(nodes = nodes.len(), "built k8s node clients");

Ok(NodeClients::new(validators))
Ok(NodeClients::new(nodes))
}

pub async fn ensure_cluster_readiness(
@@ -186,18 +178,17 @@ pub async fn ensure_cluster_readiness(
cluster: &ClusterEnvironment,
) -> Result<(), RemoteReadinessError> {
info!("waiting for remote readiness (API + membership)");
let (validator_api, _validator_testing) = cluster.validator_ports();
let (node_api, _node_testing) = cluster.node_ports();

let validator_urls =
readiness_urls(validator_api, NodeRole::Validator, &cluster.validator_host)?;
let node_urls = readiness_urls(node_api, NodeKind::Node, &cluster.node_host)?;

descriptors
.wait_remote_readiness(&validator_urls)
.wait_remote_readiness(&node_urls)
.await
.map_err(|source| RemoteReadinessError::Remote { source })?;

info!(
validator_api_ports = ?validator_api,
node_api_ports = ?node_api,
"k8s remote readiness confirmed"
);

@@ -225,14 +216,14 @@ pub async fn install_stack(
assets: &RunnerAssets,
namespace: &str,
release: &str,
validators: usize,
nodes: usize,
) -> Result<RunnerCleanup, crate::deployer::K8sRunnerError> {
tracing::info!(
release = %release,
namespace = %namespace,
"installing helm release"
);
crate::infrastructure::helm::install_release(assets, release, namespace, validators).await?;
crate::infrastructure::helm::install_release(assets, release, namespace, nodes).await?;
tracing::info!(release = %release, "helm install succeeded");

let preserve = env::var("K8S_RUNNER_PRESERVE").is_ok();
@@ -252,15 +243,15 @@ pub async fn wait_for_ports_or_cleanup(
cleanup_guard: &mut Option<RunnerCleanup>,
) -> Result<ClusterReady, crate::deployer::K8sRunnerError> {
info!(
validators = specs.validators.len(),
nodes = specs.nodes.len(),
%namespace,
%release,
"waiting for cluster port-forwards"
);
match wait_for_cluster_ready(client, namespace, release, &specs.validators).await {
match wait_for_cluster_ready(client, namespace, release, &specs.nodes).await {
Ok(ports) => {
info!(
validator_ports = ?ports.ports.validators,
node_ports = ?ports.ports.nodes,
"cluster port-forwards established"
);
Ok(ports)
@@ -288,7 +279,7 @@ async fn cleanup_pending(client: &Client, namespace: &str, guard: &mut Option<Ru

fn readiness_urls(
ports: &[u16],
role: NodeRole,
role: NodeKind,
host: &str,
) -> Result<Vec<Url>, RemoteReadinessError> {
ports
@@ -298,7 +289,7 @@ fn readiness_urls(
.collect()
}

fn readiness_url(host: &str, role: NodeRole, port: u16) -> Result<Url, RemoteReadinessError> {
fn readiness_url(host: &str, role: NodeKind, port: u16) -> Result<Url, RemoteReadinessError> {
cluster_host_url(host, port).map_err(|source| RemoteReadinessError::Endpoint {
role,
port,
@@ -312,7 +303,7 @@ fn cluster_host_url(host: &str, port: u16) -> Result<Url, ParseError> {

fn api_client_from_ports(
host: &str,
role: NodeRole,
role: NodeKind,
api_port: u16,
testing_port: u16,
) -> Result<ApiClient, NodeClientError> {
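
Since every probed endpoint is now addressed with the single NodeKind::Node kind, readiness checking reduces to building one base URL per forwarded API port. A minimal sketch of equivalent URL construction, using the url crate (the same Url type reqwest re-exports); the host value and trailing path are illustrative assumptions, since the exact probe path is not shown in this diff:

use url::Url;

// Illustrative only: mirrors the readiness_urls -> cluster_host_url flow above,
// producing one http://host:port/ base URL per forwarded API port.
fn sketch_readiness_urls(host: &str, api_ports: &[u16]) -> Result<Vec<Url>, url::ParseError> {
    api_ports
        .iter()
        .map(|port| Url::parse(&format!("http://{host}:{port}/")))
        .collect()
}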

@@ -1,10 +1,10 @@
use std::{io, path::Path, process::Stdio};
use std::{io, process::Stdio};

use thiserror::Error;
use tokio::process::Command;
use tracing::{debug, info};

use crate::infrastructure::assets::{KzgMode, RunnerAssets, cfgsync_port_value, workspace_root};
use crate::infrastructure::assets::{RunnerAssets, cfgsync_port_value, workspace_root};

/// Errors returned from Helm invocations.
#[derive(Debug, Error)]
@@ -15,8 +15,6 @@ pub enum HelmError {
#[source]
source: io::Error,
},
#[error("kzg_path must be present for HostPath mode")]
MissingKzgPath,
#[error("{command} exited with status {status:?}\nstderr:\n{stderr}\nstdout:\n{stdout}")]
Failed {
command: String,
@@ -31,23 +29,20 @@ pub async fn install_release(
assets: &RunnerAssets,
release: &str,
namespace: &str,
validators: usize,
nodes: usize,
) -> Result<(), HelmError> {
let kzg = resolve_kzg_install_args(assets)?;
info!(
release,
namespace,
validators,
nodes,
image = %assets.image,
cfgsync_port = cfgsync_port_value(),
kzg_mode = ?assets.kzg_mode,
kzg = %kzg.display(),
values = %assets.values_file.display(),
"installing helm release"
);

let command = format!("helm install {release}");
let cmd = build_install_command(assets, release, namespace, validators, &kzg, &command);
let cmd = build_install_command(assets, release, namespace, nodes, &command);
let output = run_helm_command(cmd, &command).await?;

maybe_log_install_output(&command, &output);
@@ -56,49 +51,11 @@ pub async fn install_release(
Ok(())
}

struct KzgInstallArgs<'a> {
mode: &'static str,
host_path: Option<&'a Path>,
host_path_type: Option<&'static str>,
}

impl KzgInstallArgs<'_> {
fn display(&self) -> String {
self.host_path
.map(|p| p.display().to_string())
.unwrap_or_else(|| "<in-image>".to_string())
}
}

fn resolve_kzg_install_args(assets: &RunnerAssets) -> Result<KzgInstallArgs<'_>, HelmError> {
match assets.kzg_mode {
KzgMode::HostPath => {
let host_path = assets.kzg_path.as_ref().ok_or(HelmError::MissingKzgPath)?;
let host_path_type = if host_path.is_dir() {
"Directory"
} else {
"File"
};
Ok(KzgInstallArgs {
mode: "kzg.mode=hostPath",
host_path: Some(host_path),
host_path_type: Some(host_path_type),
})
}
KzgMode::InImage => Ok(KzgInstallArgs {
mode: "kzg.mode=inImage",
host_path: None,
host_path_type: None,
}),
}
}

fn build_install_command(
assets: &RunnerAssets,
release: &str,
namespace: &str,
validators: usize,
kzg: &KzgInstallArgs<'_>,
nodes: usize,
command: &str,
) -> Command {
let mut cmd = Command::new("helm");
@@ -114,13 +71,11 @@ fn build_install_command(
.arg("--set")
.arg(format!("image={}", assets.image))
.arg("--set")
.arg(format!("validators.count={validators}"))
.arg(format!("nodes.count={nodes}"))
.arg("--set")
.arg(format!("cfgsync.port={}", cfgsync_port_value()))
.arg("-f")
.arg(&assets.values_file)
.arg("--set")
.arg(kzg.mode)
.arg("--set-file")
.arg(format!("cfgsync.config={}", assets.cfgsync_file.display()))
.arg("--set-file")
@@ -141,13 +96,6 @@ fn build_install_command(
.stdout(Stdio::piped())
.stderr(Stdio::piped());

if let (Some(host_path), Some(host_path_type)) = (kzg.host_path, kzg.host_path_type) {
cmd.arg("--set")
.arg(format!("kzg.hostPath={}", host_path.display()))
.arg("--set")
.arg(format!("kzg.hostPathType={host_path_type}"));
}

if let Ok(root) = workspace_root() {
cmd.current_dir(root);
}
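
With the KZG arguments gone, the Helm invocation assembled by build_install_command reduces to a fixed set of --set/--set-file flags plus the unified node count. A rough sketch of the equivalent assembly, with hypothetical placeholder values for the release, namespace, and file locations (only the flag names, the chart path, and nodes.count come from the diff above):

use tokio::process::Command;

// Illustrative only: approximates the post-change helm arguments.
// Release, namespace, and file paths below are placeholders.
fn sketch_install_command(image: &str, nodes: usize) -> Command {
    let mut cmd = Command::new("helm");
    cmd.arg("install")
        .arg("nomos-demo")                       // hypothetical release name
        .arg("helm/nomos-runner")                // chart directory from this repo
        .arg("--namespace")
        .arg("nomos-demo")                       // hypothetical namespace
        .arg("--set")
        .arg(format!("image={image}"))
        .arg("--set")
        .arg(format!("nodes.count={nodes}"))     // unified count; no validators.count or kzg.* flags
        .arg("--set-file")
        .arg("cfgsync.config=/tmp/cfgsync.yaml") // hypothetical rendered cfgsync file
        .arg("-f")
        .arg("/tmp/values.yaml");                // hypothetical rendered values file
    cmd
}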