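//! Manually-run examples that dynamically join a node to a running local
//! cluster and verify the joiner reaches consensus.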

use std::time::Duration;
use anyhow::Result;
use async_trait::async_trait;
use testing_framework_core::scenario::{
    Deployer, DynError, PeerSelection, RunContext, ScenarioBuilder, StartNodeOptions, Workload,
};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::ScenarioBuilderExt;
use tokio::time::{sleep, timeout};
use tracing_subscriber::fmt::try_init;
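
// Delay before starting the joiner, plus the timeout and poll interval used
// while waiting for it to become ready.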
const START_DELAY: Duration = Duration::from_secs(5);
const READY_TIMEOUT: Duration = Duration::from_secs(60);
const READY_POLL_INTERVAL: Duration = Duration::from_secs(2);
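
/// Workload that starts one additional node after `START_DELAY` and waits for
/// it to join consensus, leaving peer selection to the framework's defaults.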
struct JoinNodeWorkload {
    name: String,
}

impl JoinNodeWorkload {
    fn new(name: impl Into<String>) -> Self {
        Self { name: name.into() }
    }
}

#[async_trait]
impl Workload for JoinNodeWorkload {
    fn name(&self) -> &str {
        "dynamic_join"
    }

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        let handle = ctx
            .node_control()
            .ok_or_else(|| "dynamic join workload requires node control".to_owned())?;

        // Give the initial cluster a head start before the new node joins.
        sleep(START_DELAY).await;
        let node = handle.start_node(&self.name).await?;
        let client = node.api;

        // Poll consensus info until the new node reports a non-zero height.
        timeout(READY_TIMEOUT, async {
            loop {
                match client.consensus_info().await {
                    Ok(info) if info.height > 0 => break,
                    Ok(_) | Err(_) => sleep(READY_POLL_INTERVAL).await,
                }
            }
        })
        .await
        .map_err(|_| "dynamic join node did not become ready in time")?;

        // Keep the workload running for the configured run duration.
        sleep(ctx.run_duration()).await;
        Ok(())
    }
}
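
/// Variant of [`JoinNodeWorkload`] that starts the new node with an explicitly
/// named set of peers via `PeerSelection::Named`.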
struct JoinNodeWithPeersWorkload {
    name: String,
    peers: Vec<String>,
}

impl JoinNodeWithPeersWorkload {
    fn new(name: impl Into<String>, peers: Vec<String>) -> Self {
        Self {
            name: name.into(),
            peers,
        }
    }
}

#[async_trait]
impl Workload for JoinNodeWithPeersWorkload {
    fn name(&self) -> &str {
        "dynamic_join_with_peers"
    }

    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
        let handle = ctx
            .node_control()
            .ok_or_else(|| "dynamic join workload requires node control".to_owned())?;

        // Give the initial cluster a head start before the new node joins.
        sleep(START_DELAY).await;

        // Join via an explicitly named peer set rather than the default selection.
        let options = StartNodeOptions {
            peers: PeerSelection::Named(self.peers.clone()),
            config_patch: None,
            persist_dir: None,
        };
        let node = handle.start_node_with(&self.name, options).await?;
        let client = node.api;

        // Poll consensus info until the new node reports a non-zero height.
        timeout(READY_TIMEOUT, async {
            loop {
                match client.consensus_info().await {
                    Ok(info) if info.height > 0 => break,
                    Ok(_) | Err(_) => sleep(READY_POLL_INTERVAL).await,
                }
            }
        })
        .await
        .map_err(|_| "dynamic join node did not become ready in time")?;

        // Keep the workload running for the configured run duration.
        sleep(ctx.run_duration()).await;
        Ok(())
    }
}
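
// The readiness loop above is duplicated in both workloads. A helper along these
// lines could factor it out; this is a sketch, not part of the original example,
// and it relies only on `DynError` converting from `&str`, which the error
// handling above already assumes.
#[allow(dead_code)]
async fn wait_until_ready<F, Fut>(mut is_ready: F) -> Result<(), DynError>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = bool>,
{
    timeout(READY_TIMEOUT, async {
        // Re-run the caller-supplied readiness check until it reports true.
        while !is_ready().await {
            sleep(READY_POLL_INTERVAL).await;
        }
    })
    .await
    .map_err(|_| "node did not become ready in time")?;
    Ok(())
}

// Two initial nodes in a star topology; the workload then joins a third node
// ("joiner") mid-run while the scenario checks consensus liveness.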
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
    let _ = try_init();

    let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .enable_node_control()
        .with_workload(JoinNodeWorkload::new("joiner"))
        .expect_consensus_liveness()
        .with_run_duration(Duration::from_secs(60))
        .build()?;

    let deployer = LocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let _handle = runner.run(&mut scenario).await?;
    Ok(())
}
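
// Same scenario as above, but the joiner peers explicitly with "node-0".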
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
    let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
        .enable_node_control()
        .with_workload(JoinNodeWithPeersWorkload::new(
            "joiner",
            vec!["node-0".to_string()],
        ))
        .expect_consensus_liveness()
        .with_run_duration(Duration::from_secs(60))
        .build()?;

    let deployer = LocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let _handle = runner.run(&mut scenario).await?;
    Ok(())
}