Mirror of https://github.com/logos-blockchain/logos-blockchain-testing.git (synced 2026-01-02 13:23:13 +00:00)

refactor(core): name retry policies and tuple aliases

parent ca9042a6b6, commit 7891bc0be3
@@ -19,11 +19,18 @@ use crate::nodes::{
     create_tempdir, persist_tempdir,
 };
 
+const EXIT_POLL_INTERVAL: Duration = Duration::from_millis(100);
+const STARTUP_POLL_INTERVAL: Duration = Duration::from_millis(100);
+const STARTUP_TIMEOUT: Duration = Duration::from_secs(60);
+
+pub type NodeAddresses = (SocketAddr, Option<SocketAddr>);
+pub type PreparedNodeConfig<T> = (TempDir, T, SocketAddr, Option<SocketAddr>);
+
 /// Minimal interface to apply common node setup.
 pub trait NodeConfigCommon {
     fn set_logger(&mut self, logger: LoggerLayer);
     fn set_paths(&mut self, base: &Path);
-    fn addresses(&self) -> (SocketAddr, Option<SocketAddr>);
+    fn addresses(&self) -> NodeAddresses;
 }
 
 /// Shared handle for spawned nodes that exposes common operations.
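Illustration (not part of the diff): a minimal, self-contained sketch of how the new tuple aliases and the NodeConfigCommon signature change fit together. TempDir, Config, and prepare below are simplified stand-ins invented for this example; only the alias shapes and the trait method mirror the hunk above.

use std::net::SocketAddr;

// Simplified stand-ins for the crate's types, for illustration only.
struct TempDir;
struct Config {
    api: SocketAddr,
    testing_api: Option<SocketAddr>,
}

// Same shapes as the aliases introduced in the hunk above.
type NodeAddresses = (SocketAddr, Option<SocketAddr>);
type PreparedNodeConfig<T> = (TempDir, T, SocketAddr, Option<SocketAddr>);

trait NodeConfigCommon {
    fn addresses(&self) -> NodeAddresses;
}

impl NodeConfigCommon for Config {
    fn addresses(&self) -> NodeAddresses {
        (self.api, self.testing_api)
    }
}

// Stand-in for prepare_node_config: the alias names what each tuple slot holds.
fn prepare(config: Config) -> PreparedNodeConfig<Config> {
    let (addr, testing_addr) = config.addresses();
    (TempDir, config, addr, testing_addr)
}

fn main() {
    let config = Config {
        api: "127.0.0.1:18080".parse().expect("valid socket address"),
        testing_api: None,
    };
    let (_dir, _config, addr, testing_addr) = prepare(config);
    println!("api: {addr}, testing api: {testing_addr:?}");
}

Naming the tuples keeps every signature that passes them around readable without committing to a dedicated struct.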
@@ -71,7 +78,7 @@ impl<T> NodeHandle<T> {
             if !is_running(&mut self.child) {
                 return;
             }
-            time::sleep(Duration::from_millis(100)).await;
+            time::sleep(EXIT_POLL_INTERVAL).await;
         }
     })
     .await
@@ -85,7 +92,7 @@ pub fn prepare_node_config<T: NodeConfigCommon>(
     mut config: T,
     log_prefix: &str,
     enable_logging: bool,
-) -> (TempDir, T, SocketAddr, Option<SocketAddr>) {
+) -> PreparedNodeConfig<T> {
     let dir = create_tempdir().expect("tempdir");
 
     debug!(dir = %dir.path().display(), log_prefix, enable_logging, "preparing node config");
@@ -140,12 +147,12 @@ where
     let mut handle = NodeHandle::new(child, dir, config, ApiClient::new(addr, testing_addr));
 
     // Wait for readiness via consensus_info
-    let ready = time::timeout(Duration::from_secs(60), async {
+    let ready = time::timeout(STARTUP_TIMEOUT, async {
         loop {
             if handle.api.consensus_info().await.is_ok() {
                 break;
             }
-            time::sleep(Duration::from_millis(100)).await;
+            time::sleep(STARTUP_POLL_INTERVAL).await;
         }
     })
     .await;
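Illustration (not part of the diff): the startup wait above is a timeout-plus-poll loop; the sketch below reproduces that pattern with the named constants, assuming a tokio runtime with timer and macro features enabled. probe_ready is a hypothetical stand-in for handle.api.consensus_info().

use std::time::Duration;
use tokio::time;

// Named retry policy, mirroring the constants introduced in the diff.
const STARTUP_POLL_INTERVAL: Duration = Duration::from_millis(100);
const STARTUP_TIMEOUT: Duration = Duration::from_secs(60);

// Hypothetical readiness probe; pretends the node answers on the third try.
async fn probe_ready(attempt: u32) -> bool {
    attempt >= 3
}

#[tokio::main]
async fn main() {
    let mut attempt = 0;
    // Poll until the probe succeeds, or give up after STARTUP_TIMEOUT.
    let ready = time::timeout(STARTUP_TIMEOUT, async {
        loop {
            attempt += 1;
            if probe_ready(attempt).await {
                break;
            }
            time::sleep(STARTUP_POLL_INTERVAL).await;
        }
    })
    .await;

    match ready {
        Ok(()) => println!("ready after {attempt} attempts"),
        Err(_) => println!("timed out after {STARTUP_TIMEOUT:?}"),
    }
}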
@@ -1,5 +1,4 @@
 use std::{
-    net::SocketAddr,
     ops::Deref,
     path::{Path, PathBuf},
     time::Duration,
@@ -18,7 +17,7 @@ use crate::{
         common::{
             binary::{BinaryConfig, BinaryResolver},
             lifecycle::{kill::kill_child, monitor::is_running},
-            node::{NodeConfigCommon, NodeHandle, spawn_node},
+            node::{NodeAddresses, NodeConfigCommon, NodeHandle, spawn_node},
         },
     },
 };
@@ -103,7 +102,7 @@ impl NodeConfigCommon for Config {
         );
     }
 
-    fn addresses(&self) -> (SocketAddr, Option<SocketAddr>) {
+    fn addresses(&self) -> NodeAddresses {
         (
             self.http.backend_settings.address,
             Some(self.testing_http.backend_settings.address),
@@ -14,7 +14,7 @@ use crate::{
         common::{
             binary::{BinaryConfig, BinaryResolver},
             lifecycle::{kill::kill_child, monitor::is_running},
-            node::{NodeConfigCommon, NodeHandle, spawn_node},
+            node::{NodeAddresses, NodeConfigCommon, NodeHandle, spawn_node},
         },
     },
 };
@@ -104,7 +104,7 @@ impl NodeConfigCommon for Config {
         );
     }
 
-    fn addresses(&self) -> (std::net::SocketAddr, Option<std::net::SocketAddr>) {
+    fn addresses(&self) -> NodeAddresses {
         (
             self.http.backend_settings.address,
             Some(self.testing_http.backend_settings.address),
@@ -14,6 +14,9 @@ use crate::scenario::{
 
 type WorkloadOutcome = Result<(), DynError>;
 
+const COOLDOWN_BLOCK_INTERVAL_MULTIPLIER: f64 = 5.0;
+const MIN_NODE_CONTROL_COOLDOWN: Duration = Duration::from_secs(30);
+
 /// Represents a fully prepared environment capable of executing a scenario.
 pub struct Runner {
     context: Arc<RunContext>,
@@ -171,7 +174,7 @@ impl Runner {
             if interval.is_zero() {
                 return None;
             }
-            let mut wait = interval.mul_f64(5.0);
+            let mut wait = interval.mul_f64(COOLDOWN_BLOCK_INTERVAL_MULTIPLIER);
             // Expectations observe blocks via `BlockFeed`, which ultimately
             // follows the chain information returned by `consensus_info`.
             // When the consensus uses a security parameter (finality depth),
@@ -186,12 +189,11 @@
                 .security_param;
             wait = wait.max(interval.mul_f64(security_param.get() as f64));
             if needs_stabilization {
-                let minimum = Duration::from_secs(30);
-                wait = wait.max(minimum);
+                wait = wait.max(MIN_NODE_CONTROL_COOLDOWN);
             }
             Some(wait)
         } else if needs_stabilization {
-            Some(Duration::from_secs(30))
+            Some(MIN_NODE_CONTROL_COOLDOWN)
         } else {
             None
         }
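Illustration (not part of the diff): a simplified reconstruction of the cooldown calculation with the two named values. It assumes a plain u64 security parameter and folds the logic into one free function; the real Runner method also covers the case where no block interval is configured at all.

use std::time::Duration;

const COOLDOWN_BLOCK_INTERVAL_MULTIPLIER: f64 = 5.0;
const MIN_NODE_CONTROL_COOLDOWN: Duration = Duration::from_secs(30);

fn cooldown(interval: Duration, security_param: u64, needs_stabilization: bool) -> Option<Duration> {
    if interval.is_zero() {
        return None;
    }
    // Wait for a handful of blocks, and at least as long as the finality
    // depth implied by the security parameter.
    let mut wait = interval.mul_f64(COOLDOWN_BLOCK_INTERVAL_MULTIPLIER);
    wait = wait.max(interval.mul_f64(security_param as f64));
    if needs_stabilization {
        wait = wait.max(MIN_NODE_CONTROL_COOLDOWN);
    }
    Some(wait)
}

fn main() {
    // 2 s blocks, security parameter 20, node-control workload: 40 s cooldown.
    let wait = cooldown(Duration::from_secs(2), 20, true);
    assert_eq!(wait, Some(Duration::from_secs(40)));
    println!("cooldown: {wait:?}");
}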
@@ -25,6 +25,8 @@ pub struct Topology {
     pub(crate) executors: Vec<Executor>,
 }
 
+pub type DeployedNodes = (Vec<Validator>, Vec<Executor>);
+
 impl Topology {
     pub async fn spawn(config: TopologyConfig) -> Self {
         let generated = TopologyBuilder::new(config.clone()).build();
@@ -75,7 +77,7 @@ impl Topology {
         config: Vec<GeneralConfig>,
         n_validators: usize,
         n_executors: usize,
-    ) -> (Vec<Validator>, Vec<Executor>) {
+    ) -> DeployedNodes {
        let mut validators = Vec::new();
        for i in 0..n_validators {
            let config = create_validator_config(config[i].clone());
@@ -3,6 +3,8 @@ use nomos_da_network_core::swarm::BalancerStats;
 use super::ReadinessCheck;
 use crate::topology::deployment::Topology;
 
+const POLL_INTERVAL: std::time::Duration = std::time::Duration::from_secs(1);
+
 pub struct DaBalancerReadiness<'a> {
     pub(crate) topology: &'a Topology,
     pub(crate) labels: &'a [String],
@@ -55,7 +57,7 @@ impl<'a> ReadinessCheck<'a> for DaBalancerReadiness<'a> {
     }
 
     fn poll_interval(&self) -> std::time::Duration {
-        std::time::Duration::from_secs(1)
+        POLL_INTERVAL
     }
 }
 
@@ -12,6 +12,9 @@ use tokio::time::{sleep, timeout};
 
 use crate::adjust_timeout;
 
+const DEFAULT_POLL_INTERVAL: Duration = Duration::from_millis(200);
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);
+
 #[derive(Debug, Error)]
 pub enum ReadinessError {
     #[error("{message}")]
@@ -29,11 +32,11 @@ pub trait ReadinessCheck<'a> {
     fn timeout_message(&self, data: Self::Data) -> String;
 
     fn poll_interval(&self) -> Duration {
-        Duration::from_millis(200)
+        DEFAULT_POLL_INTERVAL
     }
 
     async fn wait(&'a self) -> Result<(), ReadinessError> {
-        let timeout_duration = adjust_timeout(Duration::from_secs(60));
+        let timeout_duration = adjust_timeout(DEFAULT_TIMEOUT);
         let poll_interval = self.poll_interval();
         let mut data = self.collect().await;
 
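Illustration (not part of the diff): a self-contained sketch of the default-plus-override poll-interval pattern used by ReadinessCheck, with the shared wait loop written as a free function rather than a trait default method. Readiness, CountdownCheck, and wait_ready are invented for this example; only the two constants mirror the hunk above. Assumes a tokio runtime.

use std::time::Duration;
use tokio::time::{sleep, timeout};

const DEFAULT_POLL_INTERVAL: Duration = Duration::from_millis(200);
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);

// Simplified stand-in for the crate's ReadinessCheck trait.
trait Readiness {
    // Implementors report whether the subsystem is ready yet.
    fn is_ready(&mut self) -> bool;

    // Default cadence; an implementor can override it, as DaBalancerReadiness
    // does with its own POLL_INTERVAL in the hunk further up.
    fn poll_interval(&self) -> Duration {
        DEFAULT_POLL_INTERVAL
    }
}

// Shared wait loop: poll at the check's cadence until ready or timed out.
async fn wait_ready<R: Readiness>(check: &mut R) -> Result<(), &'static str> {
    timeout(DEFAULT_TIMEOUT, async {
        while !check.is_ready() {
            sleep(check.poll_interval()).await;
        }
    })
    .await
    .map_err(|_| "readiness check timed out")
}

// Becomes ready after a few polls and uses a tighter cadence than the default.
struct CountdownCheck {
    remaining: u32,
}

impl Readiness for CountdownCheck {
    fn is_ready(&mut self) -> bool {
        if self.remaining == 0 {
            true
        } else {
            self.remaining -= 1;
            false
        }
    }

    fn poll_interval(&self) -> Duration {
        Duration::from_millis(50)
    }
}

#[tokio::main]
async fn main() {
    let mut check = CountdownCheck { remaining: 3 };
    wait_ready(&mut check).await.expect("check became ready");
    println!("ready");
}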