Better tests (#232)
* add basic integration tests
* add a way to configure overlay threshold
* Save logs to file in case of failure
* Increase number of test nodes to 10
* fix tests
* use fraction instead of tuple
* fmt
parent 5199ee12e9
commit 90cf29bf86
@@ -13,6 +13,7 @@ sha2 = "0.10"
 rand = "0.8"
 rand_chacha = "0.3"
 thiserror = "1"
+fraction = { version = "0.13" }

 [features]
 default = []

@@ -415,6 +415,7 @@ mod test {
             FlatOverlay::new(Settings {
                 nodes,
                 leader: RoundRobin::default(),
+                leader_super_majority_threshold: None,
             }),
         )
     }

@@ -1,12 +1,16 @@
 use super::LeaderSelection;
 use crate::{Committee, NodeId, Overlay};
+use fraction::{Fraction, ToPrimitive};
 use serde::{Deserialize, Serialize};
+const LEADER_SUPER_MAJORITY_THRESHOLD_NUM: u64 = 2;
+const LEADER_SUPER_MAJORITY_THRESHOLD_DEN: u64 = 3;

 #[derive(Clone, Debug, PartialEq)]
 /// Flat overlay with a single committee and round robin leader selection.
 pub struct FlatOverlay<L: LeaderSelection> {
     nodes: Vec<NodeId>,
     leader: L,
+    leader_threshold: Fraction,
 }

 impl<L> Overlay for FlatOverlay<L>

@@ -16,8 +20,23 @@ where
     type Settings = Settings<L>;
     type LeaderSelection = L;

-    fn new(Settings { leader, nodes }: Self::Settings) -> Self {
-        Self { nodes, leader }
+    fn new(
+        Settings {
+            leader,
+            nodes,
+            leader_super_majority_threshold,
+        }: Self::Settings,
+    ) -> Self {
+        Self {
+            nodes,
+            leader,
+            leader_threshold: leader_super_majority_threshold.unwrap_or_else(|| {
+                Fraction::new(
+                    LEADER_SUPER_MAJORITY_THRESHOLD_NUM,
+                    LEADER_SUPER_MAJORITY_THRESHOLD_DEN,
+                )
+            }),
+        }
     }

     fn root_committee(&self) -> crate::Committee {

@@ -69,7 +88,11 @@ where
     }

     fn leader_super_majority_threshold(&self, _id: NodeId) -> usize {
-        self.nodes.len() * 2 / 3 + 1
+        // self.leader_threshold is a fraction <num>/<den> representing the super majority threshold
+        (Fraction::from(self.nodes.len()) * self.leader_threshold)
+            .floor()
+            .to_usize()
+            .unwrap()
     }

     fn update_leader_selection<F, E>(&self, f: F) -> Result<Self, E>

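A quick worked example of the new threshold computation may help; this is an illustrative sketch, not part of the commit, reusing only the `fraction` 0.13 API (`Fraction::from`, `floor`, `ToPrimitive`) already imported above:

```rust
use fraction::{Fraction, One, ToPrimitive};

// Mirrors the body of leader_super_majority_threshold above.
fn threshold(n_nodes: usize, leader_threshold: Fraction) -> usize {
    (Fraction::from(n_nodes) * leader_threshold)
        .floor()
        .to_usize()
        .unwrap()
}

fn main() {
    // Default 2/3 threshold: with 10 nodes, floor(10 * 2/3) = 6 votes.
    assert_eq!(threshold(10, Fraction::new(2u64, 3u64)), 6);
    // Threshold of 1 (used by the integration tests below): floor(10 * 1) = 10,
    // i.e. the leader waits for every node.
    assert_eq!(threshold(10, Fraction::one()), 10);
}
```

For comparison, the old integer formula `n * 2 / 3 + 1` yields 7 for 10 nodes, so the fraction-based default rounds down where the previous one rounded up.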
@@ -114,5 +137,32 @@ impl LeaderSelection for RoundRobin {
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct Settings<L> {
     pub nodes: Vec<NodeId>,
+    /// A fraction representing the threshold in the form `<num>/<den>`
+    /// Defaults to 2/3
+    #[serde(with = "deser")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub leader_super_majority_threshold: Option<Fraction>,
     pub leader: L,
 }
+
+mod deser {
+    use fraction::Fraction;
+    use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+    use std::str::FromStr;
+
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<Fraction>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        <Option<String>>::deserialize(deserializer)?
+            .map(|s| FromStr::from_str(&s).map_err(de::Error::custom))
+            .transpose()
+    }
+
+    pub fn serialize<S>(value: &Option<Fraction>, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        value.map(|v| v.to_string()).serialize(serializer)
+    }
+}

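For illustration only (not from the commit): the `deser` module above stores the threshold as a plain string and relies on the `fraction` crate's `FromStr`/`Display` implementations, so a config value like "2/3" should round-trip as sketched here:

```rust
use fraction::Fraction;
use std::str::FromStr;

fn main() {
    // Deserialize path: the "num/den" string parses into a Fraction,
    // which is what `deser::deserialize` does via FromStr.
    let threshold = Fraction::from_str("2/3").unwrap();
    assert_eq!(threshold, Fraction::new(2u64, 3u64));

    // Serialize path: `deser::serialize` renders the Fraction back into
    // the same string form via to_string().
    assert_eq!(threshold.to_string(), "2/3");
}
```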
@@ -29,6 +29,7 @@ impl ConsensusEngineTest {
             FlatOverlay::new(Settings {
                 nodes: vec![[0; 32]],
                 leader: RoundRobin::default(),
+                leader_super_majority_threshold: None,
             }),
         );

@@ -31,10 +31,10 @@ pub enum LoggerBackend {

 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct LoggerSettings {
-    backend: LoggerBackend,
-    format: LoggerFormat,
+    pub backend: LoggerBackend,
+    pub format: LoggerFormat,
     #[serde(with = "serde_level")]
-    level: Level,
+    pub level: Level,
 }

 impl Default for LoggerSettings {

@@ -87,6 +87,7 @@ impl SimulationApp {
         let overlay_settings = consensus_engine::overlay::Settings {
             nodes: nodes.to_vec(),
             leader: RoundRobin::new(),
+            leader_super_majority_threshold: None,
         };
         // FIXME: Actually use a proposer and a key to generate random beacon state
         let genesis = nomos_core::block::Block::new(

@@ -21,10 +21,11 @@ secp256k1 = { version = "0.26", features = ["rand"] }
 waku-bindings = "0.1.1"
 reqwest = { version = "0.11", features = ["json"] }
 tempfile = "3.6"
 serde_json = "1"
 serde_yaml = "0.9"
 tokio = "1"
 futures = "0.3"
 async-trait = "0.1"
+fraction = "0.13"

 [[test]]
 name = "test_consensus_happy_path"

@@ -1,5 +1,4 @@
 // std
-use std::io::Read;
 use std::net::SocketAddr;
 use std::process::{Child, Command, Stdio};
 use std::time::Duration;

@@ -8,6 +7,7 @@ use crate::{get_available_port, Node, SpawnConfig, RNG};
 use consensus_engine::overlay::{RoundRobin, Settings};
 use nomos_consensus::{CarnotInfo, CarnotSettings};
 use nomos_http::backends::axum::AxumBackendSettings;
+use nomos_log::{LoggerBackend, LoggerFormat};
 use nomos_network::{
     backends::waku::{WakuConfig, WakuInfo},
     NetworkConfig,

@@ -15,6 +15,7 @@ use nomos_network::{
 use nomos_node::Config;
 use waku_bindings::{Multiaddr, PeerId};
 // crates
+use fraction::{Fraction, One};
 use once_cell::sync::Lazy;
 use rand::Rng;
 use reqwest::Client;

@@ -24,6 +25,7 @@ static CLIENT: Lazy<Client> = Lazy::new(Client::new);
 const NOMOS_BIN: &str = "../target/debug/nomos-node";
 const CARNOT_INFO_API: &str = "carnot/info";
 const NETWORK_INFO_API: &str = "network/info";
+const LOGS_PREFIX: &str = "__logs";

 pub struct NomosNode {
     addr: SocketAddr,

@@ -33,24 +35,34 @@ pub struct NomosNode {

 impl Drop for NomosNode {
     fn drop(&mut self) {
-        let mut output = String::new();
-        if let Some(stdout) = &mut self.child.stdout {
-            stdout.read_to_string(&mut output).unwrap();
+        if std::thread::panicking() {
+            println!("persisting directory at {}", self._tempdir.path().display());
+            // we need ownership of the dir to persist it
+            let dir = std::mem::replace(&mut self._tempdir, tempfile::tempdir().unwrap());
+            // a bit confusing but `into_path` persists the directory
+            let _ = dir.into_path();
         }
-        // self.child.stdout.as_mut().unwrap().read_to_string(&mut output).unwrap();
-        println!("{} stdout: {}", self.addr, output);

         self.child.kill().unwrap();
     }
 }

 impl NomosNode {
-    pub async fn spawn(config: &Config) -> Self {
+    pub async fn spawn(mut config: Config) -> Self {
         // Waku stores the messages in a db file in the current dir, we need a different
         // directory for each node to avoid conflicts
         let dir = tempfile::tempdir().unwrap();
         let mut file = NamedTempFile::new().unwrap();
         let config_path = file.path().to_owned();
-        serde_json::to_writer(&mut file, config).unwrap();

+        // setup logging so that we can intercept it later in testing
+        config.log.backend = LoggerBackend::File {
+            directory: dir.path().to_owned(),
+            prefix: Some(LOGS_PREFIX.into()),
+        };
+        config.log.format = LoggerFormat::Json;
+
+        serde_yaml::to_writer(&mut file, &config).unwrap();
         let child = Command::new(std::env::current_dir().unwrap().join(NOMOS_BIN))
             .arg(&config_path)
             .current_dir(dir.path())

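The `Drop` change above leans on `tempfile`'s ownership rules, which can be hard to read in diff form. Here is a standalone sketch of the same persist-on-panic trick; the `Fixture`/`workdir` names are made up for illustration, and `tempfile` 3.6 is assumed:

```rust
use std::mem;
use tempfile::TempDir;

// Hypothetical stand-in for NomosNode's `_tempdir` field.
struct Fixture {
    workdir: TempDir, // normally deleted when the struct is dropped
}

impl Drop for Fixture {
    fn drop(&mut self) {
        if std::thread::panicking() {
            // `mem::replace` takes ownership of the real TempDir by parking a
            // fresh throwaway one in its place (Drop only gives us `&mut self`)...
            let dir = mem::replace(&mut self.workdir, tempfile::tempdir().unwrap());
            // ...and `into_path` consumes the TempDir without deleting the
            // directory, so the logs stay on disk for inspection after a failure.
            let _ = dir.into_path();
        }
    }
}

fn main() {
    let fixture = Fixture {
        workdir: tempfile::tempdir().unwrap(),
    };
    drop(fixture); // no panic here, so the directory is cleaned up as usual
}
```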
@@ -101,6 +113,28 @@ impl NomosNode {
             .unwrap()
             .swap_remove(0)
     }
+
+    // not async so that we can use this in `Drop`
+    pub fn get_logs_from_file(&self) -> String {
+        println!(
+            "fetching logs from dir {}...",
+            self._tempdir.path().display()
+        );
+        // std::thread::sleep(std::time::Duration::from_secs(50));
+        std::fs::read_dir(self._tempdir.path())
+            .unwrap()
+            .filter_map(|entry| {
+                let entry = entry.unwrap();
+                let path = entry.path();
+                if path.is_file() && path.to_str().unwrap().contains(LOGS_PREFIX) {
+                    Some(path)
+                } else {
+                    None
+                }
+            })
+            .map(|f| std::fs::read_to_string(f).unwrap())
+            .collect::<String>()
+    }
 }

 #[async_trait::async_trait]

@@ -118,10 +152,9 @@ impl Node for NomosNode {
             .iter()
             .map(|id| create_node_config(ids.clone(), *id))
             .collect::<Vec<_>>();
-        let mut nodes = vec![Self::spawn(&configs[0]).await];
+        let mut nodes = vec![Self::spawn(configs.swap_remove(0)).await];
         let listening_addr = nodes[0].get_listening_address().await;
-        configs.drain(0..1);
-        for conf in &mut configs {
+        for mut conf in configs {
             conf.network
                 .backend
                 .initial_peers

@@ -161,6 +194,10 @@ fn create_node_config(nodes: Vec<[u8; 32]>, private_key: [u8; 32]) -> Config {
             overlay_settings: Settings {
                 nodes,
                 leader: RoundRobin::new(),
+                // By setting the leader_threshold to 1 we ensure that all nodes come
+                // online before progressing. This is only necessary until we add a way
+                // to recover past blocks from other nodes.
+                leader_super_majority_threshold: Some(Fraction::one()),
             },
         },
         log: Default::default(),

@@ -5,20 +5,27 @@ use tests::{Node, NomosNode, SpawnConfig};
 const TARGET_VIEW: i64 = 20;

 async fn happy_test(nodes: Vec<NomosNode>) {
-    while stream::iter(&nodes)
-        .any(|n| async move { n.consensus_info().await.current_view < TARGET_VIEW })
-        .await
-    {
-        println!(
-            "waiting... {}",
-            stream::iter(&nodes)
-                .then(|n| async move { format!("{}", n.consensus_info().await.current_view) })
-                .collect::<Vec<_>>()
-                .await
-                .join(" | ")
-        );
-        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
-    }
+    let timeout = std::time::Duration::from_secs(20);
+    let timeout = tokio::time::sleep(timeout);
+    tokio::select! {
+        _ = timeout => panic!("timed out waiting for nodes to reach view {}", TARGET_VIEW),
+        _ = async { while stream::iter(&nodes)
+            .any(|n| async move { n.consensus_info().await.current_view < TARGET_VIEW })
+            .await
+        {
+            println!(
+                "waiting... {}",
+                stream::iter(&nodes)
+                    .then(|n| async move { format!("{}", n.consensus_info().await.current_view) })
+                    .collect::<Vec<_>>()
+                    .await
+                    .join(" | ")
+            );
+            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+        }
+        } => {}
+    };

     let infos = stream::iter(nodes)
         .then(|n| async move { n.consensus_info().await })
         .collect::<Vec<_>>()

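As a side note on the design, the `tokio::select!` above races the polling loop against a 20-second sleep; the same guard could also be expressed with `tokio::time::timeout`. A minimal sketch under that assumption (not part of the commit), with `wait_for_target_view` standing in hypothetically for the polling loop:

```rust
use std::time::Duration;

// Hypothetical stand-in for the view-polling loop in happy_test.
async fn wait_for_target_view(target: i64) {
    // ... poll each node's consensus_info() until current_view >= target ...
    let _ = target;
}

#[tokio::main]
async fn main() {
    // timeout() wraps the future and returns Err(Elapsed) once the deadline passes,
    // which plays the same role as racing the loop against tokio::time::sleep.
    tokio::time::timeout(Duration::from_secs(20), wait_for_target_view(20))
        .await
        .expect("timed out waiting for nodes to reach the target view");
}
```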
@@ -43,7 +50,7 @@ async fn two_nodes_happy() {
 }

 #[tokio::test]
-async fn three_nodes_happy() {
-    let nodes = NomosNode::spawn_nodes(SpawnConfig::Star { n_participants: 3 }).await;
+async fn ten_nodes_happy() {
+    let nodes = NomosNode::spawn_nodes(SpawnConfig::Star { n_participants: 10 }).await;
     happy_test(nodes).await;
 }