test(unhappy): mask `sender` when comparing timeout_qcs (#458)

Youngjoon Lee 2023-10-17 00:20:28 +09:00 committed by GitHub
parent 39d45d3c09
commit 81c34036f7
1 changed file with 70 additions and 23 deletions

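The test collects each node's last view timeout QC into a HashSet and asserts that the set ends up with exactly one entry. Because a TimeoutQc also records which node sent it, two QCs that agree on the view and high QC but come from different senders were counted as two distinct values, so the check could fail even when the nodes agreed on the timeout. The sketch below is a minimal, self-contained illustration of that problem and of the masking fix; the `Qc` struct and its fields are a hypothetical stand-in, not the real consensus_engine::TimeoutQc.

    use std::collections::HashSet;

    // Hypothetical stand-in for TimeoutQc: just enough fields to show the idea.
    #[derive(Clone, PartialEq, Eq, Hash, Debug)]
    struct Qc {
        view: u64,
        sender: [u8; 32],
    }

    fn main() {
        // Two nodes report a timeout QC for the same view, each naming itself as sender.
        let a = Qc { view: 19, sender: [1u8; 32] };
        let b = Qc { view: 19, sender: [2u8; 32] };

        // Compared as-is, the HashSet sees two distinct values, so an
        // `assert_eq!(set.len(), 1)` consensus check fails spuriously.
        let raw: HashSet<_> = [a.clone(), b.clone()].into_iter().collect();
        assert_eq!(raw.len(), 2);

        // Masking the sender with a fixed dummy value before collecting makes
        // QCs that agree on everything else compare (and hash) equal.
        const DUMMY_SENDER: [u8; 32] = [0u8; 32];
        let masked: HashSet<_> = [a, b]
            .into_iter()
            .map(|qc| Qc { sender: DUMMY_SENDER, ..qc })
            .collect();
        assert_eq!(masked.len(), 1);
    }

The diff applies the same idea with the real TimeoutQc, rebuilding each QC with a DUMMY_NODE_ID as the sender before the set comparison.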

@@ -1,10 +1,12 @@
-use consensus_engine::View;
+use consensus_engine::{Block, NodeId, TimeoutQc, View};
 use fraction::Fraction;
 use futures::stream::{self, StreamExt};
 use nomos_consensus::CarnotInfo;
 use std::collections::HashSet;
 use tests::{ConsensusConfig, MixNode, Node, NomosNode, SpawnConfig};
 
 const TARGET_VIEW: View = View::new(20);
+const DUMMY_NODE_ID: NodeId = NodeId::new([0u8; 32]);
 
 #[tokio::test]
 async fn ten_nodes_one_down() {
@@ -46,30 +48,75 @@ async fn ten_nodes_one_down() {
         .collect::<Vec<_>>()
         .await;
 
     // check that they have the same block
-    let target_blocks = infos
-        .iter()
-        .map(|i| i.safe_blocks.values().find(|b| b.view == TARGET_VIEW))
+    let target_block = assert_block_consensus(&infos, TARGET_VIEW);
+    // If no node has the target block, check that TARGET_VIEW was reached by timeout_qc.
+    if target_block.is_none() {
+        println!("No node has the block with {TARGET_VIEW:?}. Checking timeout_qcs...");
+        assert_timeout_qc_consensus(&infos, TARGET_VIEW.prev());
+    }
+}
+
+// Check if all nodes have the same block at the specific view.
+fn assert_block_consensus<'a>(
+    consensus_infos: impl IntoIterator<Item = &'a CarnotInfo>,
+    view: View,
+) -> Option<Block> {
+    let blocks = consensus_infos
+        .into_iter()
+        .map(|i| i.safe_blocks.values().find(|b| b.view == view))
         .collect::<HashSet<_>>();
     // Every nodes must have the same target block (Some(block))
     // , or no node must have it (None).
-    assert_eq!(target_blocks.len(), 1);
+    assert_eq!(
+        blocks.len(),
+        1,
+        "multiple blocks found at {:?}: {:?}",
+        view,
+        blocks
+    );
-    // If no node has the target block, check that TARGET_VIEW was reached by timeout_qc.
-    let target_block = target_blocks.iter().next().unwrap();
-    if target_block.is_none() {
-        println!("No node has the block with {TARGET_VIEW:?}. Checking timeout_qcs...");
+
+    blocks.iter().next().unwrap().cloned()
+}
-        let timeout_qcs = infos
-            .iter()
-            .map(|i| i.last_view_timeout_qc.clone())
+
+// Check if all nodes have the same timeout_qc at the specific view.
+fn assert_timeout_qc_consensus<'a>(
+    consensus_infos: impl IntoIterator<Item = &'a CarnotInfo>,
+    view: View,
+) -> TimeoutQc {
+    let timeout_qcs = consensus_infos
+        .into_iter()
+        .map(|i| {
+            i.last_view_timeout_qc.clone().map(|timeout_qc| {
+                // Masking the `sender` field because we want timeout_qcs from different
+                // senders to be considered the same if all other fields are the same.
+                TimeoutQc::new(
+                    timeout_qc.view(),
+                    timeout_qc.high_qc().clone(),
+                    DUMMY_NODE_ID,
+                )
+            })
+        })
         .collect::<HashSet<_>>();
-        assert_eq!(timeout_qcs.len(), 1);
+    assert_eq!(
+        timeout_qcs.len(),
+        1,
+        "multiple timeout_qcs found at {:?}: {:?}",
+        view,
+        timeout_qcs
+    );
-        let timeout_qc = timeout_qcs.iter().next().unwrap().clone();
-        assert!(timeout_qc.is_some());
-        // NOTE: This check could be failed if other timeout_qc had occured before `infos` were gathered.
+
+    let timeout_qc = timeout_qcs
+        .iter()
+        .next()
+        .unwrap()
+        .clone()
+        .expect("collected timeout_qc shouldn't be None");
+    // NOTE: This check could be failed if other timeout_qcs had occured
+    // before `consensus_infos` were gathered.
     // But it should be okay as long as the `timeout` is not too short.
-        assert_eq!(timeout_qc.unwrap().view(), TARGET_VIEW.prev());
-    }
+    assert_eq!(timeout_qc.view(), view);
+    timeout_qc
 }
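If the masking ever needs to be reused elsewhere in the tests, it could also be factored into a named helper. The sketch below is a hypothetical refactor, not part of this commit; it assumes only the accessors the diff itself calls (`view()`, `high_qc()`), the `DUMMY_NODE_ID` constant, and the imports introduced in the first hunk.

    // Hypothetical helper: strip the sender so that timeout QCs from
    // different nodes can be compared for agreement on everything else.
    fn mask_sender(timeout_qc: &TimeoutQc) -> TimeoutQc {
        TimeoutQc::new(
            timeout_qc.view(),
            timeout_qc.high_qc().clone(),
            DUMMY_NODE_ID,
        )
    }

    // `assert_timeout_qc_consensus` could then build its set as:
    //     .map(|i| i.last_view_timeout_qc.clone().map(|qc| mask_sender(&qc)))
    //     .collect::<HashSet<_>>();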