refactor(tests): Gather utilities (#951)

Álex authored on 2024-12-19 18:09:21 +01:00; committed by GitHub
parent 83539a075a
commit 8f79bafe55
10 changed files with 74 additions and 130 deletions

@@ -34,6 +34,7 @@ parallel = [
     "rayon",
     "kzgrs/parallel"
 ]
+testutils = []

 [[bench]]
 name = "encoder"

@@ -3,6 +3,8 @@ pub mod dispersal;
 pub mod encoder;
 pub mod global;
 pub mod reconstruction;
+#[cfg(feature = "testutils")]
+pub mod testutils;
 pub mod verifier;

 pub use kzgrs::KzgRsError;
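
Gating the module behind `#[cfg(feature = "testutils")]` keeps the helpers out of normal builds: the empty `testutils = []` feature declared above pulls in no extra dependencies and exists only so that consumers can opt in to compiling the test utilities, which is exactly what the dependent crates' Cargo.toml changes further down do.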

@@ -0,0 +1,40 @@
+// STD
+// Crates
+use nomos_core::da::DaEncoder;
+// Internal
+use crate::common::blob::DaBlob;
+use crate::testutils::encoder::get_encoder;
+
+pub fn get_default_da_blob_data() -> Vec<u8> {
+    vec![
+        49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+    ]
+}
+
+pub fn get_da_blob(data: Option<Vec<u8>>) -> DaBlob {
+    let encoder = get_encoder();
+    let data = data.unwrap_or_else(get_default_da_blob_data);
+    let encoded_data = encoder.encode(&data).unwrap();
+    let columns: Vec<_> = encoded_data.extended_data.columns().collect();
+
+    let index = 0;
+    let da_blob = DaBlob {
+        column: columns[index].clone(),
+        column_idx: index
+            .try_into()
+            .expect("Column index shouldn't overflow the target type"),
+        column_commitment: encoded_data.column_commitments[index],
+        aggregated_column_commitment: encoded_data.aggregated_column_commitment,
+        aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
+        rows_commitments: encoded_data.row_commitments.clone(),
+        rows_proofs: encoded_data
+            .rows_proofs
+            .iter()
+            .map(|proofs| proofs.get(index).cloned().unwrap())
+            .collect(),
+    };
+
+    da_blob
+}
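
For illustration, a minimal sketch of how a consuming crate's test might call the new helper once it enables the `testutils` feature (the test name and assertion are hypothetical, not part of this commit):

```rust
#[cfg(test)]
mod tests {
    use kzgrs_backend::testutils::{get_da_blob, get_default_da_blob_data};

    #[test]
    fn default_blob_uses_column_zero() {
        // `None` falls back to the 31-byte default payload above.
        let blob = get_da_blob(None);
        assert_eq!(blob.column_idx, 0);

        // Callers that need specific contents pass them explicitly.
        let _custom = get_da_blob(Some(get_default_da_blob_data()));
    }
}
```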

@@ -0,0 +1,9 @@
+// Internal
+use crate::encoder;
+
+const ENCODER_DOMAIN_SIZE: usize = 16;
+
+pub fn get_encoder() -> encoder::DaEncoder {
+    let params = encoder::DaEncoderParams::default_with(ENCODER_DOMAIN_SIZE);
+    encoder::DaEncoder::new(params)
+}
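
And a sketch of using the shared encoder directly, for tests that need the intermediate encoded data rather than a finished `DaBlob` (the method calls mirror those in the blob helper above; the function itself is illustrative):

```rust
use kzgrs_backend::testutils::get_encoder;
use nomos_core::da::DaEncoder as _;

fn encode_default_sized_payload() {
    let encoder = get_encoder();
    // Same 31-byte payload length the blob helper defaults to.
    let data = vec![49u8; 31];
    let encoded = encoder.encode(&data).expect("encoding should succeed");
    // Each column of the extended data can back one DaBlob.
    let columns: Vec<_> = encoded.extended_data.columns().collect();
    assert!(!columns.is_empty());
}
```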

@@ -0,0 +1,6 @@
+// Internal
+pub mod blob;
+pub mod encoder;
+// Exports
+pub use blob::*;
+pub use encoder::*;
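
The glob re-exports flatten the module, so callers can write `use kzgrs_backend::testutils::get_da_blob;` instead of reaching into the `blob` submodule; that is the import style the updated tests below adopt.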

@@ -28,4 +28,4 @@ thiserror = "1.0"
 tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
 libp2p = { version = "0.53", features = ["ed25519", "ping", "macros", "quic", "tcp", "yamux", "noise"] }
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+kzgrs-backend = { path = "../../kzgrs-backend", features = ["testutils"] }

@@ -243,52 +243,14 @@ where
 mod tests {
     use super::*;
     use futures::task::{waker_ref, ArcWake};
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder;
-    use kzgrs_backend::encoder::DaEncoderParams;
+    use kzgrs_backend::testutils::get_da_blob;
     use libp2p::{identity, PeerId};
-    use nomos_core::da::{BlobId, DaEncoder};
+    use nomos_core::da::BlobId;
     use nomos_da_messages::common::Blob;
     use std::collections::HashSet;
     use std::sync::Arc;
     use std::task::{Context, Poll};

-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob() -> DaBlob {
-        let encoder = get_encoder();
-        let data = vec![
-            49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-        ];
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
-
     #[derive(Clone, Debug)]
     struct MockMembershipHandler {
         membership: HashMap<PeerId, HashSet<SubnetworkId>>,
@@ -302,6 +264,10 @@ mod tests {
             self.membership.get(peer_id).cloned().unwrap_or_default()
         }

+        fn is_allowed(&self, _id: &Self::Id) -> bool {
+            unimplemented!()
+        }
+
         fn members_of(&self, subnetwork: &Self::NetworkId) -> HashSet<Self::Id> {
             self.membership
                 .iter()
@@ -315,10 +281,6 @@ mod tests {
                 .collect()
         }

-        fn is_allowed(&self, _id: &Self::Id) -> bool {
-            unimplemented!()
-        }
-
         fn members(&self) -> HashSet<Self::Id> {
             unimplemented!()
         }
@@ -434,7 +396,7 @@ mod tests {
         }

         // Simulate sending a message from the first behavior.
-        let message = DaMessage::new(Blob::new(BlobId::from([0; 32]), get_da_blob()), 0);
+        let message = DaMessage::new(Blob::new(BlobId::from([0; 32]), get_da_blob(None)), 0);
         all_behaviours[0].replicate_message(message.clone());

         let waker = Arc::new(TestWaker);

@@ -7,58 +7,18 @@ mod test {
     use crate::protocols::replication::handler::DaMessage;
     use crate::test_utils::AllNeighbours;
     use futures::StreamExt;
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder;
-    use kzgrs_backend::encoder::DaEncoderParams;
+    use kzgrs_backend::testutils::get_da_blob;
     use libp2p::identity::Keypair;
     use libp2p::swarm::SwarmEvent;
     use libp2p::{quic, Multiaddr, PeerId, Swarm};
     use log::info;
-    use nomos_core::da::{BlobId, DaEncoder};
+    use nomos_core::da::BlobId;
     use nomos_da_messages::common::Blob;
     use std::time::Duration;
     use tracing_subscriber::fmt::TestWriter;
     use tracing_subscriber::EnvFilter;

-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob(data: Option<Vec<u8>>) -> DaBlob {
-        let encoder = get_encoder();
-        let data = data.unwrap_or_else(|| {
-            vec![
-                49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-                0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            ]
-        });
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
-
     #[tokio::test]
     async fn test_connects_and_receives_replication_messages() {
         fn get_swarm(

@@ -12,3 +12,4 @@ tokio = "1"

 [dev-dependencies]
 tokio = { version = "1", features = ["macros"] }
+kzgrs-backend = { path = "../../kzgrs-backend", features = ["testutils"] }

@@ -104,50 +104,13 @@ mod tests {
     use crate::common::Blob;
     use crate::dispersal::{DispersalError, DispersalErrorType, DispersalRequest};
    use futures::io::BufReader;
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder::{self, DaEncoderParams};
-    use nomos_core::da::{BlobId, DaEncoder};
-
-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob() -> DaBlob {
-        let encoder = get_encoder();
-        let data = vec![
-            49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-        ];
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
+    use kzgrs_backend::testutils::get_da_blob;
+    use nomos_core::da::BlobId;

     #[tokio::test]
     async fn pack_and_unpack() -> Result<()> {
         let blob_id = BlobId::from([0; 32]);
-        let data = get_da_blob();
+        let data = get_da_blob(None);
         let blob = Blob::new(blob_id, data);
         let subnetwork_id = 0;
         let message = DispersalRequest::new(blob, subnetwork_id);
@@ -162,7 +125,7 @@ mod tests {
     #[tokio::test]
     async fn pack_to_writer_and_unpack_from_reader() -> Result<()> {
         let blob_id = BlobId::from([0; 32]);
-        let data = get_da_blob();
+        let data = get_da_blob(None);
         let blob = Blob::new(blob_id, data);
         let subnetwork_id = 0;
         let message = DispersalRequest::new(blob, subnetwork_id);