refactor(tests): Gather utilities (#951)

parent 83539a075a
commit 8f79bafe55
@@ -34,6 +34,7 @@ parallel = [
   "rayon",
   "kzgrs/parallel"
 ]
+testutils = []
 
 [[bench]]
 name = "encoder"
@@ -3,6 +3,8 @@ pub mod dispersal;
 pub mod encoder;
 pub mod global;
 pub mod reconstruction;
+#[cfg(feature = "testutils")]
+pub mod testutils;
 pub mod verifier;
 
 pub use kzgrs::KzgRsError;
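
Note: the `testutils = []` feature added in the first hunk is deliberately empty; it is a pure compile-time switch, and the `#[cfg(feature = "testutils")]` attribute above keeps the new module out of any build that does not opt in. A minimal sketch of the idiom (module name hypothetical, not part of this diff):

    // Sketch of the gate's effect: when the feature is off, the item does
    // not exist at compile time, so fixture code can never leak into a
    // release binary.
    #[cfg(feature = "testutils")]
    pub mod gated_helpers {
        pub fn fixture_bytes() -> Vec<u8> {
            vec![0u8; 31] // same length as the default fixture payload below
        }
    }
    // Consumers opt in explicitly, e.g. `cargo test --features testutils`,
    // or through a dev-dependency as the Cargo.toml hunks below do.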
nomos-da/kzgrs-backend/src/testutils/blob.rs (new file, +40 lines)
@@ -0,0 +1,40 @@
+// STD
+// Crates
+use nomos_core::da::DaEncoder;
+// Internal
+use crate::common::blob::DaBlob;
+use crate::testutils::encoder::get_encoder;
+
+pub fn get_default_da_blob_data() -> Vec<u8> {
+    vec![
+        49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+    ]
+}
+
+pub fn get_da_blob(data: Option<Vec<u8>>) -> DaBlob {
+    let encoder = get_encoder();
+
+    let data = data.unwrap_or_else(get_default_da_blob_data);
+    let encoded_data = encoder.encode(&data).unwrap();
+    let columns: Vec<_> = encoded_data.extended_data.columns().collect();
+
+    let index = 0;
+    let da_blob = DaBlob {
+        column: columns[index].clone(),
+        column_idx: index
+            .try_into()
+            .expect("Column index shouldn't overflow the target type"),
+        column_commitment: encoded_data.column_commitments[index],
+        aggregated_column_commitment: encoded_data.aggregated_column_commitment,
+        aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
+        rows_commitments: encoded_data.row_commitments.clone(),
+        rows_proofs: encoded_data
+            .rows_proofs
+            .iter()
+            .map(|proofs| proofs.get(index).cloned().unwrap())
+            .collect(),
+    };
+
+    da_blob
+}
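
The `Option<Vec<u8>>` parameter lets callers take the canonical fixture with `get_da_blob(None)` or supply their own payload. A usage sketch, assuming a test in a crate with `testutils` enabled; the module name and the custom payload are illustrative:

    #[cfg(test)]
    mod blob_fixture_usage {
        use kzgrs_backend::testutils::get_da_blob;

        #[test]
        fn default_and_custom_payloads() {
            let _default = get_da_blob(None); // canonical single-column fixture
            let _custom = get_da_blob(Some(vec![7u8; 31])); // same length as the default
        }
    }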
nomos-da/kzgrs-backend/src/testutils/encoder.rs (new file, +9 lines)
@@ -0,0 +1,9 @@
+// Internal
+use crate::encoder;
+
+const ENCODER_DOMAIN_SIZE: usize = 16;
+
+pub fn get_encoder() -> encoder::DaEncoder {
+    let params = encoder::DaEncoderParams::default_with(ENCODER_DOMAIN_SIZE);
+    encoder::DaEncoder::new(params)
+}
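
This helper centralizes the domain size of 16 that each test file previously hard-coded as `DOMAIN_SIZE`. A usage sketch; the test wrapper and the `expect` message are illustrative:

    #[cfg(test)]
    mod encoder_fixture_usage {
        use kzgrs_backend::testutils::get_encoder;
        use nomos_core::da::DaEncoder as _; // trait providing `encode`

        #[test]
        fn encodes_default_sized_payload() {
            let encoder = get_encoder();
            let _encoded = encoder
                .encode(&vec![0u8; 31])
                .expect("31-byte payload should encode under the domain-16 encoder");
        }
    }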
nomos-da/kzgrs-backend/src/testutils/mod.rs (new file, +6 lines)
@@ -0,0 +1,6 @@
+// Internal
+pub mod blob;
+pub mod encoder;
+// Exports
+pub use blob::*;
+pub use encoder::*;
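
Because `mod.rs` glob re-exports both submodules, call sites can use the flat path; the longer module path stays valid too:

    // Flat path, via `pub use blob::*` (the form used throughout this PR):
    use kzgrs_backend::testutils::get_da_blob;
    // Equivalent long form: kzgrs_backend::testutils::blob::get_da_blob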
@@ -28,4 +28,4 @@ thiserror = "1.0"
 tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
 libp2p = { version = "0.53", features = ["ed25519", "ping", "macros", "quic", "tcp", "yamux", "noise"] }
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+kzgrs-backend = { path = "../../kzgrs-backend", features = ["testutils"] }
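
Enabling the feature from `[dev-dependencies]` keeps the `testutils` code out of the crate's normal dependency graph; it is compiled only for tests. The same wiring appears again in the second `Cargo.toml` hunk further down. A hypothetical smoke test that relies on it:

    // Compiles only because the dev-dependency above enables
    // kzgrs-backend's `testutils` feature.
    #[cfg(test)]
    mod testutils_smoke {
        use kzgrs_backend::testutils::{get_da_blob, get_encoder};

        #[test]
        fn fixtures_are_constructible() {
            let _encoder = get_encoder();
            let _blob = get_da_blob(None); // default 31-byte payload
        }
    }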
@@ -243,52 +243,14 @@ where
 mod tests {
     use super::*;
     use futures::task::{waker_ref, ArcWake};
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder;
-    use kzgrs_backend::encoder::DaEncoderParams;
+    use kzgrs_backend::testutils::get_da_blob;
     use libp2p::{identity, PeerId};
-    use nomos_core::da::{BlobId, DaEncoder};
+    use nomos_core::da::BlobId;
     use nomos_da_messages::common::Blob;
     use std::collections::HashSet;
     use std::sync::Arc;
     use std::task::{Context, Poll};
 
-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob() -> DaBlob {
-        let encoder = get_encoder();
-        let data = vec![
-            49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-        ];
-
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
-
     #[derive(Clone, Debug)]
     struct MockMembershipHandler {
         membership: HashMap<PeerId, HashSet<SubnetworkId>>,
@@ -302,6 +264,10 @@ mod tests {
         self.membership.get(peer_id).cloned().unwrap_or_default()
     }
 
+    fn is_allowed(&self, _id: &Self::Id) -> bool {
+        unimplemented!()
+    }
+
     fn members_of(&self, subnetwork: &Self::NetworkId) -> HashSet<Self::Id> {
         self.membership
             .iter()
@@ -315,10 +281,6 @@ mod tests {
             .collect()
     }
 
-    fn is_allowed(&self, _id: &Self::Id) -> bool {
-        unimplemented!()
-    }
-
     fn members(&self) -> HashSet<Self::Id> {
         unimplemented!()
     }
@@ -434,7 +396,7 @@ mod tests {
        }
 
        // Simulate sending a message from the first behavior.
-       let message = DaMessage::new(Blob::new(BlobId::from([0; 32]), get_da_blob()), 0);
+       let message = DaMessage::new(Blob::new(BlobId::from([0; 32]), get_da_blob(None)), 0);
        all_behaviours[0].replicate_message(message.clone());
 
        let waker = Arc::new(TestWaker);
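
Existing call sites only needed `None` threaded through; a custom payload slots in without touching the shared helper. Sketch of a variant call site (payload and blob id hypothetical; the payload keeps the default's 31-byte length so the domain-16 encoder should accept it):

    let blob = get_da_blob(Some(vec![1u8; 31]));
    let message = DaMessage::new(Blob::new(BlobId::from([1; 32]), blob), 0);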
@@ -7,58 +7,18 @@ mod test {
     use crate::protocols::replication::handler::DaMessage;
     use crate::test_utils::AllNeighbours;
     use futures::StreamExt;
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder;
-    use kzgrs_backend::encoder::DaEncoderParams;
+    use kzgrs_backend::testutils::get_da_blob;
     use libp2p::identity::Keypair;
     use libp2p::swarm::SwarmEvent;
     use libp2p::{quic, Multiaddr, PeerId, Swarm};
     use log::info;
-    use nomos_core::da::{BlobId, DaEncoder};
+    use nomos_core::da::BlobId;
     use nomos_da_messages::common::Blob;
     use std::time::Duration;
     use tracing_subscriber::fmt::TestWriter;
     use tracing_subscriber::EnvFilter;
 
-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob(data: Option<Vec<u8>>) -> DaBlob {
-        let encoder = get_encoder();
-
-        let data = data.unwrap_or_else(|| {
-            vec![
-                49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-                0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            ]
-        });
-
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
-
     #[tokio::test]
     async fn test_connects_and_receives_replication_messages() {
         fn get_swarm(
@@ -12,3 +12,4 @@ tokio = "1"
 
 [dev-dependencies]
 tokio = { version = "1", features = ["macros"] }
+kzgrs-backend = { path = "../../kzgrs-backend", features = ["testutils"] }
@@ -104,50 +104,13 @@ mod tests {
     use crate::common::Blob;
     use crate::dispersal::{DispersalError, DispersalErrorType, DispersalRequest};
     use futures::io::BufReader;
-    use kzgrs_backend::common::blob::DaBlob;
-    use kzgrs_backend::encoder::{self, DaEncoderParams};
-    use nomos_core::da::{BlobId, DaEncoder};
+    use kzgrs_backend::testutils::get_da_blob;
+    use nomos_core::da::BlobId;
 
-    fn get_encoder() -> encoder::DaEncoder {
-        const DOMAIN_SIZE: usize = 16;
-        let params = DaEncoderParams::default_with(DOMAIN_SIZE);
-        encoder::DaEncoder::new(params)
-    }
-
-    fn get_da_blob() -> DaBlob {
-        let encoder = get_encoder();
-        let data = vec![
-            49u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
-        ];
-
-        let encoded_data = encoder.encode(&data).unwrap();
-        let columns: Vec<_> = encoded_data.extended_data.columns().collect();
-
-        let index = 0;
-        let da_blob = DaBlob {
-            column: columns[index].clone(),
-            column_idx: index
-                .try_into()
-                .expect("Column index shouldn't overflow the target type"),
-            column_commitment: encoded_data.column_commitments[index],
-            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
-            aggregated_column_proof: encoded_data.aggregated_column_proofs[index],
-            rows_commitments: encoded_data.row_commitments.clone(),
-            rows_proofs: encoded_data
-                .rows_proofs
-                .iter()
-                .map(|proofs| proofs.get(index).cloned().unwrap())
-                .collect(),
-        };
-
-        da_blob
-    }
-
     #[tokio::test]
     async fn pack_and_unpack() -> Result<()> {
         let blob_id = BlobId::from([0; 32]);
-        let data = get_da_blob();
+        let data = get_da_blob(None);
         let blob = Blob::new(blob_id, data);
         let subnetwork_id = 0;
         let message = DispersalRequest::new(blob, subnetwork_id);
@@ -162,7 +125,7 @@ mod tests {
     #[tokio::test]
     async fn pack_to_writer_and_unpack_from_reader() -> Result<()> {
         let blob_id = BlobId::from([0; 32]);
-        let data = get_da_blob();
+        let data = get_da_blob(None);
         let blob = Blob::new(blob_id, data);
         let subnetwork_id = 0;
         let message = DispersalRequest::new(blob, subnetwork_id);