Add encode benchmark (wip)

This commit is contained in:
Daniel Sanchez Quiros 2024-04-26 16:43:59 +02:00
parent a289390beb
commit dd0334264a
5 changed files with 105 additions and 18 deletions

View File

@ -20,4 +20,9 @@ sha3 = "0.10"
[dev-dependencies]
rand = "0.8"
divan = "0.1"
[[bench]]
name = "encode"
harness = false

View File

@ -0,0 +1,70 @@
use divan::{black_box, counter::BytesCount, AllocProfiler, Bencher};
use rand::RngCore;
use kzgrs_backend::encoder::{DaEncoder, DaEncoderParams};
// Entry point: hands control to divan's runner, which discovers and executes
// every `#[divan::bench]` function in this file.
fn main() {
divan::main()
}
// Route all allocations through divan's profiler so benches can report
// per-sample allocation counts alongside timings.
#[global_allocator]
static ALLOC: AllocProfiler = AllocProfiler::system();
// Input sizes for the chunkify benchmark, in MiB (see `rand_data`).
const SIZES: &[usize] = &[1usize, 2, 3, 4, 5, 6, 7, 8];
/// Build a random byte buffer of roughly `size_mb` MiB, rounded down so the
/// length is an exact multiple of the BLS12-381 encoding chunk size.
fn rand_data(size_mb: usize) -> Vec<u8> {
    const MIB: usize = 1024 * 1024;
    let chunk_size = DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE;
    // Integer division truncates: the buffer holds a whole number of chunks.
    let len = (size_mb * MIB / chunk_size) * chunk_size;
    let mut data = vec![0u8; len];
    rand::thread_rng().fill_bytes(&mut data);
    data
}
#[allow(non_snake_case)]
// Benchmark chunkifying `size` MiB of random bytes into the encoder's matrix.
#[divan::bench(args = SIZES)]
fn chunkify_MB(bencher: Bencher, size: usize) {
// Fixed column count of 10; only the input size varies across args.
let encoder_params: DaEncoderParams = DaEncoderParams::default_with(10);
let encoder: DaEncoder = DaEncoder::new(encoder_params);
bencher
// Input generation happens outside the timed section.
.with_inputs(|| rand_data(size))
// Report throughput as bytes of input processed.
.input_counter(BytesCount::of_slice)
.bench_refs(|data| black_box(encoder.chunkify(data)));
}
#[allow(non_snake_case)]
// Benchmark committing a single row of a chunkified 1MiB blob for several
// column counts.
#[divan::bench(args = [100, 1000, 10000], sample_count = 10)]
fn compute_1MB_data_single_kzg_row_commitments_with_column_count(
    bencher: Bencher,
    column_count: usize,
) {
    let encoder_params: DaEncoderParams = DaEncoderParams::default_with(column_count);
    let encoder: DaEncoder = DaEncoder::new(encoder_params);
    // Fix: `bench_refs` yields no meaningful value; the previous `let size =`
    // binding was dead and triggered an unused-variable warning.
    bencher
        // Chunkification of the random input runs outside the timed section.
        .with_inputs(|| encoder.chunkify(rand_data(1).as_ref()))
        // Throughput is measured against the first row's byte size, since only
        // that row is committed below.
        .input_counter(|matrix| BytesCount::of_slice(&matrix.0[0].as_bytes()))
        .bench_refs(|matrix| {
            black_box(DaEncoder::compute_kzg_row_commitment(&matrix.0[0]).is_ok())
        });
}
#[allow(non_snake_case)]
// Benchmark committing every row of a chunkified 1MiB blob for several column
// counts. Single sample: the whole-matrix commitment is expensive.
#[divan::bench(args = [100, 1000, 10000], sample_count = 1, sample_size = 1)]
fn compute_1MB_data_matrix_kzg_row_commitments_with_column_count(
    bencher: Bencher,
    column_count: usize,
) {
    let encoder_params: DaEncoderParams = DaEncoderParams::default_with(column_count);
    let encoder: DaEncoder = DaEncoder::new(encoder_params);
    // Fix: dropped the dead `let size =` binding of `bench_refs`'s unit
    // result, which produced an unused-variable warning.
    bencher
        // Chunkification of the random input runs outside the timed section.
        .with_inputs(|| encoder.chunkify(rand_data(1).as_ref()))
        // Throughput is measured against the full matrix byte size.
        .input_counter(|matrix| BytesCount::new(matrix.bytes_size()))
        .bench_refs(|matrix| black_box(DaEncoder::compute_kzg_row_commitments(&matrix).is_ok()));
}
#[allow(non_snake_case)]
// Benchmark the full DA encoding pipeline over a random 1MiB blob for several
// column counts. Single sample: end-to-end encoding is expensive.
#[divan::bench(args = [100, 1000, 10000], sample_count = 1, sample_size = 1)]
fn encode_1MB_with_column_count(bencher: Bencher, column_count: usize) {
    let encoder_params: DaEncoderParams = DaEncoderParams::default_with(column_count);
    let encoder: DaEncoder = DaEncoder::new(encoder_params);
    // Fix: dropped the dead `let size =` binding of `bench_refs`'s unit
    // result, which produced an unused-variable warning.
    bencher
        // Input generation happens outside the timed section.
        .with_inputs(|| rand_data(1))
        // Throughput is measured against the raw input byte size.
        .input_counter(|data| BytesCount::of_slice(&data))
        .bench_refs(|data| black_box(encoder.encode(data).is_ok()));
}

View File

@ -107,6 +107,15 @@ impl ChunksMatrix {
// Borrowing iterator over the matrix rows, in order.
pub fn iter(&self) -> impl Iterator<Item = &Row> {
self.0.iter()
}
pub fn bytes_size(&self) -> usize {
// This can be done taking into consideration that each chunk should be the same size and just
// multiplying sizes. But for now iterating should be ok.
self.0
.iter()
.flat_map(|row: &Row| row.0.iter().map(|chunk| chunk.0.len()))
.sum()
}
}
impl FromIterator<Row> for ChunksMatrix {

View File

@ -19,7 +19,7 @@ pub struct DaEncoderParams {
impl DaEncoderParams {
// Max bytes that fit in one BLS12-381 field element without overflowing the
// modulus (31 < 32-byte field size).
pub const MAX_BLS12_381_ENCODING_CHUNK_SIZE: usize = 31;
// NOTE(review): the next two lines are overlapping old/new diff lines — the
// visibility widened from `const fn` to `pub const fn` so the benchmarks can
// build params; only the `pub` version should remain in the merged source.
const fn default_with(column_count: usize) -> Self {
pub const fn default_with(column_count: usize) -> Self {
Self { column_count }
}
}
@ -44,7 +44,7 @@ impl DaEncoder {
Self { params: settings }
}
fn chunkify(&self, data: &[u8]) -> ChunksMatrix {
pub fn chunkify(&self, data: &[u8]) -> ChunksMatrix {
let chunk_size =
// column count is divided by two, as later on rows are encoded to twice the size
self.params.column_count.div(2) * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE;
@ -62,23 +62,26 @@ impl DaEncoder {
.collect()
}
/// Interpolate a single row into a polynomial over the global domain and
/// commit to it with the global KZG parameters. Returns the evaluations and
/// polynomial alongside the commitment so callers can reuse them.
pub fn compute_kzg_row_commitment(
row: &Row,
) -> Result<((Evaluations, Polynomial), Commitment), KzgRsError> {
// Using the unchecked version here, because during chunkification we already
// make sure the chunks hold proper field elements.
// Also, after RS encoding, all `Fr` elements are guaranteed to fit within the
// modulus.
let (evals, poly) = bytes_to_polynomial_unchecked::<BYTES_PER_FIELD_ELEMENT>(
row.as_bytes().as_ref(),
*DOMAIN,
);
commit_polynomial(&poly, &GLOBAL_PARAMETERS).map(|commitment| ((evals, poly), commitment))
}
#[allow(clippy::type_complexity)]
// NOTE(review): the next two lines are overlapping old/new diff lines — the
// signature widened from private `fn` to `pub fn` for the benchmarks; only
// the `pub` version should remain in the merged source.
fn compute_kzg_row_commitments(
pub fn compute_kzg_row_commitments(
matrix: &ChunksMatrix,
) -> Result<Vec<((Evaluations, Polynomial), Commitment)>, KzgRsError> {
matrix
.rows()
// NOTE(review): the closure below is the removed pre-refactor body; the
// `.map(Self::compute_kzg_row_commitment)` line after it is the added
// replacement that delegates to the extracted per-row helper. Only the
// delegation line should remain in the merged source.
.map(|r| {
// Using the unchecked version here. Because during the process of chunkifiying
// we already make sure to have the chunks of proper elements.
// Also, after rs encoding, we are sure all `Fr` elements already fits within modulus.
let (evals, poly) = bytes_to_polynomial_unchecked::<BYTES_PER_FIELD_ELEMENT>(
r.as_bytes().as_ref(),
*DOMAIN,
);
commit_polynomial(&poly, &GLOBAL_PARAMETERS)
.map(|commitment| ((evals, poly), commitment))
})
.map(Self::compute_kzg_row_commitment)
// Short-circuits on the first commitment error.
.collect()
}

View File

@ -1,4 +1,4 @@
// NOTE(review): overlapping old/new diff lines — the first four declarations
// are the removed private versions; the modules were widened to `pub` so the
// new benchmark crate can reach `encoder`/`common`. Only the `pub` versions
// should remain in the merged source.
mod common;
mod encoder;
mod global;
mod verifier;
pub mod common;
pub mod encoder;
pub mod global;
pub mod verifier;