Da: parallel encoder (#665)

* Added basic parallelization to encoder

* Added encoder benchmarks
Daniel Sanchez, 2024-06-25 15:39:40 +02:00, committed by GitHub
parent 96a8ef6032
commit 8936c331a6
4 changed files with 85 additions and 21 deletions


@@ -25,6 +25,7 @@ rayon = { version = "1.10.0", optional = true }
 [dev-dependencies]
 rand = "0.8"
 ark-bls12-381 = "0.4.0"
+divan = "0.1"

 [features]
 default = []
@@ -34,3 +35,7 @@ parallel = [
 "kzgrs/parallel"
 ]

+[[bench]]
+name = "encoder"
+harness = false
+

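Note: `harness = false` opts the `encoder` bench target out of the default libtest harness so that divan's own `main` (below) can drive it. The target runs with stock cargo commands, e.g. `cargo bench --bench encoder`, and `cargo bench --bench encoder --features parallel` exercises the rayon-backed path introduced in this commit.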

@@ -0,0 +1,26 @@
+use divan::counter::BytesCount;
+use divan::Bencher;
+use kzgrs_backend::encoder::{DaEncoder, DaEncoderParams};
+use rand::RngCore;
+use std::hint::black_box;
+
+fn main() {
+    divan::main()
+}
+
+const PARAMS: DaEncoderParams = DaEncoderParams::default_with(4096);
+const ENCODER: DaEncoder = DaEncoder::new(PARAMS);
+const KB: usize = 1024;
+pub fn rand_data(elements_count: usize) -> Vec<u8> {
+    let mut buff = vec![0; elements_count * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE];
+    rand::thread_rng().fill_bytes(&mut buff);
+    buff
+}
+
+#[divan::bench(args = [32, 64, 128, 256, 512, 1024, 2048], sample_count = 1, sample_size = 1)]
+fn encode(bencher: Bencher, size: usize) {
+    bencher
+        .with_inputs(|| rand_data(size * KB / DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE))
+        .input_counter(|buff| BytesCount::new(buff.len()))
+        .bench_refs(|buff| black_box(ENCODER.encode(buff)));
+}

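A few notes on the benchmark above: `with_inputs` regenerates a fresh random buffer for every sample, `input_counter` lets divan report throughput in bytes per second, and `sample_count = 1, sample_size = 1` runs each argument exactly once, since a single encode over megabyte-scale input is already expensive. The sizing arithmetic rounds down to a whole number of chunks: assuming `MAX_BLS12_381_ENCODING_CHUNK_SIZE` is 31 bytes (one byte less than `BYTES_PER_FIELD_ELEMENT`, so every chunk is guaranteed to fit in a BLS12-381 `Fr`), `size = 32` gives `32 * 1024 / 31 = 1057` elements and a `1057 * 31 = 32767`-byte buffer, i.e. just under 32 KiB of random data.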

@@ -7,6 +7,8 @@ use serde::{Deserialize, Serialize};
 use std::io::Cursor;
 // crates
 use blake2::digest::{Update, VariableOutput};
+#[cfg(feature = "parallel")]
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 use sha3::{Digest, Sha3_256};
 // internal
 use kzgrs::Commitment;
@@ -107,6 +109,10 @@ impl ChunksMatrix {
     pub fn rows(&self) -> impl Iterator<Item = &Row> + '_ {
         self.0.iter()
     }
+    #[cfg(feature = "parallel")]
+    pub fn par_rows(&self) -> impl ParallelIterator<Item = &Row> + '_ {
+        self.0.par_iter()
+    }
     pub fn columns(&self) -> impl Iterator<Item = Column> + '_ {
         let size = self.0.first().map(|r| r.0.len()).unwrap_or(0);
         (0..size).map(|i| {

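`par_rows` mirrors `rows` but returns a rayon `ParallelIterator`, so the traversal strategy is chosen at compile time by the `parallel` feature. A minimal sketch of the same accessor pattern on a toy type (`Matrix` and the demo `main` are hypothetical, not part of the crate):

```rust
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

// Toy stand-in for ChunksMatrix: a list of rows, each a byte vector.
struct Matrix(Vec<Vec<u8>>);

impl Matrix {
    // Sequential traversal, available in every build.
    fn rows(&self) -> impl Iterator<Item = &Vec<u8>> + '_ {
        self.0.iter()
    }

    // Parallel traversal, only compiled in with the `parallel` feature.
    #[cfg(feature = "parallel")]
    fn par_rows(&self) -> impl ParallelIterator<Item = &Vec<u8>> + '_ {
        self.0.par_iter()
    }
}

fn main() {
    let m = Matrix(vec![vec![1, 2], vec![3, 4]]);
    // The sequential accessor always exists; `m.par_rows()` additionally
    // exists when building with `--features parallel`.
    assert_eq!(m.rows().count(), 2);
}
```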

@@ -10,6 +10,8 @@ use kzgrs::{
     bytes_to_polynomial, commit_polynomial, encode, Commitment, Evaluations, KzgRsError,
     Polynomial, PolynomialEvaluationDomain, Proof, BYTES_PER_FIELD_ELEMENT,
 };
+#[cfg(feature = "parallel")]
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 // internal
 use crate::common::{hash_column_and_commitment, Chunk, ChunksMatrix, Row};

@@ -71,20 +73,28 @@ impl DaEncoder {
         matrix: &ChunksMatrix,
         polynomial_evaluation_domain: PolynomialEvaluationDomain,
     ) -> Result<Vec<((Evaluations, Polynomial), Commitment)>, KzgRsError> {
-        matrix
-            .rows()
-            .map(|r| {
-                // Using the unchecked version here because, during chunkification,
-                // we already ensure the chunks are proper field elements.
-                // Also, after RS encoding, all `Fr` elements are guaranteed to fit within the modulus.
-                let (evals, poly) = bytes_to_polynomial_unchecked::<BYTES_PER_FIELD_ELEMENT>(
-                    r.as_bytes().as_ref(),
-                    polynomial_evaluation_domain,
-                );
-                commit_polynomial(&poly, &GLOBAL_PARAMETERS)
-                    .map(|commitment| ((evals, poly), commitment))
-            })
-            .collect()
+        {
+            #[cfg(not(feature = "parallel"))]
+            {
+                matrix.rows()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                matrix.par_rows()
+            }
+        }
+        .map(|r| {
+            // Using the unchecked version here because, during chunkification,
+            // we already ensure the chunks are proper field elements.
+            // Also, after RS encoding, all `Fr` elements are guaranteed to fit within the modulus.
+            let (evals, poly) = bytes_to_polynomial_unchecked::<BYTES_PER_FIELD_ELEMENT>(
+                r.as_bytes().as_ref(),
+                polynomial_evaluation_domain,
+            );
+            commit_polynomial(&poly, &GLOBAL_PARAMETERS)
+                .map(|commitment| ((evals, poly), commitment))
+        })
+        .collect()
     }

     fn rs_encode_row(
@@ -98,16 +108,33 @@
         rows: &[Polynomial],
         polynomial_evaluation_domain: PolynomialEvaluationDomain,
     ) -> Vec<Evaluations> {
-        rows.iter()
-            .map(|poly| Self::rs_encode_row(poly, polynomial_evaluation_domain))
-            .collect()
+        {
+            #[cfg(not(feature = "parallel"))]
+            {
+                rows.iter()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                rows.par_iter()
+            }
+        }
+        .map(|poly| Self::rs_encode_row(poly, polynomial_evaluation_domain))
+        .collect()
     }

     fn compute_rows_proofs(polynomials: &[Polynomial]) -> Result<Vec<Vec<Proof>>, KzgRsError> {
-        Ok(polynomials
-            .iter()
-            .map(|poly| fk20_batch_generate_elements_proofs(poly, &GLOBAL_PARAMETERS))
-            .collect())
+        Ok({
+            #[cfg(not(feature = "parallel"))]
+            {
+                polynomials.iter()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                polynomials.par_iter()
+            }
+        }
+        .map(|poly| fk20_batch_generate_elements_proofs(poly, &GLOBAL_PARAMETERS))
+        .collect())
     }

     #[allow(clippy::type_complexity)]
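The pattern repeated in all three call sites above relies on blocks being expressions: `#[cfg]` keeps exactly one of the two inner blocks at compile time, and that block's trailing expression becomes the receiver of the shared `.map(...).collect()` chain. This works because rayon's `ParallelIterator` deliberately mirrors `Iterator`'s combinator names, so the pipeline body is written once for both builds. A self-contained sketch of the technique (`sum_of_squares` is illustrative, not from the crate):

```rust
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

// Exactly one cfg branch survives compilation; both yield a type with `.map`
// and `.sum`, so the chain below needs no duplication.
fn sum_of_squares(values: &[u64]) -> u64 {
    {
        #[cfg(not(feature = "parallel"))]
        {
            values.iter()
        }
        #[cfg(feature = "parallel")]
        {
            values.par_iter()
        }
    }
    .map(|v| v * v)
    .sum()
}

fn main() {
    // 1 + 4 + 9 = 14, in both the sequential and the parallel build.
    assert_eq!(sum_of_squares(&[1, 2, 3]), 14);
}
```

Keeping the combinator chain outside the cfg blocks avoids duplicating the pipeline for each configuration; the trade-off is that both branches must expose identically named combinators, which rayon guarantees by design.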