Da: parallel encoder (#665)

* Added basic parallelization to encoder

* Added encoder benchmarks
Daniel Sanchez 2024-06-25 15:39:40 +02:00 committed by GitHub
parent 96a8ef6032
commit 8936c331a6
4 changed files with 85 additions and 21 deletions

Cargo.toml of the kzgrs_backend crate:

@@ -25,6 +25,7 @@ rayon = { version = "1.10.0", optional = true }
 [dev-dependencies]
 rand = "0.8"
 ark-bls12-381 = "0.4.0"
+divan = "0.1"

 [features]
 default = []
@@ -34,3 +35,7 @@ parallel = [
 "kzgrs/parallel"
 ]
+
+[[bench]]
+name = "encoder"
+harness = false
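The [[bench]] section registers a bench target named "encoder" (by cargo convention, benches/encoder.rs, shown next), and harness = false turns off the default libtest harness so the benchmark's own main, provided by divan::main(), runs instead. Once registered, it can be invoked with cargo bench --bench encoder, adding --features parallel to exercise the rayon path; the exact flags depend on the workspace layout, so treat that invocation as a sketch rather than the project's documented command.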

New benchmark, benches/encoder.rs:

@@ -0,0 +1,26 @@
+use divan::counter::BytesCount;
+use divan::Bencher;
+use kzgrs_backend::encoder::{DaEncoder, DaEncoderParams};
+use rand::RngCore;
+use std::hint::black_box;
+
+fn main() {
+    divan::main()
+}
+
+const PARAMS: DaEncoderParams = DaEncoderParams::default_with(4096);
+const ENCODER: DaEncoder = DaEncoder::new(PARAMS);
+const KB: usize = 1024;
+
+pub fn rand_data(elements_count: usize) -> Vec<u8> {
+    let mut buff = vec![0; elements_count * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE];
+    rand::thread_rng().fill_bytes(&mut buff);
+    buff
+}
+#[divan::bench(args = [32, 64, 128, 256, 512, 1024, 2048], sample_count = 1, sample_size = 1)]
+fn encode(bencher: Bencher, size: usize) {
+    bencher
+        .with_inputs(|| rand_data(size * KB / DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE))
+        .input_counter(|buff| BytesCount::new(buff.len()))
+        .bench_refs(|buff| black_box(ENCODER.encode(buff)));
+}
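A note on the numbers: each benchmark argument is an input size in KiB. rand_data allocates elements_count * MAX_BLS12_381_ENCODING_CHUNK_SIZE bytes, and the bench requests size * KB / MAX_BLS12_381_ENCODING_CHUNK_SIZE elements, so every iteration encodes roughly size KiB of random data (exact only when the chunk size divides size * KB). BytesCount::new(buff.len()) makes divan report throughput relative to that raw input size, and sample_count = 1, sample_size = 1 means each argument is encoded exactly once per run rather than averaged over repeated samples.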

kzgrs_backend common module:

@@ -7,6 +7,8 @@ use serde::{Deserialize, Serialize};
 use std::io::Cursor;
 // crates
 use blake2::digest::{Update, VariableOutput};
+#[cfg(feature = "parallel")]
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 use sha3::{Digest, Sha3_256};
 // internal
 use kzgrs::Commitment;
@@ -107,6 +109,10 @@ impl ChunksMatrix {
     pub fn rows(&self) -> impl Iterator<Item = &Row> + '_ {
         self.0.iter()
     }
+    #[cfg(feature = "parallel")]
+    pub fn par_rows(&self) -> impl ParallelIterator<Item = &Row> + '_ {
+        self.0.par_iter()
+    }
     pub fn columns(&self) -> impl Iterator<Item = Column> + '_ {
         let size = self.0.first().map(|r| r.0.len()).unwrap_or(0);
         (0..size).map(|i| {
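par_rows mirrors rows() but returns a rayon ParallelIterator over the same &Row items, and it is only compiled when the parallel feature is enabled. Call sites need rayon's ParallelIterator trait in scope to use combinators such as map and collect on it, which is why the encoder changes below add the same feature-gated import.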

kzgrs_backend encoder module:

@@ -10,6 +10,8 @@ use kzgrs::{
     bytes_to_polynomial, commit_polynomial, encode, Commitment, Evaluations, KzgRsError,
     Polynomial, PolynomialEvaluationDomain, Proof, BYTES_PER_FIELD_ELEMENT,
 };
+#[cfg(feature = "parallel")]
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 // internal
 use crate::common::{hash_column_and_commitment, Chunk, ChunksMatrix, Row};
@@ -71,8 +73,16 @@ impl DaEncoder {
         matrix: &ChunksMatrix,
         polynomial_evaluation_domain: PolynomialEvaluationDomain,
     ) -> Result<Vec<((Evaluations, Polynomial), Commitment)>, KzgRsError> {
-        matrix
-            .rows()
+        {
+            #[cfg(not(feature = "parallel"))]
+            {
+                matrix.rows()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                matrix.par_rows()
+            }
+        }
             .map(|r| {
                 // Using the unchecked version here. Because during the process of chunkifiying
                 // we already make sure to have the chunks of proper elements.
@@ -98,14 +108,31 @@ impl DaEncoder {
         rows: &[Polynomial],
         polynomial_evaluation_domain: PolynomialEvaluationDomain,
     ) -> Vec<Evaluations> {
-        rows.iter()
+        {
+            #[cfg(not(feature = "parallel"))]
+            {
+                rows.iter()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                rows.par_iter()
+            }
+        }
             .map(|poly| Self::rs_encode_row(poly, polynomial_evaluation_domain))
             .collect()
     }

     fn compute_rows_proofs(polynomials: &[Polynomial]) -> Result<Vec<Vec<Proof>>, KzgRsError> {
-        Ok(polynomials
-            .iter()
+        Ok({
+            #[cfg(not(feature = "parallel"))]
+            {
+                polynomials.iter()
+            }
+            #[cfg(feature = "parallel")]
+            {
+                polynomials.par_iter()
+            }
+        }
             .map(|poly| fk20_batch_generate_elements_proofs(poly, &GLOBAL_PARAMETERS))
             .collect())
     }
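The same feature-gated selection appears three times above: a block expression evaluates to either the sequential iterator or the rayon parallel one, and the shared .map(...).collect() tail compiles against both because Iterator and ParallelIterator expose matching combinators and rayon can collect into Vec. A minimal standalone sketch of the pattern, with illustrative names not taken from the commit:

#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

// With the `parallel` feature the block below evaluates to a rayon parallel
// iterator over &u64, otherwise to a plain sequential one; the tail of the
// chain is identical in both cases.
fn squares(values: &[u64]) -> Vec<u64> {
    {
        #[cfg(not(feature = "parallel"))]
        {
            values.iter()
        }
        #[cfg(feature = "parallel")]
        {
            values.par_iter()
        }
    }
    .map(|v| v * v)
    .collect()
}

Keeping the selection in one block expression avoids duplicating the mapping logic per feature, at the cost of the slightly unusual { ... }.map(...) chaining; the alternative of writing two complete function bodies under #[cfg] would repeat the commitment, encoding, and proof computations verbatim.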