DA Kzgrs Backend Certificate implementation (#651)

* Kzgrs backend certificate definition

* Encoded data to certificate test

* Nomos da domain specific tag

* Handle errors in da certificate creation

* Add nomos core traits to da cert

* Derive ordering traits for da index

* Add failure test cases to kzgrs certificate
This commit is contained in:
gusto 2024-05-27 12:50:36 +03:00 committed by GitHub
parent 26febb7328
commit d8f6f0318a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 497 additions and 19 deletions

View File

@ -6,7 +6,6 @@ use nomos_core::da::certificate::metadata::Next;
use nomos_core::da::certificate::CertificateStrategy;
// internal
use nomos_core::da::certificate::{self, metadata};
use std::cmp::Ordering;
// std
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
@ -18,7 +17,7 @@ use blake2::{
use bytes::Bytes;
use serde::{Deserialize, Serialize};
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Copy, Clone, Default, Debug, PartialEq, PartialOrd, Ord, Eq, Serialize, Deserialize)]
pub struct Index([u8; 8]);
/// Re-export the types for OpenAPI
@ -230,18 +229,6 @@ impl AsRef<[u8]> for Index {
}
}
impl PartialOrd for Index {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Index {
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
}
fn hash(item: impl AsRef<[u8]>) -> [u8; 32] {
let mut hasher = Blake2bVar::new(32).unwrap();
hasher.update(item.as_ref());

View File

@ -9,6 +9,7 @@ edition = "2021"
ark-ff = "0.4"
ark-serialize = "0.4.2"
ark-poly = "0.4.2"
bitvec = "1.0.1"
blake2 = "0.10"
blst = { version = "0.3.11", features = ["serde"] }
itertools = "0.12"

View File

@ -18,6 +18,8 @@ pub struct Row(pub Vec<Chunk>);
pub struct Column(pub Vec<Chunk>);
pub struct ChunksMatrix(pub Vec<Row>);
pub const NOMOS_DA_DST: &[u8] = b"NOMOS_DA_AVAIL";
impl Chunk {
pub fn len(&self) -> usize {
self.0.len()

View File

@ -0,0 +1,448 @@
// std
use std::hash::{Hash, Hasher};
// crates
use bitvec::prelude::*;
use blst::min_sig::{AggregateSignature, PublicKey, Signature};
use blst::BLST_ERROR;
use kzgrs::{Commitment, KzgRsError};
use nomos_core::da::certificate::metadata::Next;
use nomos_core::da::certificate::{self, metadata};
// internal
use crate::common::{attestation::Attestation, build_attestation_message, NOMOS_DA_DST};
use crate::encoder::EncodedData;
/// KZG+RS DA certificate: an aggregated BLS signature over the attestation
/// message, together with the commitments being attested and the dispersal
/// metadata.
#[derive(Debug, Clone, PartialEq)]
pub struct Certificate {
    // BLS aggregate of the individual attestation signatures.
    aggregated_signatures: Signature,
    // Bitfield marking which nodes contributed a signature to the aggregate;
    // bit position corresponds to node position in the verifiers' key list.
    signers: BitVec<u8>,
    // Commitment over the aggregated column data.
    aggregated_column_commitment: Commitment,
    // One commitment per row of the encoded data matrix.
    row_commitments: Vec<Commitment>,
    // Application id and index this certificate is dispersed under.
    metadata: Metadata,
}
impl Certificate {
    /// Deterministic certificate identifier: the attestation message derived
    /// from the aggregated column commitment and the row commitments.
    pub fn id(&self) -> Vec<u8> {
        build_attestation_message(&self.aggregated_column_commitment, &self.row_commitments)
    }

    /// Verify the aggregated signature against the subset of
    /// `nodes_public_keys` selected by the `signers` bitfield.
    ///
    /// NOTE(review): `self.signers[*index]` is indexed by positions taken from
    /// `nodes_public_keys`, so this panics if the bitfield is shorter than the
    /// key slice — assumes both were built over the same node set; confirm at
    /// call sites.
    pub fn verify(&self, nodes_public_keys: &[PublicKey]) -> bool {
        // Keep only the keys whose bit is set in the signers bitfield.
        let signers_keys: Vec<&PublicKey> = nodes_public_keys
            .iter()
            .enumerate()
            .filter(|(index, _)| self.signers[*index])
            .map(|(_, pk)| pk)
            .collect();

        let message = self.id();
        // Every signer signed the same attestation message, so the message is
        // repeated once per participating key.
        let messages: Vec<&[u8]> = std::iter::repeat(message.as_slice())
            .take(signers_keys.len())
            .collect();

        verify_aggregate_signature(&self.aggregated_signatures, &signers_keys, &messages)
    }

    /// Build a certificate from encoded data and the attestations gathered
    /// for it.
    ///
    /// Errors:
    /// * `KzgRsError::NotEnoughAttestations` — fewer attestations (or fewer
    ///   decodable signatures, see below) than `threshold`.
    /// * `KzgRsError::AttestationSignersMismatch` — attestation count differs
    ///   from the number of set bits in `signers`.
    /// * Any error from BLS signature aggregation.
    pub fn build_certificate(
        encoded_data: &EncodedData,
        attestations: &[Attestation],
        signers: BitVec<u8>,
        threshold: usize,
        metadata: Metadata,
    ) -> Result<Self, KzgRsError> {
        if attestations.len() < threshold {
            return Err(KzgRsError::NotEnoughAttestations {
                required: threshold,
                received: attestations.len(),
            });
        }

        if attestations.len() != signers.count_ones() {
            return Err(KzgRsError::AttestationSignersMismatch {
                attestations_count: attestations.len(),
                signers_count: signers.count_ones(),
            });
        }

        // Undecodable signature bytes are silently dropped here; the threshold
        // re-check below catches the case where too many were dropped.
        let signatures: Vec<Signature> = attestations
            .iter()
            .filter_map(|att| Signature::from_bytes(&att.signature).ok())
            .collect();

        // Certificate will fail to be built if number of valid signatures from the attestations
        // doesn't satisfy the same threshold used for attestations.
        if signatures.len() < threshold {
            return Err(KzgRsError::NotEnoughAttestations {
                required: threshold,
                received: signatures.len(),
            });
        }

        let aggregated_signatures = aggregate_signatures(signatures)?;

        Ok(Self {
            aggregated_signatures,
            signers,
            aggregated_column_commitment: encoded_data.aggregated_column_commitment,
            row_commitments: encoded_data.row_commitments.clone(),
            metadata,
        })
    }
}
/// Aggregate individual BLS signatures into a single signature, validating
/// each input point (`true`) during aggregation.
fn aggregate_signatures(signatures: Vec<Signature>) -> Result<Signature, BLST_ERROR> {
    let signature_refs: Vec<&Signature> = signatures.iter().collect();
    let aggregate = AggregateSignature::aggregate(&signature_refs, true)?;
    Ok(aggregate.to_signature())
}
/// Verify an aggregated BLS signature where `messages[i]` was signed by
/// `public_keys[i]`, using the Nomos DA domain separation tag.
fn verify_aggregate_signature(
    aggregate_signature: &Signature,
    public_keys: &[&PublicKey],
    messages: &[&[u8]],
) -> bool {
    let status =
        aggregate_signature.aggregate_verify(true, messages, NOMOS_DA_DST, public_keys, true);
    matches!(status, BLST_ERROR::BLST_SUCCESS)
}
/// Parameters needed to verify a certificate: the ordered list of DA nodes'
/// public keys. Positions must line up with the certificate's signers
/// bitfield, since verification selects keys by index.
#[derive(Clone, Debug)]
pub struct CertificateVerificationParameters {
    pub nodes_public_keys: Vec<PublicKey>,
}
impl certificate::Certificate for Certificate {
type Signature = Signature;
type Id = Vec<u8>;
type VerificationParameters = CertificateVerificationParameters;
fn signers(&self) -> Vec<bool> {
self.signers.iter().map(|b| *b).collect()
}
fn signature(&self) -> Self::Signature {
self.aggregated_signatures
}
fn id(&self) -> Self::Id {
build_attestation_message(&self.aggregated_column_commitment, &self.row_commitments)
}
fn verify(&self, params: Self::VerificationParameters) -> bool {
self.verify(&params.nodes_public_keys)
}
}
/// Blob position within an application's namespace, stored as big-endian
/// `u64` bytes (see `From<u64>` below) so the derived lexicographic ordering
/// matches numeric ordering.
#[derive(Copy, Clone, Default, Debug, Ord, PartialOrd, PartialEq, Eq)]
pub struct Index([u8; 8]);
/// Addressing metadata attached to a certificate: which application the data
/// belongs to and its index within that application's namespace.
#[derive(Default, Debug, Copy, Clone, Eq, PartialEq)]
pub struct Metadata {
    // 32-byte application identifier.
    app_id: [u8; 32],
    // Position within the application's blob sequence.
    index: Index,
}
impl Metadata {
    /// In-memory size of the metadata fields, in bytes.
    pub fn size(&self) -> usize {
        let app_id_size = std::mem::size_of_val(&self.app_id);
        let index_size = std::mem::size_of_val(&self.index);
        app_id_size + index_size
    }
}
/// Reduced certificate form carrying only the certificate id and its
/// addressing metadata (no signatures or commitments); built via
/// `From<Certificate>`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct VidCertificate {
    id: Vec<u8>,
    metadata: Metadata,
}
impl certificate::vid::VidCertificate for VidCertificate {
    type CertificateId = Vec<u8>;

    fn certificate_id(&self) -> Self::CertificateId {
        self.id.clone()
    }

    /// Size of the certificate in bytes.
    ///
    /// Fix: the previous `std::mem::size_of_val(&self.id)` measured the
    /// `Vec` header (pointer + length + capacity, 24 bytes on 64-bit)
    /// rather than the identifier bytes the vector owns; use the actual
    /// byte length of the id instead.
    fn size(&self) -> usize {
        self.id.len() + self.metadata.size()
    }
}
impl metadata::Metadata for VidCertificate {
type AppId = [u8; 32];
type Index = Index;
fn metadata(&self) -> (Self::AppId, Self::Index) {
(self.metadata.app_id, self.metadata.index)
}
}
impl Hash for VidCertificate {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash only the certificate id; it is the canonical identifier.
        let id = <VidCertificate as certificate::vid::VidCertificate>::certificate_id(self);
        state.write(id.as_ref());
    }
}
impl From<Certificate> for VidCertificate {
    /// Collapse a full certificate into its id + metadata form.
    fn from(cert: Certificate) -> Self {
        // Derive the id before moving metadata out of the certificate.
        let id = cert.id();
        let metadata = cert.metadata;
        Self { id, metadata }
    }
}
impl metadata::Metadata for Certificate {
type AppId = [u8; 32];
type Index = Index;
fn metadata(&self) -> (Self::AppId, Self::Index) {
(self.metadata.app_id, self.metadata.index)
}
}
impl From<u64> for Index {
fn from(value: u64) -> Self {
Self(value.to_be_bytes())
}
}
impl Next for Index {
    /// Successor index; wraps around to zero past `u64::MAX`.
    fn next(self) -> Self {
        let successor = u64::from_be_bytes(self.0).wrapping_add(1);
        Self(successor.to_be_bytes())
    }
}
impl AsRef<[u8]> for Index {
    /// Borrow the raw big-endian bytes of the index.
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
#[cfg(test)]
mod tests {
    use bitvec::prelude::*;
    use blst::min_sig::{PublicKey, SecretKey};
    use rand::{rngs::OsRng, thread_rng, Rng, RngCore};

    use crate::{
        common::{attestation::Attestation, blob::DaBlob, NOMOS_DA_DST},
        dispersal::{aggregate_signatures, verify_aggregate_signature, Metadata},
        encoder::{
            test::{rand_data, ENCODER},
            EncodedData,
        },
        verifier::DaVerifier,
    };

    use super::Certificate;

    /// Generate a random BLS key pair from OS entropy.
    fn generate_keys() -> (PublicKey, SecretKey) {
        let mut rng = OsRng;
        let sk_bytes: [u8; 32] = rng.gen();
        let sk = SecretKey::key_gen(&sk_bytes, &[]).unwrap();
        let pk = sk.sk_to_pk();
        (pk, sk)
    }

    /// Have each verifier attest its own column of `encoded_data`;
    /// verifiers[i] handles column i, producing one attestation per column.
    fn attest_encoded_data(
        encoded_data: &EncodedData,
        verifiers: &[DaVerifier],
    ) -> Vec<Attestation> {
        let mut attestations = Vec::new();
        for (i, column) in encoded_data.extended_data.columns().enumerate() {
            let verifier = &verifiers[i];
            // Assemble the per-column blob (column data, commitments and
            // proofs) that the verifier checks and signs.
            let da_blob = DaBlob {
                column,
                column_commitment: encoded_data.column_commitments[i],
                aggregated_column_commitment: encoded_data.aggregated_column_commitment,
                aggregated_column_proof: encoded_data.aggregated_column_proofs[i],
                rows_commitments: encoded_data.row_commitments.clone(),
                rows_proofs: encoded_data
                    .rows_proofs
                    .iter()
                    .map(|proofs| proofs.get(i).cloned().unwrap())
                    .collect(),
            };
            attestations.push(verifier.verify(da_blob).unwrap());
        }
        attestations
    }

    // Happy path: three signatures over the same message aggregate and verify.
    #[test]
    fn test_signature_aggregation_and_verification() {
        let (pk1, sk1) = generate_keys();
        let (pk2, sk2) = generate_keys();
        let (pk3, sk3) = generate_keys();

        let message = b"Test message";
        let sig1 = sk1.sign(message, NOMOS_DA_DST, &[]);
        let sig2 = sk2.sign(message, NOMOS_DA_DST, &[]);
        let sig3 = sk3.sign(message, NOMOS_DA_DST, &[]);

        let aggregated_signature = aggregate_signatures(vec![sig1, sig2, sig3]).unwrap();

        let public_keys = vec![&pk1, &pk2, &pk3];
        let messages = vec![message.as_ref(), message.as_ref(), message.as_ref()];
        let result = verify_aggregate_signature(&aggregated_signature, &public_keys, &messages);

        assert!(result, "Aggregated signature should be valid.");
    }

    // Verification must fail when one public key does not match its signer.
    #[test]
    fn test_invalid_signature_aggregation() {
        let (pk1, sk1) = generate_keys();
        let (pk2, sk2) = generate_keys();
        let (_, sk3) = generate_keys();

        let message = b"Test message";
        let sig1 = sk1.sign(message, NOMOS_DA_DST, &[]);
        let sig2 = sk2.sign(message, NOMOS_DA_DST, &[]);
        let sig3 = sk3.sign(message, NOMOS_DA_DST, &[]);

        let aggregated_signature = aggregate_signatures(vec![sig1, sig2, sig3]).unwrap();

        let (wrong_pk3, _) = generate_keys(); // Generate another key pair for the "wrong" public key

        let public_keys = vec![&pk1, &pk2, &wrong_pk3]; // Incorrect public key for sig3 to demonstrate failure.
        let messages = vec![message.as_ref(), message.as_ref(), message.as_ref()];
        let result = verify_aggregate_signature(&aggregated_signature, &public_keys, &messages);

        assert!(
            !result,
            "Aggregated signature with a mismatched public key should not be valid."
        );
    }

    // End-to-end: encode data, gather 16/16 attestations, build and verify
    // the certificate.
    #[test]
    fn test_encoded_data_verification() {
        const THRESHOLD: usize = 16;

        let encoder = &ENCODER;
        let data = rand_data(8);
        let mut rng = thread_rng();

        let sks: Vec<SecretKey> = (0..16)
            .map(|_| {
                let mut buff = [0u8; 32];
                rng.fill_bytes(&mut buff);
                SecretKey::key_gen(&buff, &[]).unwrap()
            })
            .collect();

        let verifiers: Vec<DaVerifier> = sks
            .clone()
            .into_iter()
            .enumerate()
            .map(|(index, sk)| DaVerifier { sk, index })
            .collect();

        let encoded_data = encoder.encode(&data).unwrap();

        let attestations = attest_encoded_data(&encoded_data, &verifiers);

        // All 16 signer bits set: every node attested.
        let signers = bitvec![u8, Lsb0; 1; 16];
        let cert = Certificate::build_certificate(
            &encoded_data,
            &attestations,
            signers,
            THRESHOLD,
            Metadata::default(),
        )
        .unwrap();

        let public_keys: Vec<PublicKey> = sks.iter().map(|sk| sk.sk_to_pk()).collect();
        assert!(cert.verify(&public_keys));
    }

    // Building must fail when attestations fall below the threshold.
    #[test]
    fn test_encoded_data_insufficient_verification() {
        const THRESHOLD: usize = 16;

        let encoder = &ENCODER;
        let data = rand_data(8);
        let mut rng = thread_rng();

        let sks: Vec<SecretKey> = (0..16)
            .map(|_| {
                let mut buff = [0u8; 32];
                rng.fill_bytes(&mut buff);
                SecretKey::key_gen(&buff, &[]).unwrap()
            })
            .collect();

        let verifiers: Vec<DaVerifier> = sks
            .clone()
            .into_iter()
            .enumerate()
            .map(|(index, sk)| DaVerifier { sk, index })
            .collect();

        let encoded_data = encoder.encode(&data).unwrap();

        let mut attestations = attest_encoded_data(&encoded_data, &verifiers);

        // Imitate missing attestation.
        attestations.pop();

        let signers = bitvec![u8, Lsb0; 1; 16];
        let cert_result = Certificate::build_certificate(
            &encoded_data,
            &attestations,
            signers,
            THRESHOLD,
            Metadata::default(),
        );

        // Certificate won't be created because of not reaching required threshold.
        assert!(cert_result.is_err());
    }

    // A valid certificate must not verify against a tampered public-key set.
    #[test]
    fn test_encoded_data_wrong_pk_verification() {
        const THRESHOLD: usize = 16;

        let encoder = &ENCODER;
        let data = rand_data(8);
        let mut rng = thread_rng();

        let sks: Vec<SecretKey> = (0..16)
            .map(|_| {
                let mut buff = [0u8; 32];
                rng.fill_bytes(&mut buff);
                SecretKey::key_gen(&buff, &[]).unwrap()
            })
            .collect();

        let verifiers: Vec<DaVerifier> = sks
            .clone()
            .into_iter()
            .enumerate()
            .map(|(index, sk)| DaVerifier { sk, index })
            .collect();

        let encoded_data = encoder.encode(&data).unwrap();

        let attestations = attest_encoded_data(&encoded_data, &verifiers);

        let signers = bitvec![u8, Lsb0; 1; 16];
        let cert = Certificate::build_certificate(
            &encoded_data,
            &attestations,
            signers,
            THRESHOLD,
            Metadata::default(),
        )
        .unwrap();

        let mut public_keys: Vec<PublicKey> = sks.iter().map(|sk| sk.sk_to_pk()).collect();

        // Imitate different set of public keys on the verifier side.
        let (wrong_pk, _) = generate_keys();
        public_keys.pop();
        public_keys.push(wrong_pk);

        // Certificate should fail to be verified.
        assert!(!cert.verify(&public_keys));
    }
}

View File

@ -1,4 +1,5 @@
pub mod common;
pub mod dispersal;
pub mod encoder;
pub mod global;
pub mod verifier;

View File

@ -10,6 +10,7 @@ use kzgrs::{
};
use crate::common::blob::DaBlob;
use crate::common::NOMOS_DA_DST;
// internal
use crate::common::{
attestation::Attestation, build_attestation_message, hash_column_and_commitment, Chunk, Column,
@ -19,8 +20,8 @@ use crate::global::{DOMAIN, GLOBAL_PARAMETERS};
pub struct DaVerifier {
// TODO: substitute this for an abstraction to sign things over
sk: SecretKey,
index: usize,
pub sk: SecretKey,
pub index: usize,
}
impl DaVerifier {
@ -108,7 +109,7 @@ impl DaVerifier {
fn build_attestation(&self, blob: &DaBlob) -> Attestation {
let message =
build_attestation_message(&blob.aggregated_column_commitment, &blob.rows_commitments);
let signature = self.sk.sign(&message, b"", b"");
let signature = self.sk.sign(&message, NOMOS_DA_DST, b"");
let blob_id = blob.id();
let blob_hash: [u8; 32] = blob_id
@ -148,11 +149,12 @@ impl DaVerifier {
#[cfg(test)]
mod test {
use crate::common::blob::DaBlob;
use crate::common::{hash_column_and_commitment, Chunk, Column};
use crate::encoder::test::{rand_data, ENCODER};
use crate::encoder::DaEncoderParams;
use crate::global::{DOMAIN, GLOBAL_PARAMETERS};
use crate::verifier::{DaBlob, DaVerifier};
use crate::verifier::DaVerifier;
use blst::min_sig::SecretKey;
use kzgrs::{
bytes_to_polynomial, commit_polynomial, generate_element_proof, BYTES_PER_FIELD_ELEMENT,

View File

@ -14,6 +14,7 @@ ark-ff = { version = "0.4.2", features = ["asm", "parallel"] }
ark-poly = { version = "0.4.2", features = ["parallel", "rayon"] }
ark-poly-commit = { version = "0.4.0", features = ["rayon", "parallel"] }
ark-serialize = { version = "0.4" }
blst = "0.3.11"
num-bigint = "0.4.4"
thiserror = "1.0.58"
num-traits = "0.2.18"
@ -25,4 +26,4 @@ rayon = "1.10"
[[bench]]
name = "kzg"
harness = false
harness = false

View File

@ -1,3 +1,5 @@
use std::fmt;
// std
// crates
use crate::{FieldElement, BYTES_PER_FIELD_ELEMENT};
@ -6,10 +8,35 @@ use ark_ff::Zero;
use ark_poly::domain::general::GeneralEvaluationDomain;
use ark_poly::evaluations::univariate::Evaluations;
use ark_poly::univariate::DensePolynomial;
use blst::BLST_ERROR;
use num_bigint::BigUint;
use thiserror::Error;
// internal
/// Newtype wrapper around blst's C-style `BLST_ERROR` so it can participate
/// in `KzgRsError` (the raw enum implements neither `Display` nor `Error`);
/// `Display` is implemented manually below.
#[derive(Error, Debug)]
pub struct BlstError(pub BLST_ERROR);
impl fmt::Display for BlstError {
    /// Human-readable description for each blst status code.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let description = match self.0 {
            BLST_ERROR::BLST_SUCCESS => "Operation successful",
            BLST_ERROR::BLST_BAD_ENCODING => "Bad encoding",
            BLST_ERROR::BLST_POINT_NOT_ON_CURVE => "Point not on curve",
            BLST_ERROR::BLST_POINT_NOT_IN_GROUP => "Point not in group",
            BLST_ERROR::BLST_AGGR_TYPE_MISMATCH => "Aggregate type mismatch",
            BLST_ERROR::BLST_VERIFY_FAIL => "Verification failed",
            BLST_ERROR::BLST_PK_IS_INFINITY => "Public key is infinity",
            BLST_ERROR::BLST_BAD_SCALAR => "Bad scalar value",
        };
        f.write_str(description)
    }
}
impl From<BLST_ERROR> for KzgRsError {
    /// Wrap a raw blst status code into the crate's error type.
    fn from(err: BLST_ERROR) -> Self {
        Self::BlstError(BlstError(err))
    }
}
#[derive(Error, Debug)]
pub enum KzgRsError {
#[error("Data isn't properly padded, data len must match modulus {expected_modulus} but it is {current_size}")]
@ -19,8 +46,17 @@ pub enum KzgRsError {
},
#[error("ChunkSize should be <= 32 (bytes), got {0}")]
ChunkSizeTooBig(usize),
#[error("Not enough attestations, required {required} but received {received}")]
NotEnoughAttestations { required: usize, received: usize },
#[error("Mismatch between number of attestations ({attestations_count}) and number of signers ({signers_count})")]
AttestationSignersMismatch {
attestations_count: usize,
signers_count: usize,
},
#[error(transparent)]
PolyCommitError(#[from] ark_poly_commit::Error),
#[error("BLST error: {0}")]
BlstError(BlstError),
}
/// Transform chunks of bytes (of size `CHUNK_SIZE`) into `Fr` which are considered evaluations of a