diff --git a/nomos-core/src/da/mod.rs b/nomos-core/src/da/mod.rs
index 06411c00..72b534bb 100644
--- a/nomos-core/src/da/mod.rs
+++ b/nomos-core/src/da/mod.rs
@@ -14,8 +14,14 @@ pub trait DaEncoder {
 
 pub trait DaVerifier {
     type DaBlob;
+    type Sk;
+    type Pk;
     type Attestation;
-    fn verify(&self, blob: &Self::DaBlob) -> Result<Self::Attestation, impl Error>;
+    fn verify(
+        blob: &Self::DaBlob,
+        sk: &Self::Sk,
+        nodes_public_keys: &[Self::Pk],
+    ) -> Option<Self::Attestation>;
 }
 
 pub trait DaDispersal {
diff --git a/nomos-da/kzgrs-backend/Cargo.toml b/nomos-da/kzgrs-backend/Cargo.toml
index 52b6855a..1435695c 100644
--- a/nomos-da/kzgrs-backend/Cargo.toml
+++ b/nomos-da/kzgrs-backend/Cargo.toml
@@ -17,6 +17,7 @@ num-bigint = "0.4.4"
 rand = "0.8.5"
 once_cell = "1.19"
 sha3 = "0.10"
+nomos-core = { path = "../../nomos-core" }
 
 [dev-dependencies]
 rand = "0.8"
diff --git a/nomos-da/kzgrs-backend/src/common.rs b/nomos-da/kzgrs-backend/src/common.rs
index f7e1bcf2..29fd1f30 100644
--- a/nomos-da/kzgrs-backend/src/common.rs
+++ b/nomos-da/kzgrs-backend/src/common.rs
@@ -10,8 +10,13 @@ use kzgrs::Commitment;
 
 #[derive(Clone, Eq, PartialEq, Debug)]
 pub struct Chunk(pub Vec<u8>);
+
+#[derive(Debug)]
 pub struct Row(pub Vec<Chunk>);
+
+#[derive(Debug)]
 pub struct Column(pub Vec<Chunk>);
+
 pub struct ChunksMatrix(pub Vec<Row>);
 
 impl Chunk {
@@ -153,6 +158,7 @@ pub fn commitment_to_bytes(commitment: &Commitment) -> Vec<u8> {
     buff.into_inner()
 }
 
+#[derive(Debug)]
 pub struct Attestation {
     pub signature: Signature,
 }
diff --git a/nomos-da/kzgrs-backend/src/lib.rs b/nomos-da/kzgrs-backend/src/lib.rs
index 3916547d..5ed0faea 100644
--- a/nomos-da/kzgrs-backend/src/lib.rs
+++ b/nomos-da/kzgrs-backend/src/lib.rs
@@ -1,4 +1,4 @@
-mod common;
-mod encoder;
-mod global;
-mod verifier;
+pub mod common;
+pub mod encoder;
+pub mod global;
+pub mod verifier;
diff --git a/nomos-da/kzgrs-backend/src/verifier.rs b/nomos-da/kzgrs-backend/src/verifier.rs
index f3c5813a..73795058 100644
--- a/nomos-da/kzgrs-backend/src/verifier.rs
+++ b/nomos-da/kzgrs-backend/src/verifier.rs
@@ -1,9 +1,7 @@
 // std
-
 // crates
-use blst::min_sig::{PublicKey, SecretKey, Signature};
+use blst::min_sig::{PublicKey, SecretKey};
 use itertools::{izip, Itertools};
-use num_bigint::BigUint;
 use sha3::{Digest, Sha3_256};
 // internal
 
@@ -14,10 +12,11 @@ use crate::encoder::DaEncoderParams;
 use crate::global::{DOMAIN, GLOBAL_PARAMETERS};
 use kzgrs::common::field_element_from_bytes_le;
 use kzgrs::{
-    bytes_to_polynomial, commit_polynomial, verify_element_proof, Commitment, FieldElement, Proof,
+    bytes_to_polynomial, commit_polynomial, verify_element_proof, Commitment, Proof,
     BYTES_PER_FIELD_ELEMENT,
 };
 
+#[derive(Debug)]
 pub struct DaBlob {
     column: Column,
     column_commitment: Commitment,
@@ -39,25 +38,9 @@ impl DaBlob {
     }
 }
 
-pub struct DaVerifier {
-    // TODO: substitute this for an abstraction to sign things over
-    sk: SecretKey,
-    index: usize,
-}
+pub struct DaVerifier;
 
 impl DaVerifier {
-    pub fn new(sk: SecretKey, nodes_public_keys: &[PublicKey]) -> Self {
-        // TODO: `is_sorted` is experimental, and by contract `nodes_public_keys` should be shorted
-        // but not sure how we could enforce it here without re-sorting anyway.
-        // assert!(nodes_public_keys.is_sorted());
-        let self_pk = sk.sk_to_pk();
-        let (index, _) = nodes_public_keys
-            .iter()
-            .find_position(|&pk| pk == &self_pk)
-            .expect("Self pk should be registered");
-        Self { sk, index }
-    }
-
     fn verify_column(
         column: &Column,
         column_commitment: &Commitment,
@@ -127,20 +110,20 @@ impl DaVerifier {
         true
     }
 
-    fn build_attestation(&self, blob: &DaBlob) -> Attestation {
+    fn build_attestation(sk: &SecretKey, blob: &DaBlob) -> Attestation {
         let message =
             build_attestation_message(&blob.aggregated_column_commitment, &blob.rows_commitments);
-        let signature = self.sk.sign(&message, b"", b"");
+        let signature = sk.sign(&message, b"", b"");
         Attestation { signature }
     }
 
-    pub fn verify(&self, blob: DaBlob) -> Option<Attestation> {
+    pub fn verify_with_index(blob: &DaBlob, sk: &SecretKey, index: usize) -> Option<Attestation> {
         let is_column_verified = DaVerifier::verify_column(
             &blob.column,
             &blob.column_commitment,
             &blob.aggregated_column_commitment,
             &blob.aggregated_column_proof,
-            self.index,
+            index,
         );
         if !is_column_verified {
             return None;
@@ -150,12 +133,42 @@ impl DaVerifier {
             blob.column.as_ref(),
             &blob.rows_commitments,
             &blob.rows_proofs,
-            self.index,
+            index,
        );
         if !are_chunks_verified {
             return None;
         }
-        Some(self.build_attestation(&blob))
+        Some(Self::build_attestation(sk, blob))
+    }
+    pub fn verify(
+        blob: &DaBlob,
+        sk: &SecretKey,
+        nodes_public_keys: &[PublicKey],
+    ) -> Option<Attestation> {
+        // TODO: `is_sorted` is experimental, and by contract `nodes_public_keys` should be sorted,
+        // but not sure how we could enforce it here without re-sorting anyway.
+        // assert!(nodes_public_keys.is_sorted());
+        let self_pk = sk.sk_to_pk();
+        let (index, _) = nodes_public_keys
+            .iter()
+            .find_position(|&pk| pk == &self_pk)
+            .expect("Self pk should be registered");
+        Self::verify_with_index(blob, sk, index)
+    }
+}
+
+impl nomos_core::da::DaVerifier for DaVerifier {
+    type DaBlob = DaBlob;
+    type Sk = SecretKey;
+    type Pk = PublicKey;
+    type Attestation = Attestation;
+
+    fn verify(
+        blob: &Self::DaBlob,
+        sk: &Self::Sk,
+        nodes_public_keys: &[Self::Pk],
+    ) -> Option<Self::Attestation> {
+        DaVerifier::verify(blob, sk, nodes_public_keys)
     }
 }
 
@@ -171,6 +184,7 @@ mod test {
         bytes_to_polynomial, commit_polynomial, generate_element_proof, BYTES_PER_FIELD_ELEMENT,
     };
     use rand::{thread_rng, RngCore};
+    use std::collections::HashMap;
 
     #[test]
     fn test_verify_column() {
@@ -215,14 +229,9 @@
             SecretKey::key_gen(&buff, &[]).unwrap()
         })
         .collect();
-        let verifiers: Vec<DaVerifier> = sks
-            .into_iter()
-            .enumerate()
-            .map(|(index, sk)| DaVerifier { sk, index })
-            .collect();
+        let pks: Vec<_> = sks.iter().map(SecretKey::sk_to_pk).enumerate().collect();
         let encoded_data = encoder.encode(&data).unwrap();
         for (i, column) in encoded_data.extended_data.columns().enumerate() {
-            let verifier = &verifiers[i];
             let da_blob = DaBlob {
                 column,
                 column_commitment: encoded_data.column_commitments[i].clone(),
@@ -235,7 +244,7 @@
                     .map(|proofs| proofs.get(i).cloned().unwrap())
                     .collect(),
             };
-            assert!(verifier.verify(da_blob).is_some());
+            assert!(DaVerifier::verify_with_index(&da_blob, &sks[i], i).is_some());
         }
     }
 }
diff --git a/nomos-services/data-availability/Cargo.toml b/nomos-services/data-availability/Cargo.toml
index 069ae646..c810235f 100644
--- a/nomos-services/data-availability/Cargo.toml
+++ b/nomos-services/data-availability/Cargo.toml
@@ -11,6 +11,7 @@ futures = "0.3"
 moka = { version = "0.11", features = ["future"] }
 nomos-core = { path = "../../nomos-core" }
 nomos-network = { path = "../network" }
+kzgrs-backend = { path = "../../nomos-da/kzgrs-backend" }
{ path = "../../nomos-da/kzgrs-backend" } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } serde = "1.0" tracing = "0.1" diff --git a/nomos-services/data-availability/src/verifier/backend/mod.rs b/nomos-services/data-availability/src/verifier/backend/mod.rs deleted file mode 100644 index 8d15295c..00000000 --- a/nomos-services/data-availability/src/verifier/backend/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -use nomos_core::da::DaVerifier; - -pub trait VerifierBackend: DaVerifier { - type Settings; - fn new(settings: Self::Settings) -> Self; -} diff --git a/nomos-services/data-availability/src/verifier/mod.rs b/nomos-services/data-availability/src/verifier/mod.rs index c82c69d4..6db5783b 100644 --- a/nomos-services/data-availability/src/verifier/mod.rs +++ b/nomos-services/data-availability/src/verifier/mod.rs @@ -1,14 +1,12 @@ -mod backend; mod network; -// std - -// crates - -use std::error::Error; use std::fmt::Debug; +use std::marker::PhantomData; +// std +// crates +use tokio_stream::StreamExt; +use tracing::error; // internal -use crate::verifier::backend::VerifierBackend; use crate::verifier::network::NetworkAdapter; use nomos_core::da::DaVerifier; use nomos_network::NetworkService; @@ -17,60 +15,51 @@ use overwatch_rs::services::relay::{NoMessage, Relay}; use overwatch_rs::services::state::{NoOperator, NoState}; use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId}; use overwatch_rs::DynError; -use tokio_stream::StreamExt; -use tracing::error; -pub struct DaVerifierService +pub struct DaVerifierService where - Backend: VerifierBackend, - Backend::Settings: Clone, N: NetworkAdapter, N::Settings: Clone, + V: DaVerifier, { network_relay: Relay>, service_state: ServiceStateHandle, - verifier: Backend, + _verifier: PhantomData, } #[derive(Clone)] -pub struct DaVerifierServiceSettings { - verifier_settings: BackendSettings, +pub struct DaVerifierServiceSettings { network_adapter_settings: AdapterSettings, } -impl ServiceData for DaVerifierService +impl ServiceData for DaVerifierService where - Backend: VerifierBackend, - Backend::Settings: Clone, N: NetworkAdapter, N::Settings: Clone, + V: DaVerifier, { const SERVICE_ID: ServiceId = "DaVerifier"; - type Settings = DaVerifierServiceSettings; + type Settings = DaVerifierServiceSettings; type State = NoState; type StateOperator = NoOperator; type Message = NoMessage; } #[async_trait::async_trait] -impl ServiceCore for DaVerifierService +impl ServiceCore for DaVerifierService where - Backend: VerifierBackend + Send + 'static, - Backend::Settings: Clone + Send + Sync + 'static, - Backend::DaBlob: Debug, - Backend::Attestation: Debug, - N: NetworkAdapter + Send + 'static, + N: NetworkAdapter + Send + 'static, N::Settings: Clone + Send + Sync + 'static, + V: DaVerifier + Send + Sync + 'static, + V::DaBlob: Debug, + V::Attestation: Debug, { fn init(service_state: ServiceStateHandle) -> Result { - let DaVerifierServiceSettings { - verifier_settings, .. - } = service_state.settings_reader.get_updated_settings(); let network_relay = service_state.overwatch_handle.relay(); Ok(Self { network_relay, service_state, - verifier: Backend::new(verifier_settings), + _verifier: Default::default(), }) } @@ -78,7 +67,7 @@ where let Self { network_relay, service_state, - verifier, + .. 
         } = self;
         let DaVerifierServiceSettings {
             network_adapter_settings,
         } = service_state.settings_reader.get_updated_settings();
         let adapter = N::new(network_adapter_settings, network_relay).await;
         let mut blob_stream = adapter.blob_stream().await;
         while let Some((blob, reply_channel)) = blob_stream.next().await {
-            match verifier.verify(&blob) {
-                Ok(attestation) => {
+            let sk = get_sk();
+            let pks = &[];
+            match V::verify(&blob, sk, pks) {
+                Some(attestation) => {
                     if let Err(attestation) = reply_channel.send(attestation) {
                         error!("Error replying attestation {:?}", attestation);
                     }
                 }
-                Err(e) => {
+                _ => {
                     error!("Received unverified blob {:?}", blob);
                 }
             }
@@ -102,3 +93,7 @@ where
         Ok(())
     }
 }
+
+fn get_sk<Sk>() -> &'static Sk {
+    todo!()
+}
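
A note on the two placeholders left in `run`: `get_sk()` is a `todo!()` and `pks` is an empty slice, so with the kzgrs backend the first blob to arrive would panic inside `DaVerifier::verify` (the node's own public key can never be found in an empty list). One possible way to resolve this later — a sketch only, assuming the keys are threaded through the service settings; the `sk` and `nodes_public_keys` fields below are hypothetical and not part of this diff:

```rust
// Sketch: carry the verifier keys in the service settings instead of a stub.
// `Sk`/`Pk` would be instantiated with the backend's key types
// (e.g. blst's `SecretKey`/`PublicKey` for the kzgrs backend).
#[derive(Clone)]
pub struct DaVerifierServiceSettings<AdapterSettings, Sk, Pk> {
    pub sk: Sk,                      // this node's signing key
    pub nodes_public_keys: Vec<Pk>,  // sorted committee public keys
    pub network_adapter_settings: AdapterSettings,
}
```

With that in place, the loop body could read the settings once before consuming the stream and call `V::verify(&blob, &settings.sk, &settings.nodes_public_keys)`, and `get_sk` could be deleted.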