Implement verifier service

This commit is contained in:
Daniel Sanchez Quiros 2024-04-18 16:52:07 +02:00
parent 3d831100a5
commit a1042f287e
8 changed files with 90 additions and 78 deletions

View File

@ -14,8 +14,14 @@ pub trait DaEncoder {
pub trait DaVerifier { pub trait DaVerifier {
type DaBlob; type DaBlob;
type Sk;
type Pk;
type Attestation; type Attestation;
fn verify(&self, blob: &Self::DaBlob) -> Result<Self::Attestation, impl Error>; fn verify(
blob: &Self::DaBlob,
sk: &Self::Sk,
nodes_public_keys: &[Self::Pk],
) -> Option<Self::Attestation>;
} }
pub trait DaDispersal { pub trait DaDispersal {

View File

@ -17,6 +17,7 @@ num-bigint = "0.4.4"
rand = "0.8.5" rand = "0.8.5"
once_cell = "1.19" once_cell = "1.19"
sha3 = "0.10" sha3 = "0.10"
nomos-core = { path = "../../nomos-core" }
[dev-dependencies] [dev-dependencies]
rand = "0.8" rand = "0.8"

View File

@ -10,8 +10,13 @@ use kzgrs::Commitment;
#[derive(Clone, Eq, PartialEq, Debug)] #[derive(Clone, Eq, PartialEq, Debug)]
pub struct Chunk(pub Vec<u8>); pub struct Chunk(pub Vec<u8>);
#[derive(Debug)]
pub struct Row(pub Vec<Chunk>); pub struct Row(pub Vec<Chunk>);
#[derive(Debug)]
pub struct Column(pub Vec<Chunk>); pub struct Column(pub Vec<Chunk>);
pub struct ChunksMatrix(pub Vec<Row>); pub struct ChunksMatrix(pub Vec<Row>);
impl Chunk { impl Chunk {
@ -153,6 +158,7 @@ pub fn commitment_to_bytes(commitment: &Commitment) -> Vec<u8> {
buff.into_inner() buff.into_inner()
} }
#[derive(Debug)]
pub struct Attestation { pub struct Attestation {
pub signature: Signature, pub signature: Signature,
} }

View File

@ -1,4 +1,4 @@
mod common; pub mod common;
mod encoder; pub mod encoder;
mod global; pub mod global;
mod verifier; pub mod verifier;

View File

@ -1,9 +1,7 @@
// std // std
// crates // crates
use blst::min_sig::{PublicKey, SecretKey, Signature}; use blst::min_sig::{PublicKey, SecretKey};
use itertools::{izip, Itertools}; use itertools::{izip, Itertools};
use num_bigint::BigUint;
use sha3::{Digest, Sha3_256}; use sha3::{Digest, Sha3_256};
// internal // internal
@ -14,10 +12,11 @@ use crate::encoder::DaEncoderParams;
use crate::global::{DOMAIN, GLOBAL_PARAMETERS}; use crate::global::{DOMAIN, GLOBAL_PARAMETERS};
use kzgrs::common::field_element_from_bytes_le; use kzgrs::common::field_element_from_bytes_le;
use kzgrs::{ use kzgrs::{
bytes_to_polynomial, commit_polynomial, verify_element_proof, Commitment, FieldElement, Proof, bytes_to_polynomial, commit_polynomial, verify_element_proof, Commitment, Proof,
BYTES_PER_FIELD_ELEMENT, BYTES_PER_FIELD_ELEMENT,
}; };
#[derive(Debug)]
pub struct DaBlob { pub struct DaBlob {
column: Column, column: Column,
column_commitment: Commitment, column_commitment: Commitment,
@ -39,25 +38,9 @@ impl DaBlob {
} }
} }
pub struct DaVerifier { pub struct DaVerifier;
// TODO: substitute this for an abstraction to sign things over
sk: SecretKey,
index: usize,
}
impl DaVerifier { impl DaVerifier {
pub fn new(sk: SecretKey, nodes_public_keys: &[PublicKey]) -> Self {
// TODO: `is_sorted` is experimental, and by contract `nodes_public_keys` should be sorted
// but not sure how we could enforce it here without re-sorting anyway.
// assert!(nodes_public_keys.is_sorted());
let self_pk = sk.sk_to_pk();
let (index, _) = nodes_public_keys
.iter()
.find_position(|&pk| pk == &self_pk)
.expect("Self pk should be registered");
Self { sk, index }
}
fn verify_column( fn verify_column(
column: &Column, column: &Column,
column_commitment: &Commitment, column_commitment: &Commitment,
@ -127,20 +110,20 @@ impl DaVerifier {
true true
} }
fn build_attestation(&self, blob: &DaBlob) -> Attestation { fn build_attestation(sk: &SecretKey, blob: &DaBlob) -> Attestation {
let message = let message =
build_attestation_message(&blob.aggregated_column_commitment, &blob.rows_commitments); build_attestation_message(&blob.aggregated_column_commitment, &blob.rows_commitments);
let signature = self.sk.sign(&message, b"", b""); let signature = sk.sign(&message, b"", b"");
Attestation { signature } Attestation { signature }
} }
pub fn verify(&self, blob: DaBlob) -> Option<Attestation> { pub fn verify_with_index(blob: &DaBlob, sk: &SecretKey, index: usize) -> Option<Attestation> {
let is_column_verified = DaVerifier::verify_column( let is_column_verified = DaVerifier::verify_column(
&blob.column, &blob.column,
&blob.column_commitment, &blob.column_commitment,
&blob.aggregated_column_commitment, &blob.aggregated_column_commitment,
&blob.aggregated_column_proof, &blob.aggregated_column_proof,
self.index, index,
); );
if !is_column_verified { if !is_column_verified {
return None; return None;
@ -150,12 +133,42 @@ impl DaVerifier {
blob.column.as_ref(), blob.column.as_ref(),
&blob.rows_commitments, &blob.rows_commitments,
&blob.rows_proofs, &blob.rows_proofs,
self.index, index,
); );
if !are_chunks_verified { if !are_chunks_verified {
return None; return None;
} }
Some(self.build_attestation(&blob)) Some(Self::build_attestation(&sk, &blob))
}
pub fn verify(
blob: &DaBlob,
sk: &SecretKey,
nodes_public_keys: &[PublicKey],
) -> Option<Attestation> {
// TODO: `is_sorted` is experimental, and by contract `nodes_public_keys` should be sorted
// but not sure how we could enforce it here without re-sorting anyway.
// assert!(nodes_public_keys.is_sorted());
let self_pk = sk.sk_to_pk();
let (index, _) = nodes_public_keys
.iter()
.find_position(|&pk| pk == &self_pk)
.expect("Self pk should be registered");
Self::verify_with_index(blob, sk, index)
}
}
impl nomos_core::da::DaVerifier for DaVerifier {
type DaBlob = DaBlob;
type Sk = SecretKey;
type Pk = PublicKey;
type Attestation = Attestation;
fn verify(
blob: &Self::DaBlob,
sk: &Self::Sk,
nodes_public_keys: &[Self::Pk],
) -> Option<Self::Attestation> {
DaVerifier::verify(blob, sk, nodes_public_keys)
} }
} }
@ -171,6 +184,7 @@ mod test {
bytes_to_polynomial, commit_polynomial, generate_element_proof, BYTES_PER_FIELD_ELEMENT, bytes_to_polynomial, commit_polynomial, generate_element_proof, BYTES_PER_FIELD_ELEMENT,
}; };
use rand::{thread_rng, RngCore}; use rand::{thread_rng, RngCore};
use std::collections::HashMap;
#[test] #[test]
fn test_verify_column() { fn test_verify_column() {
@ -215,14 +229,9 @@ mod test {
SecretKey::key_gen(&buff, &[]).unwrap() SecretKey::key_gen(&buff, &[]).unwrap()
}) })
.collect(); .collect();
let verifiers: Vec<DaVerifier> = sks let pks: Vec<_> = sks.iter().map(SecretKey::sk_to_pk).enumerate().collect();
.into_iter()
.enumerate()
.map(|(index, sk)| DaVerifier { sk, index })
.collect();
let encoded_data = encoder.encode(&data).unwrap(); let encoded_data = encoder.encode(&data).unwrap();
for (i, column) in encoded_data.extended_data.columns().enumerate() { for (i, column) in encoded_data.extended_data.columns().enumerate() {
let verifier = &verifiers[i];
let da_blob = DaBlob { let da_blob = DaBlob {
column, column,
column_commitment: encoded_data.column_commitments[i].clone(), column_commitment: encoded_data.column_commitments[i].clone(),
@ -235,7 +244,7 @@ mod test {
.map(|proofs| proofs.get(i).cloned().unwrap()) .map(|proofs| proofs.get(i).cloned().unwrap())
.collect(), .collect(),
}; };
assert!(verifier.verify(da_blob).is_some()); assert!(DaVerifier::verify_with_index(&da_blob, &sks[i], i).is_some());
} }
} }
} }

View File

@ -11,6 +11,7 @@ futures = "0.3"
moka = { version = "0.11", features = ["future"] } moka = { version = "0.11", features = ["future"] }
nomos-core = { path = "../../nomos-core" } nomos-core = { path = "../../nomos-core" }
nomos-network = { path = "../network" } nomos-network = { path = "../network" }
kzgrs-backend = { path = "../../nomos-da/kzgrs-backend" }
overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" }
serde = "1.0" serde = "1.0"
tracing = "0.1" tracing = "0.1"

View File

@ -1,6 +0,0 @@
use nomos_core::da::DaVerifier;
pub trait VerifierBackend: DaVerifier {
type Settings;
fn new(settings: Self::Settings) -> Self;
}

View File

@ -1,14 +1,12 @@
mod backend;
mod network; mod network;
// std
// crates
use std::error::Error;
use std::fmt::Debug; use std::fmt::Debug;
use std::marker::PhantomData;
// std
// crates
use tokio_stream::StreamExt;
use tracing::error;
// internal // internal
use crate::verifier::backend::VerifierBackend;
use crate::verifier::network::NetworkAdapter; use crate::verifier::network::NetworkAdapter;
use nomos_core::da::DaVerifier; use nomos_core::da::DaVerifier;
use nomos_network::NetworkService; use nomos_network::NetworkService;
@ -17,60 +15,51 @@ use overwatch_rs::services::relay::{NoMessage, Relay};
use overwatch_rs::services::state::{NoOperator, NoState}; use overwatch_rs::services::state::{NoOperator, NoState};
use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId}; use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId};
use overwatch_rs::DynError; use overwatch_rs::DynError;
use tokio_stream::StreamExt;
use tracing::error;
pub struct DaVerifierService<Backend, N> pub struct DaVerifierService<N, V>
where where
Backend: VerifierBackend,
Backend::Settings: Clone,
N: NetworkAdapter, N: NetworkAdapter,
N::Settings: Clone, N::Settings: Clone,
V: DaVerifier,
{ {
network_relay: Relay<NetworkService<N::Backend>>, network_relay: Relay<NetworkService<N::Backend>>,
service_state: ServiceStateHandle<Self>, service_state: ServiceStateHandle<Self>,
verifier: Backend, _verifier: PhantomData<V>,
} }
#[derive(Clone)] #[derive(Clone)]
pub struct DaVerifierServiceSettings<BackendSettings, AdapterSettings> { pub struct DaVerifierServiceSettings<AdapterSettings> {
verifier_settings: BackendSettings,
network_adapter_settings: AdapterSettings, network_adapter_settings: AdapterSettings,
} }
impl<Backend, N> ServiceData for DaVerifierService<Backend, N> impl<N, V> ServiceData for DaVerifierService<N, V>
where where
Backend: VerifierBackend,
Backend::Settings: Clone,
N: NetworkAdapter, N: NetworkAdapter,
N::Settings: Clone, N::Settings: Clone,
V: DaVerifier,
{ {
const SERVICE_ID: ServiceId = "DaVerifier"; const SERVICE_ID: ServiceId = "DaVerifier";
type Settings = DaVerifierServiceSettings<Backend::Settings, N::Settings>; type Settings = DaVerifierServiceSettings<N::Settings>;
type State = NoState<Self::Settings>; type State = NoState<Self::Settings>;
type StateOperator = NoOperator<Self::State>; type StateOperator = NoOperator<Self::State>;
type Message = NoMessage; type Message = NoMessage;
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl<Backend, N> ServiceCore for DaVerifierService<Backend, N> impl<N, V> ServiceCore for DaVerifierService<N, V>
where where
Backend: VerifierBackend + Send + 'static, N: NetworkAdapter<Blob = V::DaBlob, Attestation = V::Attestation> + Send + 'static,
Backend::Settings: Clone + Send + Sync + 'static,
Backend::DaBlob: Debug,
Backend::Attestation: Debug,
N: NetworkAdapter<Blob = Backend::DaBlob, Attestation = Backend::Attestation> + Send + 'static,
N::Settings: Clone + Send + Sync + 'static, N::Settings: Clone + Send + Sync + 'static,
V: DaVerifier + Send + Sync + 'static,
V::DaBlob: Debug,
V::Attestation: Debug,
{ {
fn init(service_state: ServiceStateHandle<Self>) -> Result<Self, DynError> { fn init(service_state: ServiceStateHandle<Self>) -> Result<Self, DynError> {
let DaVerifierServiceSettings {
verifier_settings, ..
} = service_state.settings_reader.get_updated_settings();
let network_relay = service_state.overwatch_handle.relay(); let network_relay = service_state.overwatch_handle.relay();
Ok(Self { Ok(Self {
network_relay, network_relay,
service_state, service_state,
verifier: Backend::new(verifier_settings), _verifier: Default::default(),
}) })
} }
@ -78,7 +67,7 @@ where
let Self { let Self {
network_relay, network_relay,
service_state, service_state,
verifier, ..
} = self; } = self;
let DaVerifierServiceSettings { let DaVerifierServiceSettings {
network_adapter_settings, network_adapter_settings,
@ -88,13 +77,15 @@ where
let adapter = N::new(network_adapter_settings, network_relay).await; let adapter = N::new(network_adapter_settings, network_relay).await;
let mut blob_stream = adapter.blob_stream().await; let mut blob_stream = adapter.blob_stream().await;
while let Some((blob, reply_channel)) = blob_stream.next().await { while let Some((blob, reply_channel)) = blob_stream.next().await {
match verifier.verify(&blob) { let sk = get_sk();
Ok(attestation) => { let pks = &[];
match V::verify(&blob, sk, pks) {
Some(attestation) => {
if let Err(attestation) = reply_channel.send(attestation) { if let Err(attestation) = reply_channel.send(attestation) {
error!("Error replying attestation {:?}", attestation); error!("Error replying attestation {:?}", attestation);
} }
} }
Err(e) => { _ => {
error!("Received unverified blob {:?}", blob); error!("Received unverified blob {:?}", blob);
} }
} }
@ -102,3 +93,7 @@ where
Ok(()) Ok(())
} }
} }
fn get_sk<Sk>() -> &'static Sk {
todo!()
}