Refactor block (#609)

* Refactor Block/Header definition

Refactor the block/header definition so that it is now the responsibility
of the nomos-core crate. This removes the definitions in the ledger/consensus
crates, since there is no need at that level to have an understanding
of the block format.

The new header format supports both carnot and cryptarchia.
This commit is contained in:
Giacomo Pasini 2024-03-13 18:46:10 +01:00 committed by GitHub
parent e7d591b7bc
commit 50cff241fe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
54 changed files with 1046 additions and 730 deletions

View File

@ -1,4 +1,7 @@
use std::collections::{HashMap, HashSet}; use std::{
collections::{HashMap, HashSet},
hash::Hash,
};
pub mod overlay; pub mod overlay;
mod types; mod types;
@ -12,23 +15,27 @@ pub mod openapi {
} }
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct Carnot<O: Overlay> { pub struct Carnot<O: Overlay, Id: Eq + Hash> {
id: NodeId, id: NodeId,
current_view: View, current_view: View,
highest_voted_view: View, highest_voted_view: View,
local_high_qc: StandardQc, local_high_qc: StandardQc<Id>,
safe_blocks: HashMap<BlockId, Block>, safe_blocks: HashMap<Id, Block<Id>>,
tip: BlockId, tip: Id,
last_view_timeout_qc: Option<TimeoutQc>, last_view_timeout_qc: Option<TimeoutQc<Id>>,
latest_committed_block: Option<BlockId>, latest_committed_block: Option<Id>,
overlay: O, overlay: O,
} }
impl<O: Overlay> Carnot<O> { impl<O, Id> Carnot<O, Id>
pub fn from_genesis(id: NodeId, genesis_block: Block, overlay: O) -> Self { where
O: Overlay,
Id: Copy + Eq + Hash + core::fmt::Debug,
{
pub fn from_genesis(id: NodeId, genesis_block: Block<Id>, overlay: O) -> Self {
Self { Self {
current_view: View(0), current_view: View(0),
local_high_qc: StandardQc::genesis(), local_high_qc: StandardQc::genesis(genesis_block.id),
id, id,
highest_voted_view: View(-1), highest_voted_view: View(-1),
last_view_timeout_qc: None, last_view_timeout_qc: None,
@ -47,12 +54,12 @@ impl<O: Overlay> Carnot<O> {
self.highest_voted_view self.highest_voted_view
} }
pub fn safe_blocks(&self) -> &HashMap<BlockId, Block> { pub fn safe_blocks(&self) -> &HashMap<Id, Block<Id>> {
&self.safe_blocks &self.safe_blocks
} }
/// Return the most recent safe block /// Return the most recent safe block
pub fn tip(&self) -> Block { pub fn tip(&self) -> Block<Id> {
self.safe_blocks[&self.tip].clone() self.safe_blocks[&self.tip].clone()
} }
@ -65,7 +72,7 @@ impl<O: Overlay> Carnot<O> {
/// * Overlay changes for views < block.view should be made available before trying to process /// * Overlay changes for views < block.view should be made available before trying to process
/// a block by calling `receive_timeout_qc`. /// a block by calling `receive_timeout_qc`.
#[allow(clippy::result_unit_err)] #[allow(clippy::result_unit_err)]
pub fn receive_block(&self, block: Block) -> Result<Self, ()> { pub fn receive_block(&self, block: Block<Id>) -> Result<Self, ()> {
assert!( assert!(
self.safe_blocks.contains_key(&block.parent()), self.safe_blocks.contains_key(&block.parent()),
"out of order view not supported, missing parent block for {block:?}", "out of order view not supported, missing parent block for {block:?}",
@ -114,7 +121,7 @@ impl<O: Overlay> Carnot<O> {
/// Upon reception of a global timeout event /// Upon reception of a global timeout event
/// ///
/// Preconditions: /// Preconditions:
pub fn receive_timeout_qc(&self, timeout_qc: TimeoutQc) -> Self { pub fn receive_timeout_qc(&self, timeout_qc: TimeoutQc<Id>) -> Self {
let mut new_state = self.clone(); let mut new_state = self.clone();
if timeout_qc.view() < new_state.current_view { if timeout_qc.view() < new_state.current_view {
@ -134,7 +141,7 @@ impl<O: Overlay> Carnot<O> {
/// Preconditions: /// Preconditions:
/// * `receive_block(b)` must have been called successfully before trying to approve a block b. /// * `receive_block(b)` must have been called successfully before trying to approve a block b.
/// * A node should not attempt to vote for a block in a view earlier than the latest one it actively participated in. /// * A node should not attempt to vote for a block in a view earlier than the latest one it actively participated in.
pub fn approve_block(&self, block: Block) -> (Self, Send) { pub fn approve_block(&self, block: Block<Id>) -> (Self, Send<Id>) {
assert!( assert!(
self.safe_blocks.contains_key(&block.id), self.safe_blocks.contains_key(&block.id),
"{:?} not in {:?}", "{:?} not in {:?}",
@ -179,9 +186,9 @@ impl<O: Overlay> Carnot<O> {
/// * A node should not attempt to approve a view earlier than the latest one it actively participated in. /// * A node should not attempt to approve a view earlier than the latest one it actively participated in.
pub fn approve_new_view( pub fn approve_new_view(
&self, &self,
timeout_qc: TimeoutQc, timeout_qc: TimeoutQc<Id>,
new_views: HashSet<NewView>, new_views: HashSet<NewView<Id>>,
) -> (Self, Send) { ) -> (Self, Send<Id>) {
let new_view = timeout_qc.view().next(); let new_view = timeout_qc.view().next();
assert!( assert!(
new_view new_view
@ -243,7 +250,7 @@ impl<O: Overlay> Carnot<O> {
/// Preconditions: none! /// Preconditions: none!
/// Just notice that the timer only reset after a view change, i.e. a node can't timeout /// Just notice that the timer only reset after a view change, i.e. a node can't timeout
/// more than once for the same view /// more than once for the same view
pub fn local_timeout(&self) -> (Self, Option<Send>) { pub fn local_timeout(&self) -> (Self, Option<Send<Id>>) {
let mut new_state = self.clone(); let mut new_state = self.clone();
new_state.highest_voted_view = new_state.current_view; new_state.highest_voted_view = new_state.current_view;
@ -268,11 +275,11 @@ impl<O: Overlay> Carnot<O> {
(new_state, None) (new_state, None)
} }
fn block_is_safe(&self, block: Block) -> bool { fn block_is_safe(&self, block: Block<Id>) -> bool {
block.view >= self.current_view && block.view == block.parent_qc.view().next() block.view >= self.current_view && block.view == block.parent_qc.view().next()
} }
fn update_high_qc(&mut self, qc: Qc) { fn update_high_qc(&mut self, qc: Qc<Id>) {
let qc_view = qc.view(); let qc_view = qc.view();
match qc { match qc {
Qc::Standard(new_qc) if new_qc.view > self.local_high_qc.view => { Qc::Standard(new_qc) if new_qc.view > self.local_high_qc.view => {
@ -288,7 +295,7 @@ impl<O: Overlay> Carnot<O> {
} }
} }
fn update_timeout_qc(&mut self, timeout_qc: TimeoutQc) { fn update_timeout_qc(&mut self, timeout_qc: TimeoutQc<Id>) {
match (&self.last_view_timeout_qc, timeout_qc) { match (&self.last_view_timeout_qc, timeout_qc) {
(None, timeout_qc) => { (None, timeout_qc) => {
self.last_view_timeout_qc = Some(timeout_qc); self.last_view_timeout_qc = Some(timeout_qc);
@ -300,13 +307,13 @@ impl<O: Overlay> Carnot<O> {
} }
} }
fn update_latest_committed_block(&mut self, block: &Block) { fn update_latest_committed_block(&mut self, block: &Block<Id>) {
if let Some(block) = self.can_commit_grandparent(block) { if let Some(block) = self.can_commit_grandparent(block) {
self.latest_committed_block = Some(block.id); self.latest_committed_block = Some(block.id);
} }
} }
pub fn blocks_in_view(&self, view: View) -> Vec<Block> { pub fn blocks_in_view(&self, view: View) -> Vec<Block<Id>> {
self.safe_blocks self.safe_blocks
.iter() .iter()
.filter(|(_, b)| b.view == view) .filter(|(_, b)| b.view == view)
@ -314,12 +321,12 @@ impl<O: Overlay> Carnot<O> {
.collect() .collect()
} }
pub fn genesis_block(&self) -> Block { pub fn genesis_block(&self) -> Block<Id> {
self.blocks_in_view(View(0))[0].clone() self.blocks_in_view(View(0))[0].clone()
} }
// Returns the id of the grandparent block if it can be committed or None otherwise // Returns the id of the grandparent block if it can be committed or None otherwise
fn can_commit_grandparent(&self, block: &Block) -> Option<Block> { fn can_commit_grandparent(&self, block: &Block<Id>) -> Option<Block<Id>> {
let parent = self.safe_blocks.get(&block.parent())?; let parent = self.safe_blocks.get(&block.parent())?;
let grandparent = self.safe_blocks.get(&parent.parent())?; let grandparent = self.safe_blocks.get(&parent.parent())?;
@ -332,7 +339,7 @@ impl<O: Overlay> Carnot<O> {
None None
} }
pub fn latest_committed_block(&self) -> Block { pub fn latest_committed_block(&self) -> Block<Id> {
self.latest_committed_block self.latest_committed_block
.and_then(|id| self.safe_blocks.get(&id).cloned()) .and_then(|id| self.safe_blocks.get(&id).cloned())
.unwrap_or_else(|| self.genesis_block()) .unwrap_or_else(|| self.genesis_block())
@ -342,7 +349,7 @@ impl<O: Overlay> Carnot<O> {
self.latest_committed_block().view self.latest_committed_block().view
} }
pub fn latest_committed_blocks(&self, limit: Option<usize>) -> Vec<BlockId> { pub fn latest_committed_blocks(&self, limit: Option<usize>) -> Vec<Id> {
let limit = limit.unwrap_or(self.safe_blocks.len()); let limit = limit.unwrap_or(self.safe_blocks.len());
let mut res = vec![]; let mut res = vec![];
let mut current = self.latest_committed_block(); let mut current = self.latest_committed_block();
@ -363,11 +370,11 @@ impl<O: Overlay> Carnot<O> {
res res
} }
pub fn last_view_timeout_qc(&self) -> Option<TimeoutQc> { pub fn last_view_timeout_qc(&self) -> Option<TimeoutQc<Id>> {
self.last_view_timeout_qc.clone() self.last_view_timeout_qc.clone()
} }
pub fn high_qc(&self) -> StandardQc { pub fn high_qc(&self) -> StandardQc<Id> {
self.local_high_qc.clone() self.local_high_qc.clone()
} }
@ -444,15 +451,15 @@ mod test {
use super::*; use super::*;
fn init(nodes: Vec<NodeId>) -> Carnot<FlatOverlay<RoundRobin, FreezeMembership>> { fn init(nodes: Vec<NodeId>) -> Carnot<FlatOverlay<RoundRobin, FreezeMembership>, usize> {
assert!(!nodes.is_empty()); assert!(!nodes.is_empty());
Carnot::from_genesis( Carnot::from_genesis(
*nodes.first().unwrap(), *nodes.first().unwrap(),
Block { Block {
view: View(0), view: View(0),
id: BlockId::zeros(), id: 0,
parent_qc: Qc::Standard(StandardQc::genesis()), parent_qc: Qc::Standard(StandardQc::genesis(0)),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: *nodes.first().unwrap(), leader_id: *nodes.first().unwrap(),
}, },
@ -466,11 +473,10 @@ mod test {
} }
fn next_block( fn next_block(
engine: &Carnot<FlatOverlay<RoundRobin, FreezeMembership>>, engine: &Carnot<FlatOverlay<RoundRobin, FreezeMembership>, usize>,
block: &Block, block: &Block<usize>,
) -> Block { ) -> Block<usize> {
let mut next_id = block.id; let next_id = block.id + 1;
next_id.0[0] += 1;
Block { Block {
view: block.view.next(), view: block.view.next(),
@ -486,8 +492,8 @@ mod test {
} }
fn update_leader_selection( fn update_leader_selection(
engine: &Carnot<FlatOverlay<RoundRobin, FreezeMembership>>, engine: &Carnot<FlatOverlay<RoundRobin, FreezeMembership>, usize>,
) -> Carnot<FlatOverlay<RoundRobin, FreezeMembership>> { ) -> Carnot<FlatOverlay<RoundRobin, FreezeMembership>, usize> {
engine engine
.update_overlay(|overlay| { .update_overlay(|overlay| {
overlay.update_leader_selection( overlay.update_leader_selection(
@ -545,11 +551,10 @@ mod test {
// Ensure that receive_block() fails if the parent block has never been received. // Ensure that receive_block() fails if the parent block has never been received.
fn receive_block_with_unknown_parent() { fn receive_block_with_unknown_parent() {
let engine = init(vec![NodeId::new([0; 32])]); let engine = init(vec![NodeId::new([0; 32])]);
let mut parent_block_id = engine.genesis_block().id; let parent_block_id = 42;
parent_block_id.0[0] += 1; // generate an unknown parent block ID
let block = Block { let block = Block {
view: engine.current_view().next(), view: engine.current_view().next(),
id: BlockId::new([1; 32]), id: 1,
parent_qc: Qc::Standard(StandardQc { parent_qc: Qc::Standard(StandardQc {
view: engine.current_view(), view: engine.current_view(),
id: parent_block_id, id: parent_block_id,
@ -649,7 +654,7 @@ mod test {
// a future block should be rejected // a future block should be rejected
let future_block = Block { let future_block = Block {
id: BlockId::new([10; 32]), id: 10,
view: View(11), // a future view view: View(11), // a future view
parent_qc: Qc::Aggregated(AggregateQc { parent_qc: Qc::Aggregated(AggregateQc {
view: View(10), view: View(10),
@ -667,7 +672,7 @@ mod test {
// a past block should be also rejected // a past block should be also rejected
let mut past_block = block1; // with the same view as block1 let mut past_block = block1; // with the same view as block1
past_block.id = BlockId::new([10; 32]); past_block.id = 10;
assert!(engine.receive_block(past_block).is_err()); assert!(engine.receive_block(past_block).is_err());
} }
@ -744,7 +749,7 @@ mod test {
sender: NodeId::new([0; 32]), sender: NodeId::new([0; 32]),
high_qc: StandardQc { high_qc: StandardQc {
view: View(0), // genesis view: View(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
timeout_qc: None timeout_qc: None
}), }),
@ -767,7 +772,7 @@ mod test {
View(1), View(1),
StandardQc { StandardQc {
view: View::new(0), // genesis view: View::new(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
@ -792,7 +797,7 @@ mod test {
View(1), View(1),
StandardQc { StandardQc {
view: View(0), // genesis view: View(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
@ -819,7 +824,7 @@ mod test {
View(1), View(1),
StandardQc { StandardQc {
view: View(0), // genesis view: View(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
@ -861,7 +866,7 @@ mod test {
View(1), View(1),
StandardQc { StandardQc {
view: View(0), // genesis view: View(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
@ -874,7 +879,7 @@ mod test {
View(2), View(2),
StandardQc { StandardQc {
view: View(0), // genesis view: View(0), // genesis
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );

View File

@ -8,8 +8,6 @@ mod committee;
pub use committee::{Committee, CommitteeId}; pub use committee::{Committee, CommitteeId};
mod node_id; mod node_id;
pub use node_id::NodeId; pub use node_id::NodeId;
mod block_id;
pub use block_id::BlockId;
mod view; mod view;
pub use view::View; pub use view::View;
@ -21,32 +19,32 @@ pub use view::View;
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub enum Payload { pub enum Payload<Id> {
/// Vote for a block in a view /// Vote for a block in a view
Vote(Vote), Vote(Vote<Id>),
/// Signal that a local timeout has occurred /// Signal that a local timeout has occurred
Timeout(Timeout), Timeout(Timeout<Id>),
/// Vote for moving to a new view /// Vote for moving to a new view
NewView(NewView), NewView(NewView<Id>),
} }
/// Returned /// Returned
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct Vote { pub struct Vote<Id> {
pub view: View, pub view: View,
pub block: BlockId, pub block: Id,
} }
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct Timeout { pub struct Timeout<Id> {
pub view: View, pub view: View,
pub sender: NodeId, pub sender: NodeId,
pub high_qc: StandardQc, pub high_qc: StandardQc<Id>,
pub timeout_qc: Option<TimeoutQc>, pub timeout_qc: Option<TimeoutQc<Id>>,
} }
// TODO: We are making "mandatory" to have received the timeout_qc before the new_view votes. // TODO: We are making "mandatory" to have received the timeout_qc before the new_view votes.
@ -54,24 +52,24 @@ pub struct Timeout {
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct NewView { pub struct NewView<Id> {
pub view: View, pub view: View,
pub sender: NodeId, pub sender: NodeId,
pub timeout_qc: TimeoutQc, pub timeout_qc: TimeoutQc<Id>,
pub high_qc: StandardQc, pub high_qc: StandardQc<Id>,
} }
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct TimeoutQc { pub struct TimeoutQc<Id> {
view: View, view: View,
high_qc: StandardQc, high_qc: StandardQc<Id>,
sender: NodeId, sender: NodeId,
} }
impl TimeoutQc { impl<Id> TimeoutQc<Id> {
pub fn new(view: View, high_qc: StandardQc, sender: NodeId) -> Self { pub fn new(view: View, high_qc: StandardQc<Id>, sender: NodeId) -> Self {
assert!( assert!(
view >= high_qc.view, view >= high_qc.view,
"timeout_qc.view:{} shouldn't be lower than timeout_qc.high_qc.view:{}", "timeout_qc.view:{} shouldn't be lower than timeout_qc.high_qc.view:{}",
@ -90,7 +88,7 @@ impl TimeoutQc {
self.view self.view
} }
pub fn high_qc(&self) -> &StandardQc { pub fn high_qc(&self) -> &StandardQc<Id> {
&self.high_qc &self.high_qc
} }
@ -102,10 +100,10 @@ impl TimeoutQc {
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct Block { pub struct Block<Id> {
pub id: BlockId, pub id: Id,
pub view: View, pub view: View,
pub parent_qc: Qc, pub parent_qc: Qc<Id>,
pub leader_proof: LeaderProof, pub leader_proof: LeaderProof,
} }
@ -116,16 +114,16 @@ pub enum LeaderProof {
LeaderId { leader_id: NodeId }, LeaderId { leader_id: NodeId },
} }
impl Block { impl<Id: Copy> Block<Id> {
pub fn parent(&self) -> BlockId { pub fn parent(&self) -> Id {
self.parent_qc.block() self.parent_qc.block()
} }
pub fn genesis() -> Self { pub fn genesis(id: Id) -> Self {
Self { Self {
view: View(0), view: View(0),
id: BlockId::zeros(), id,
parent_qc: Qc::Standard(StandardQc::genesis()), parent_qc: Qc::Standard(StandardQc::genesis(id)),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: NodeId::new([0; 32]), leader_id: NodeId::new([0; 32]),
}, },
@ -135,45 +133,42 @@ impl Block {
/// Possible output events. /// Possible output events.
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
pub struct Send { pub struct Send<Id> {
pub to: Committee, pub to: Committee,
pub payload: Payload, pub payload: Payload<Id>,
} }
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct StandardQc { pub struct StandardQc<Id> {
pub view: View, pub view: View,
pub id: BlockId, pub id: Id,
} }
impl StandardQc { impl<Id> StandardQc<Id> {
pub fn genesis() -> Self { pub fn genesis(id: Id) -> Self {
Self { Self { view: View(-1), id }
view: View(-1),
id: BlockId::zeros(),
}
} }
} }
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct AggregateQc { pub struct AggregateQc<Id> {
pub high_qc: StandardQc, pub high_qc: StandardQc<Id>,
pub view: View, pub view: View,
} }
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub enum Qc { pub enum Qc<Id> {
Standard(StandardQc), Standard(StandardQc<Id>),
Aggregated(AggregateQc), Aggregated(AggregateQc<Id>),
} }
impl Qc { impl<Id: Copy> Qc<Id> {
/// The view in which this Qc was built. /// The view in which this Qc was built.
pub fn view(&self) -> View { pub fn view(&self) -> View {
match self { match self {
@ -184,14 +179,14 @@ impl Qc {
/// The id of the block this qc is for. /// The id of the block this qc is for.
/// This will be the parent of the block which will include this qc /// This will be the parent of the block which will include this qc
pub fn block(&self) -> BlockId { pub fn block(&self) -> Id {
match self { match self {
Qc::Standard(StandardQc { id, .. }) => *id, Qc::Standard(StandardQc { id, .. }) => *id,
Qc::Aggregated(AggregateQc { high_qc, .. }) => high_qc.id, Qc::Aggregated(AggregateQc { high_qc, .. }) => high_qc.id,
} }
} }
pub fn high_qc(&self) -> StandardQc { pub fn high_qc(&self) -> StandardQc<Id> {
match self { match self {
Qc::Standard(qc) => qc.clone(), Qc::Standard(qc) => qc.clone(),
Qc::Aggregated(AggregateQc { high_qc, .. }) => high_qc.clone(), Qc::Aggregated(AggregateQc { high_qc, .. }) => high_qc.clone(),
@ -207,11 +202,11 @@ mod test {
fn standard_qc() { fn standard_qc() {
let standard_qc = StandardQc { let standard_qc = StandardQc {
view: View(10), view: View(10),
id: BlockId::zeros(), id: 0,
}; };
let qc = Qc::Standard(standard_qc.clone()); let qc = Qc::Standard(standard_qc.clone());
assert_eq!(qc.view(), View(10)); assert_eq!(qc.view(), View(10));
assert_eq!(qc.block(), BlockId::new([0; 32])); assert_eq!(qc.block(), 0);
assert_eq!(qc.high_qc(), standard_qc); assert_eq!(qc.high_qc(), standard_qc);
} }
@ -221,12 +216,12 @@ mod test {
view: View(20), view: View(20),
high_qc: StandardQc { high_qc: StandardQc {
view: View(10), view: View(10),
id: BlockId::zeros(), id: 0,
}, },
}; };
let qc = Qc::Aggregated(aggregated_qc.clone()); let qc = Qc::Aggregated(aggregated_qc.clone());
assert_eq!(qc.view(), View(20)); assert_eq!(qc.view(), View(20));
assert_eq!(qc.block(), BlockId::new([0; 32])); assert_eq!(qc.block(), 0);
assert_eq!(qc.high_qc(), aggregated_qc.high_qc); assert_eq!(qc.high_qc(), aggregated_qc.high_qc);
} }
@ -236,26 +231,26 @@ mod test {
View(2), View(2),
StandardQc { StandardQc {
view: View(1), view: View(1),
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
assert_eq!(timeout_qc.view(), View(2)); assert_eq!(timeout_qc.view(), View(2));
assert_eq!(timeout_qc.high_qc().view, View(1)); assert_eq!(timeout_qc.high_qc().view, View(1));
assert_eq!(timeout_qc.high_qc().id, BlockId::new([0; 32])); assert_eq!(timeout_qc.high_qc().id, 0);
assert_eq!(timeout_qc.sender(), NodeId::new([0; 32])); assert_eq!(timeout_qc.sender(), NodeId::new([0; 32]));
let timeout_qc = TimeoutQc::new( let timeout_qc = TimeoutQc::new(
View(2), View(2),
StandardQc { StandardQc {
view: View(2), view: View(2),
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );
assert_eq!(timeout_qc.view(), View(2)); assert_eq!(timeout_qc.view(), View(2));
assert_eq!(timeout_qc.high_qc().view, View(2)); assert_eq!(timeout_qc.high_qc().view, View(2));
assert_eq!(timeout_qc.high_qc().id, BlockId::new([0; 32])); assert_eq!(timeout_qc.high_qc().id, 0);
assert_eq!(timeout_qc.sender(), NodeId::new([0; 32])); assert_eq!(timeout_qc.sender(), NodeId::new([0; 32]));
} }
@ -268,7 +263,7 @@ mod test {
View(1), View(1),
StandardQc { StandardQc {
view: View(2), view: View(2),
id: BlockId::zeros(), id: 0,
}, },
NodeId::new([0; 32]), NodeId::new([0; 32]),
); );

View File

@ -3,23 +3,6 @@
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct BlockId(pub(crate) [u8; 32]); pub struct BlockId(pub(crate) [u8; 32]);
#[cfg(feature = "serde")]
impl serde::Serialize for BlockId {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
nomos_utils::serde::serialize_bytes_array(self.0, serializer)
}
}
#[cfg(feature = "serde")]
impl<'de> serde::de::Deserialize<'de> for BlockId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
nomos_utils::serde::deserialize_bytes_array(deserializer).map(Self)
}
}
impl BlockId { impl BlockId {
pub const fn new(val: [u8; 32]) -> Self { pub const fn new(val: [u8; 32]) -> Self {
Self(val) Self(val)

View File

@ -1,3 +1,10 @@
mod ref_state; mod ref_state;
pub mod sut; pub mod sut;
mod transition; mod transition;
type Block = carnot_engine::Block<[u8; 32]>;
type AggregateQc = carnot_engine::AggregateQc<[u8; 32]>;
type Qc = carnot_engine::Qc<[u8; 32]>;
type StandardQc = carnot_engine::StandardQc<[u8; 32]>;
type TimeoutQc = carnot_engine::TimeoutQc<[u8; 32]>;
type NewView = carnot_engine::NewView<[u8; 32]>;

View File

@ -1,13 +1,12 @@
use std::collections::{BTreeMap, HashSet}; use std::collections::{BTreeMap, HashSet};
use carnot_engine::{ use carnot_engine::{LeaderProof, NodeId, View};
AggregateQc, Block, BlockId, LeaderProof, NodeId, Qc, StandardQc, TimeoutQc, View,
};
use proptest::prelude::*; use proptest::prelude::*;
use proptest::strategy::BoxedStrategy; use proptest::strategy::BoxedStrategy;
use proptest_state_machine::ReferenceStateMachine; use proptest_state_machine::ReferenceStateMachine;
use crate::fuzz::transition::Transition; use crate::fuzz::transition::Transition;
use crate::fuzz::{AggregateQc, Block, Qc, StandardQc, TimeoutQc};
// A reference state machine (RefState) is used to generate state transitions. // A reference state machine (RefState) is used to generate state transitions.
// To generate some kinds of transition, we may need to keep historical blocks in RefState. // To generate some kinds of transition, we may need to keep historical blocks in RefState.
@ -42,8 +41,8 @@ impl ReferenceStateMachine for RefState {
fn init_state() -> BoxedStrategy<Self::State> { fn init_state() -> BoxedStrategy<Self::State> {
let genesis_block = Block { let genesis_block = Block {
view: View::new(0), view: View::new(0),
id: BlockId::zeros(), id: [0; 32],
parent_qc: Qc::Standard(StandardQc::genesis()), parent_qc: Qc::Standard(StandardQc::genesis([0; 32])),
leader_proof: LEADER_PROOF.clone(), leader_proof: LEADER_PROOF.clone(),
}; };
@ -330,10 +329,11 @@ impl RefState {
fn transition_receive_safe_block_with_aggregated_qc(&self) -> BoxedStrategy<Transition> { fn transition_receive_safe_block_with_aggregated_qc(&self) -> BoxedStrategy<Transition> {
//TODO: more randomness //TODO: more randomness
let current_view = self.current_view(); let current_view = self.current_view();
let mut id = [0; 32];
rand::thread_rng().fill_bytes(&mut id);
Just(Transition::ReceiveSafeBlock(Block { Just(Transition::ReceiveSafeBlock(Block {
view: current_view.next(), view: current_view.next(),
id: BlockId::random(&mut rand::thread_rng()), id,
parent_qc: Qc::Aggregated(AggregateQc { parent_qc: Qc::Aggregated(AggregateQc {
high_qc: self.high_qc(), high_qc: self.high_qc(),
view: current_view, view: current_view,
@ -360,9 +360,13 @@ impl RefState {
pub fn high_qc(&self) -> StandardQc { pub fn high_qc(&self) -> StandardQc {
self.chain self.chain
.values() .values()
.map(|entry| entry.high_qc().unwrap_or_else(StandardQc::genesis)) .map(|entry| {
entry
.high_qc()
.unwrap_or_else(|| StandardQc::genesis([0; 32]))
})
.max_by_key(|qc| qc.view) .max_by_key(|qc| qc.view)
.unwrap_or_else(StandardQc::genesis) .unwrap_or_else(|| StandardQc::genesis([0; 32]))
} }
pub fn latest_timeout_qcs(&self) -> Vec<TimeoutQc> { pub fn latest_timeout_qcs(&self) -> Vec<TimeoutQc> {
@ -386,17 +390,19 @@ impl RefState {
self.contains_block(block.parent_qc.block()) self.contains_block(block.parent_qc.block())
} }
fn contains_block(&self, block_id: BlockId) -> bool { fn contains_block(&self, block_id: [u8; 32]) -> bool {
self.chain self.chain
.iter() .iter()
.any(|(_, entry)| entry.blocks.iter().any(|block| block.id == block_id)) .any(|(_, entry)| entry.blocks.iter().any(|block| block.id == block_id))
} }
fn consecutive_block(parent: &Block) -> Block { fn consecutive_block(parent: &Block) -> Block {
let mut id = [0; 32];
rand::thread_rng().fill_bytes(&mut id);
Block { Block {
// use rand because we don't want this to be shrinked by proptest // use rand because we don't want this to be shrinked by proptest
view: parent.view.next(), view: parent.view.next(),
id: BlockId::random(&mut rand::thread_rng()), id,
parent_qc: Qc::Standard(StandardQc { parent_qc: Qc::Standard(StandardQc {
view: parent.view, view: parent.view,
id: parent.id, id: parent.id,

View File

@ -8,13 +8,13 @@ use carnot_engine::{
use proptest_state_machine::{ReferenceStateMachine, StateMachineTest}; use proptest_state_machine::{ReferenceStateMachine, StateMachineTest};
use crate::fuzz::ref_state::RefState; use crate::fuzz::ref_state::RefState;
use crate::fuzz::transition::Transition; use crate::fuzz::{transition::Transition, Block};
// ConsensusEngineTest defines a state that we want to test. // ConsensusEngineTest defines a state that we want to test.
// This is called as SUT (System Under Test). // This is called as SUT (System Under Test).
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ConsensusEngineTest { pub struct ConsensusEngineTest {
pub engine: Carnot<FlatOverlay<RoundRobin, FreezeMembership>>, pub engine: Carnot<FlatOverlay<RoundRobin, FreezeMembership>, [u8; 32]>,
} }
impl ConsensusEngineTest { impl ConsensusEngineTest {
@ -23,8 +23,8 @@ impl ConsensusEngineTest {
NodeId::new([0; 32]), NodeId::new([0; 32]),
Block { Block {
view: View::new(0), view: View::new(0),
id: BlockId::zeros(), id: [0; 32],
parent_qc: Qc::Standard(StandardQc::genesis()), parent_qc: Qc::Standard(StandardQc::genesis([0; 32])),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: NodeId::new([0; 32]), leader_id: NodeId::new([0; 32]),
}, },

View File

@ -1,6 +1,6 @@
use std::collections::HashSet; use std::collections::HashSet;
use carnot_engine::{Block, NewView, TimeoutQc}; use crate::fuzz::{Block, NewView, TimeoutQc};
// State transitions that will be picked randomly // State transitions that will be picked randomly
#[derive(Clone, Debug)] #[derive(Clone, Debug)]

View File

@ -7,3 +7,9 @@ edition = "2021"
[dependencies] [dependencies]
thiserror = "1" thiserror = "1"
serde = { version = "1.0", features = ["derive"], optional = true }
nomos-utils = { path = "../../nomos-utils", optional = true }
[features]
default = []
serde = ["dep:serde", "nomos-utils/serde"]

View File

@ -1,5 +1,6 @@
use std::ops::Add; use std::ops::Add;
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)] #[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
pub struct Slot(u64); pub struct Slot(u64);

View File

@ -9,5 +9,10 @@ edition = "2021"
blake2 = "0.10" blake2 = "0.10"
rpds = "1" rpds = "1"
thiserror = "1" thiserror = "1"
serde = { version = "1.0", features = ["derive"], optional = true }
# TODO: we only need types definition from this crate # TODO: we only need types definition from this crate
cryptarchia-engine = { path = "../../consensus/cryptarchia-engine" } cryptarchia-engine = { path = "../../consensus/cryptarchia-engine" }
nomos-utils = { path = "../../nomos-utils", optional = true }
[features]
serde = ["dep:serde", "nomos-utils/serde"]

View File

@ -1,5 +1,6 @@
use cryptarchia_engine::Slot; use cryptarchia_engine::Slot;
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)] #[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct LeaderProof { pub struct LeaderProof {
commitment: Commitment, commitment: Commitment,
@ -91,3 +92,9 @@ impl AsRef<[u8]> for Commitment {
&self.0 &self.0
} }
} }
// ----------- serialization
use crate::utils::serialize_bytes_newtype;
serialize_bytes_newtype!(Commitment);
serialize_bytes_newtype!(Nullifier);

View File

@ -1,21 +1,22 @@
mod block;
mod config; mod config;
mod crypto; mod crypto;
mod leader_proof; mod leader_proof;
mod nonce;
mod utils;
use crate::{crypto::Blake2b, Commitment, LeaderProof, Nullifier};
use blake2::Digest; use blake2::Digest;
use cryptarchia_engine::{Epoch, Slot}; use cryptarchia_engine::{Epoch, Slot};
use crypto::Blake2b;
use rpds::HashTrieSet; use rpds::HashTrieSet;
use std::collections::HashMap; use std::{collections::HashMap, hash::Hash};
use thiserror::Error; use thiserror::Error;
pub use block::*;
pub use config::Config; pub use config::Config;
pub use leader_proof::*; pub use leader_proof::*;
pub use nonce::*;
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum LedgerError { pub enum LedgerError<Id> {
#[error("Commitment not found in the ledger state")] #[error("Commitment not found in the ledger state")]
CommitmentNotFound, CommitmentNotFound,
#[error("Nullifier already exists in the ledger state")] #[error("Nullifier already exists in the ledger state")]
@ -25,9 +26,9 @@ pub enum LedgerError {
#[error("Invalid block slot {block:?} for parent slot {parent:?}")] #[error("Invalid block slot {block:?} for parent slot {parent:?}")]
InvalidSlot { parent: Slot, block: Slot }, InvalidSlot { parent: Slot, block: Slot },
#[error("Parent block not found: {0:?}")] #[error("Parent block not found: {0:?}")]
ParentNotFound(HeaderId), ParentNotFound(Id),
#[error("Orphan block missing: {0:?}. Importing leader proofs requires the block to be validated first")] #[error("Orphan block missing: {0:?}. Importing leader proofs requires the block to be validated first")]
OrphanMissing(HeaderId), OrphanMissing(Id),
} }
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, Eq, PartialEq)]
@ -69,13 +70,16 @@ impl EpochState {
} }
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct Ledger { pub struct Ledger<Id: Eq + Hash> {
states: HashMap<HeaderId, LedgerState>, states: HashMap<Id, LedgerState>,
config: Config, config: Config,
} }
impl Ledger { impl<Id> Ledger<Id>
pub fn from_genesis(id: HeaderId, state: LedgerState, config: Config) -> Self { where
Id: Eq + Hash + Copy,
{
pub fn from_genesis(id: Id, state: LedgerState, config: Config) -> Self {
Self { Self {
states: [(id, state)].into_iter().collect(), states: [(id, state)].into_iter().collect(),
config, config,
@ -83,8 +87,15 @@ impl Ledger {
} }
#[must_use = "this returns the result of the operation, without modifying the original"] #[must_use = "this returns the result of the operation, without modifying the original"]
pub fn try_apply_header(&self, header: &Header) -> Result<Self, LedgerError> { pub fn try_update(
let parent_id = header.parent(); &self,
id: Id,
parent_id: Id,
slot: Slot,
proof: &LeaderProof,
// (update corresponding to the leader proof, leader proof)
orphan_proofs: impl IntoIterator<Item = (Id, LeaderProof)>,
) -> Result<Self, LedgerError<Id>> {
let parent_state = self let parent_state = self
.states .states
.get(&parent_id) .get(&parent_id)
@ -96,25 +107,27 @@ impl Ledger {
// * not in conflict with the current ledger state // * not in conflict with the current ledger state
// This first condition is checked here, the second one is checked in the state update // This first condition is checked here, the second one is checked in the state update
// (in particular, we do not check the imported leader proof is for an earlier slot) // (in particular, we do not check the imported leader proof is for an earlier slot)
for orphan in header.orphaned_proofs() { let (orphan_ids, orphan_proofs): (Vec<_>, Vec<_>) = orphan_proofs.into_iter().unzip();
if !self.states.contains_key(&orphan.id()) { for orphan_id in orphan_ids {
return Err(LedgerError::OrphanMissing(orphan.id())); if !self.states.contains_key(&orphan_id) {
return Err(LedgerError::OrphanMissing(orphan_id));
} }
} }
let new_state = parent_state let new_state =
.clone() parent_state
.try_apply_header(header, &self.config)?; .clone()
.try_update(slot, proof, &orphan_proofs, &self.config)?;
let mut states = self.states.clone(); let mut states = self.states.clone();
states.insert(header.id(), new_state); states.insert(id, new_state);
Ok(Self { states, config }) Ok(Self { states, config })
} }
pub fn state(&self, header_id: &HeaderId) -> Option<&LedgerState> { pub fn state(&self, id: &Id) -> Option<&LedgerState> {
self.states.get(header_id) self.states.get(id)
} }
} }
@ -134,13 +147,19 @@ pub struct LedgerState {
} }
impl LedgerState { impl LedgerState {
fn try_apply_header(self, header: &Header, config: &Config) -> Result<Self, LedgerError> { fn try_update<Id>(
self,
slot: Slot,
proof: &LeaderProof,
orphan_proofs: &[LeaderProof],
config: &Config,
) -> Result<Self, LedgerError<Id>> {
// TODO: import leader proofs // TODO: import leader proofs
self.update_epoch_state(header.slot(), config)? self.update_epoch_state(slot, config)?
.try_apply_leadership(header, config) .try_apply_leadership(proof, orphan_proofs, config)
} }
fn update_epoch_state(self, slot: Slot, config: &Config) -> Result<Self, LedgerError> { fn update_epoch_state<Id>(self, slot: Slot, config: &Config) -> Result<Self, LedgerError<Id>> {
if slot <= self.slot { if slot <= self.slot {
return Err(LedgerError::InvalidSlot { return Err(LedgerError::InvalidSlot {
parent: self.slot, parent: self.slot,
@ -204,7 +223,11 @@ impl LedgerState {
} }
} }
fn try_apply_proof(self, proof: &LeaderProof, config: &Config) -> Result<Self, LedgerError> { fn try_apply_proof<Id>(
self,
proof: &LeaderProof,
config: &Config,
) -> Result<Self, LedgerError<Id>> {
assert_eq!(config.epoch(proof.slot()), self.epoch_state.epoch); assert_eq!(config.epoch(proof.slot()), self.epoch_state.epoch);
// The leadership coin either has to be in the state snapshot or be derived from // The leadership coin either has to be in the state snapshot or be derived from
// a coin that is in the state snapshot (i.e. be in the lead coins commitments) // a coin that is in the state snapshot (i.e. be in the lead coins commitments)
@ -234,18 +257,17 @@ impl LedgerState {
}) })
} }
fn try_apply_leadership( fn try_apply_leadership<Id>(
mut self, mut self,
header: &Header, proof: &LeaderProof,
orphan_proofs: &[LeaderProof],
config: &Config, config: &Config,
) -> Result<Self, LedgerError> { ) -> Result<Self, LedgerError<Id>> {
for proof in header.orphaned_proofs() { for proof in orphan_proofs {
self = self.try_apply_proof(proof.leader_proof(), config)?; self = self.try_apply_proof(proof, config)?;
} }
self = self self = self.try_apply_proof(proof, config)?.update_nonce(proof);
.try_apply_proof(header.leader_proof(), config)?
.update_nonce(header.leader_proof());
Ok(self) Ok(self)
} }
@ -280,6 +302,27 @@ impl LedgerState {
..self ..self
} }
} }
pub fn from_commitments(commitments: impl IntoIterator<Item = Commitment>) -> Self {
let commitments = commitments.into_iter().collect::<HashTrieSet<_>>();
Self {
lead_commitments: commitments.clone(),
spend_commitments: commitments,
nullifiers: Default::default(),
nonce: [0; 32].into(),
slot: 0.into(),
next_epoch_state: EpochState {
epoch: 1.into(),
nonce: [0; 32].into(),
commitments: Default::default(),
},
epoch_state: EpochState {
epoch: 0.into(),
nonce: [0; 32].into(),
commitments: Default::default(),
},
}
}
} }
impl core::fmt::Debug for LedgerState { impl core::fmt::Debug for LedgerState {
@ -303,35 +346,51 @@ impl core::fmt::Debug for LedgerState {
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::{EpochState, Ledger, LedgerState}; use super::{EpochState, Ledger, LedgerState};
use crate::{ use crate::{crypto::Blake2b, Commitment, Config, LeaderProof, LedgerError, Nullifier};
crypto::Blake2b, Commitment, Config, Header, HeaderId, LeaderProof, LedgerError, Nullifier,
};
use blake2::Digest; use blake2::Digest;
use cryptarchia_engine::Slot; use cryptarchia_engine::Slot;
use std::hash::{DefaultHasher, Hash, Hasher}; use std::hash::{DefaultHasher, Hash, Hasher};
pub fn header(slot: impl Into<Slot>, parent: HeaderId, coin: Coin) -> Header { type HeaderId = [u8; 32];
let slot = slot.into();
Header::new(parent, 0, [0; 32].into(), slot, coin.to_proof(slot))
}
pub fn header_with_orphans( fn update_ledger(
slot: impl Into<Slot>, ledger: &mut Ledger<HeaderId>,
parent: HeaderId, parent: HeaderId,
slot: impl Into<Slot>,
coin: Coin, coin: Coin,
orphans: Vec<Header>, ) -> Result<HeaderId, LedgerError<HeaderId>> {
) -> Header { update_orphans(ledger, parent, slot, coin, vec![])
header(slot, parent, coin).with_orphaned_proofs(orphans)
} }
pub fn genesis_header() -> Header { fn make_id(parent: HeaderId, slot: impl Into<Slot>, coin: Coin) -> HeaderId {
Header::new( Blake2b::new()
[0; 32].into(), .chain_update(parent)
0, .chain_update(slot.into().to_be_bytes())
[0; 32].into(), .chain_update(coin.sk.to_be_bytes())
0.into(), .chain_update(coin.nonce.to_be_bytes())
LeaderProof::dummy(0.into()), .finalize()
) .into()
}
fn update_orphans(
ledger: &mut Ledger<HeaderId>,
parent: HeaderId,
slot: impl Into<Slot>,
coin: Coin,
orphans: Vec<(HeaderId, (u64, Coin))>,
) -> Result<HeaderId, LedgerError<HeaderId>> {
let slot = slot.into();
let id = make_id(parent, slot, coin);
*ledger = ledger.try_update(
id,
parent,
slot,
&coin.to_proof(slot),
orphans
.into_iter()
.map(|(id, (slot, coin))| (id, coin.to_proof(slot.into()))),
)?;
Ok(id)
} }
pub fn config() -> Config { pub fn config() -> Config {
@ -414,36 +473,41 @@ pub mod tests {
} }
} }
fn ledger(commitments: &[Commitment]) -> (Ledger, Header) { fn ledger(commitments: &[Commitment]) -> (Ledger<HeaderId>, HeaderId) {
let genesis_state = genesis_state(commitments); let genesis_state = genesis_state(commitments);
let genesis_header = genesis_header();
( (
Ledger::from_genesis(genesis_header.id(), genesis_state, config()), Ledger::from_genesis([0; 32], genesis_state, config()),
genesis_header, [0; 32],
) )
} }
fn apply_and_add_coin(mut ledger: Ledger, header: Header, coin: Coin) -> Ledger { fn apply_and_add_coin(
let header_id = header.id(); ledger: &mut Ledger<HeaderId>,
ledger = ledger.try_apply_header(&header).unwrap(); parent: HeaderId,
slot: impl Into<Slot>,
coin_proof: Coin,
coin_add: Coin,
) -> HeaderId {
let id = update_ledger(ledger, parent, slot, coin_proof).unwrap();
// we still don't have transactions, so the only way to add a commitment to spendable commitments and // we still don't have transactions, so the only way to add a commitment to spendable commitments and
// test epoch snapshotting is by doing this manually // test epoch snapshotting is by doing this manually
let mut block_state = ledger.states[&header_id].clone(); let mut block_state = ledger.states[&id].clone();
block_state.spend_commitments = block_state.spend_commitments.insert(coin.commitment()); block_state.spend_commitments = block_state.spend_commitments.insert(coin_add.commitment());
ledger.states.insert(header_id, block_state); ledger.states.insert(id, block_state);
ledger id
} }
#[test] #[test]
fn test_ledger_state_prevents_coin_reuse() { fn test_ledger_state_prevents_coin_reuse() {
let coin = Coin::new(0); let coin = Coin::new(0);
let (mut ledger, genesis) = ledger(&[coin.commitment()]); let (mut ledger, genesis) = ledger(&[coin.commitment()]);
let h = header(1, genesis.id(), coin);
ledger = ledger.try_apply_header(&h).unwrap(); let h = update_ledger(&mut ledger, genesis, 1, coin).unwrap();
// reusing the same coin should be prevented // reusing the same coin should be prevented
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(2, h.id(), coin)), update_ledger(&mut ledger, h, 2, coin),
Err(LedgerError::NullifierExists), Err(LedgerError::NullifierExists),
)); ));
} }
@ -451,10 +515,9 @@ pub mod tests {
#[test] #[test]
fn test_ledger_state_uncommited_coin() { fn test_ledger_state_uncommited_coin() {
let coin = Coin::new(0); let coin = Coin::new(0);
let (ledger, genesis) = ledger(&[]); let (mut ledger, genesis) = ledger(&[]);
let h = header(1, genesis.id(), coin);
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&h), update_ledger(&mut ledger, genesis, 1, coin),
Err(LedgerError::CommitmentNotFound), Err(LedgerError::CommitmentNotFound),
)); ));
} }
@ -472,17 +535,14 @@ pub mod tests {
]); ]);
// coin_1 & coin_2 both concurrently win slot 0 // coin_1 & coin_2 both concurrently win slot 0
let h_1 = header(1, genesis.id(), coin_1);
let h_2 = header(1, genesis.id(), coin_2);
ledger = ledger.try_apply_header(&h_1).unwrap(); update_ledger(&mut ledger, genesis, 1, coin_1).unwrap();
ledger = ledger.try_apply_header(&h_2).unwrap(); let h = update_ledger(&mut ledger, genesis, 1, coin_2).unwrap();
// then coin_3 wins slot 1 and chooses to extend from block_2 // then coin_3 wins slot 1 and chooses to extend from block_2
let h_3 = header(2, h_2.id(), coin_3); let h_3 = update_ledger(&mut ledger, h, 2, coin_3).unwrap();
ledger = ledger.try_apply_header(&h_3).unwrap();
// coin 1 is not spent in the chain that ends with block_3 // coin 1 is not spent in the chain that ends with block_3
assert!(!ledger.states[&h_3.id()].is_nullified(&coin_1.nullifier())); assert!(!ledger.states[&h_3].is_nullified(&coin_1.nullifier()));
} }
#[test] #[test]
@ -496,45 +556,39 @@ pub mod tests {
// An epoch will be 10 slots long, with stake distribution snapshot taken at the start of the epoch // An epoch will be 10 slots long, with stake distribution snapshot taken at the start of the epoch
// and nonce snapshot before slot 7 // and nonce snapshot before slot 7
let h_1 = header(1, genesis.id(), coins[0]); let h_1 = update_ledger(&mut ledger, genesis, 1, coins[0]).unwrap();
ledger = ledger.try_apply_header(&h_1).unwrap(); assert_eq!(ledger.states[&h_1].epoch_state.epoch, 0.into());
assert_eq!(ledger.states[&h_1.id()].epoch_state.epoch, 0.into());
let h_2 = header(6, h_1.id(), coins[1]); let h_2 = update_ledger(&mut ledger, h_1, 6, coins[1]).unwrap();
ledger = ledger.try_apply_header(&h_2).unwrap();
let h_3 = header(9, h_2.id(), coins[2]); let h_3 = apply_and_add_coin(&mut ledger, h_2, 9, coins[2], coin_4);
ledger = apply_and_add_coin(ledger, h_3.clone(), coin_4);
// test epoch jump // test epoch jump
let h_4 = header(20, h_3.id(), coins[3]); let h_4 = update_ledger(&mut ledger, h_3, 20, coins[3]).unwrap();
ledger = ledger.try_apply_header(&h_4).unwrap();
// nonce for epoch 2 should be taken at the end of slot 16, but in our case the last block is at slot 9 // nonce for epoch 2 should be taken at the end of slot 16, but in our case the last block is at slot 9
assert_eq!( assert_eq!(
ledger.states[&h_4.id()].epoch_state.nonce, ledger.states[&h_4].epoch_state.nonce,
ledger.states[&h_3.id()].nonce, ledger.states[&h_3].nonce,
); );
// stake distribution snapshot should be taken at the end of slot 9 // stake distribution snapshot should be taken at the end of slot 9
assert_eq!( assert_eq!(
ledger.states[&h_4.id()].epoch_state.commitments, ledger.states[&h_4].epoch_state.commitments,
ledger.states[&h_3.id()].spend_commitments, ledger.states[&h_3].spend_commitments,
); );
// nonce for epoch 1 should be taken at the end of slot 6 // nonce for epoch 1 should be taken at the end of slot 6
let h_5 = header(10, h_3.id(), coins[3]); let h_5 = apply_and_add_coin(&mut ledger, h_3, 10, coins[3], coin_5);
ledger = apply_and_add_coin(ledger, h_5.clone(), coin_5);
assert_eq!( assert_eq!(
ledger.states[&h_5.id()].epoch_state.nonce, ledger.states[&h_5].epoch_state.nonce,
ledger.states[&h_2.id()].nonce, ledger.states[&h_2].nonce,
); );
let h_6 = header(20, h_5.id(), coins[3].evolve()); let h_6 = update_ledger(&mut ledger, h_5, 20, coins[3].evolve()).unwrap();
ledger = ledger.try_apply_header(&h_6).unwrap();
// stake distribution snapshot should be taken at the end of slot 9, check that changes in slot 10 // stake distribution snapshot should be taken at the end of slot 9, check that changes in slot 10
// are ignored // are ignored
assert_eq!( assert_eq!(
ledger.states[&h_6.id()].epoch_state.commitments, ledger.states[&h_6].epoch_state.commitments,
ledger.states[&h_3.id()].spend_commitments, ledger.states[&h_3].spend_commitments,
); );
} }
@ -542,25 +596,23 @@ pub mod tests {
fn test_evolved_coin_is_eligible_for_leadership() { fn test_evolved_coin_is_eligible_for_leadership() {
let coin = Coin::new(0); let coin = Coin::new(0);
let (mut ledger, genesis) = ledger(&[coin.commitment()]); let (mut ledger, genesis) = ledger(&[coin.commitment()]);
let h = header(1, genesis.id(), coin);
ledger = ledger.try_apply_header(&h).unwrap(); let h = update_ledger(&mut ledger, genesis, 1, coin).unwrap();
// reusing the same coin should be prevented // reusing the same coin should be prevented
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(2, h.id(), coin)), update_ledger(&mut ledger, h, 2, coin),
Err(LedgerError::NullifierExists), Err(LedgerError::NullifierExists),
)); ));
// the evolved coin is not elibile before block 2 as it has not appeared on the ledger yet // the evolved coin is not elibile before block 2 as it has not appeared on the ledger yet
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(2, genesis.id(), coin.evolve())), update_ledger(&mut ledger, genesis, 2, coin.evolve()),
Err(LedgerError::CommitmentNotFound), Err(LedgerError::CommitmentNotFound),
)); ));
// the evolved coin is eligible after coin 1 is spent // the evolved coin is eligible after coin 1 is spent
assert!(ledger assert!(update_ledger(&mut ledger, h, 2, coin.evolve()).is_ok());
.try_apply_header(&header(2, h.id(), coin.evolve()))
.is_ok());
} }
#[test] #[test]
@ -570,39 +622,34 @@ pub mod tests {
let (mut ledger, genesis) = ledger(&[coin.commitment()]); let (mut ledger, genesis) = ledger(&[coin.commitment()]);
// EPOCH 0 // EPOCH 0
let h_0_1 = header(1, genesis.id(), coin);
// mint a new coin to be used for leader elections in upcoming epochs // mint a new coin to be used for leader elections in upcoming epochs
ledger = apply_and_add_coin(ledger, h_0_1.clone(), coin_1); let h_0_1 = apply_and_add_coin(&mut ledger, genesis, 1, coin, coin_1);
let h_0_2 = header(2, h_0_1.id(), coin_1);
// the new coin is not yet eligible for leader elections // the new coin is not yet eligible for leader elections
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&h_0_2), update_ledger(&mut ledger, h_0_1, 2, coin_1),
Err(LedgerError::CommitmentNotFound), Err(LedgerError::CommitmentNotFound),
)); ));
// but the evolved coin can // // but the evolved coin can
let h_0_2 = header(2, h_0_1.id(), coin.evolve()); let h_0_2 = update_ledger(&mut ledger, h_0_1, 2, coin.evolve()).unwrap();
ledger = ledger.try_apply_header(&h_0_2).unwrap();
// EPOCH 1 // EPOCH 1
for i in 10..20 { for i in 10..20 {
// the newly minted coin is still not eligible in the following epoch since the // the newly minted coin is still not eligible in the following epoch since the
// stake distribution snapshot is taken at the beginning of the previous epoch // stake distribution snapshot is taken at the beginning of the previous epoch
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(i, h_0_2.id(), coin_1)), update_ledger(&mut ledger, h_0_2, i, coin_1),
Err(LedgerError::CommitmentNotFound), Err(LedgerError::CommitmentNotFound),
)); ));
} }
// EPOCH 2 // EPOCH 2
// the coin is finally eligible 2 epochs after it was first minted // the coin is finally eligible 2 epochs after it was first minted
let h_2_0 = header(20, h_0_2.id(), coin_1); let h_2_0 = update_ledger(&mut ledger, h_0_2, 20, coin_1).unwrap();
ledger = ledger.try_apply_header(&h_2_0).unwrap();
// and now the minted coin can freely use the evolved coin for subsequent blocks // and now the minted coin can freely use the evolved coin for subsequent blocks
let h_2_1 = header(21, h_2_0.id(), coin_1.evolve()); update_ledger(&mut ledger, h_2_0, 21, coin_1.evolve()).unwrap();
ledger.try_apply_header(&h_2_1).unwrap();
} }
#[test] #[test]
@ -614,83 +661,83 @@ pub mod tests {
let coin_new_new = coin_new.evolve(); let coin_new_new = coin_new.evolve();
// produce a fork where the coin has been spent twice // produce a fork where the coin has been spent twice
let fork_1 = header(1, genesis.id(), coin); let fork_1 = make_id(genesis, 1, coin);
let fork_2 = header(2, fork_1.id(), coin_new); let fork_2 = make_id(fork_1, 2, coin_new);
// neither of the evolved coins should be usable right away in another branch // neither of the evolved coins should be usable right away in another branch
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(1, genesis.id(), coin_new)), update_ledger(&mut ledger, genesis, 1, coin_new),
Err(LedgerError::CommitmentNotFound) Err(LedgerError::CommitmentNotFound)
)); ));
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header(1, genesis.id(), coin_new_new)), update_ledger(&mut ledger, genesis, 1, coin_new_new),
Err(LedgerError::CommitmentNotFound) Err(LedgerError::CommitmentNotFound)
)); ));
// they also should not be accepted if the fork from where they have been imported has not been seen already // they also should not be accepted if the fork from where they have been imported has not been seen already
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header_with_orphans( update_orphans(&mut ledger, genesis, 1, coin_new, vec![(fork_1, (1, coin))]),
1,
genesis.id(),
coin_new,
vec![fork_1.clone()]
)),
Err(LedgerError::OrphanMissing(_)) Err(LedgerError::OrphanMissing(_))
)); ));
// now the first block of the fork is seen (and accepted) // now the first block of the fork is seen (and accepted)
ledger = ledger.try_apply_header(&fork_1).unwrap(); let h_1 = update_ledger(&mut ledger, genesis, 1, coin).unwrap();
assert_eq!(h_1, fork_1);
// and it can now be imported in another branch (note this does not validate it's for an earlier slot) // and it can now be imported in another branch (note this does not validate it's for an earlier slot)
ledger update_orphans(
.try_apply_header(&header_with_orphans( &mut ledger.clone(),
1, genesis,
genesis.id(), 1,
coin_new, coin_new,
vec![fork_1.clone()], vec![(fork_1, (1, coin))],
)) )
.unwrap(); .unwrap();
// but the next coin is still not accepted since the second block using the evolved coin has not been seen yet // but the next coin is still not accepted since the second block using the evolved coin has not been seen yet
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header_with_orphans( update_orphans(
&mut ledger.clone(),
genesis,
1, 1,
genesis.id(),
coin_new_new, coin_new_new,
vec![fork_1.clone(), fork_2.clone()] vec![(fork_1, (1, coin)), (fork_2, (2, coin_new))],
)), ),
Err(LedgerError::OrphanMissing(_)) Err(LedgerError::OrphanMissing(_))
)); ));
// now the second block of the fork is seen as well and the coin evolved twice can be used in another branch // now the second block of the fork is seen as well and the coin evolved twice can be used in another branch
ledger = ledger.try_apply_header(&fork_2).unwrap(); let h_2 = update_ledger(&mut ledger, h_1, 2, coin_new).unwrap();
ledger assert_eq!(h_2, fork_2);
.try_apply_header(&header_with_orphans( update_orphans(
1, &mut ledger.clone(),
genesis.id(), genesis,
coin_new_new, 1,
vec![fork_1.clone(), fork_2.clone()], coin_new_new,
)) vec![(fork_1, (1, coin)), (fork_2, (2, coin_new))],
.unwrap(); )
.unwrap();
// but we can't import just the second proof because it's using an evolved coin that has not been seen yet // but we can't import just the second proof because it's using an evolved coin that has not been seen yet
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header_with_orphans( update_orphans(
&mut ledger.clone(),
genesis,
1, 1,
genesis.id(),
coin_new_new, coin_new_new,
vec![fork_2.clone()] vec![(fork_2, (2, coin_new))],
)), ),
Err(LedgerError::CommitmentNotFound) Err(LedgerError::CommitmentNotFound)
)); ));
// an imported proof that uses a coin that was already used in the base branch should not be allowed // an imported proof that uses a coin that was already used in the base branch should not be allowed
let header_1 = header(1, genesis.id(), coin); let header_1 = update_ledger(&mut ledger, genesis, 1, coin).unwrap();
ledger = ledger.try_apply_header(&header_1).unwrap();
assert!(matches!( assert!(matches!(
ledger.try_apply_header(&header_with_orphans( update_orphans(
&mut ledger,
header_1,
2, 2,
header_1.id(),
coin_new_new, coin_new_new,
vec![fork_1.clone(), fork_2.clone()] vec![(fork_1, (1, coin)), (fork_2, (2, coin_new))],
)), ),
Err(LedgerError::NullifierExists) Err(LedgerError::NullifierExists)
)); ));
} }

View File

@ -0,0 +1,17 @@
use crate::utils::serialize_bytes_newtype;
#[derive(Clone, Debug, Eq, PartialEq, Copy)]
pub struct Nonce([u8; 32]);
impl From<[u8; 32]> for Nonce {
fn from(nonce: [u8; 32]) -> Self {
Self(nonce)
}
}
impl From<Nonce> for [u8; 32] {
fn from(nonce: Nonce) -> [u8; 32] {
nonce.0
}
}
serialize_bytes_newtype!(Nonce);

View File

@ -0,0 +1,22 @@
macro_rules! serialize_bytes_newtype {
($newtype:ty) => {
#[cfg(feature = "serde")]
impl serde::Serialize for $newtype {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
nomos_utils::serde::serialize_bytes_array(self.0, serializer)
}
}
#[cfg(feature = "serde")]
impl<'de> serde::de::Deserialize<'de> for $newtype {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
nomos_utils::serde::deserialize_bytes_array(deserializer).map(Self)
}
}
};
}
pub(crate) use serialize_bytes_newtype;

View File

@ -19,9 +19,8 @@ use tower_http::{
use utoipa::OpenApi; use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi; use utoipa_swagger_ui::SwaggerUi;
use carnot_engine::BlockId;
use full_replication::{Blob, Certificate}; use full_replication::{Blob, Certificate};
use nomos_core::{da::blob, tx::Transaction}; use nomos_core::{da::blob, header::HeaderId, tx::Transaction};
use nomos_mempool::{network::adapters::libp2p::Libp2pAdapter, openapi::Status, MempoolMetrics}; use nomos_mempool::{network::adapters::libp2p::Libp2pAdapter, openapi::Status, MempoolMetrics};
use nomos_network::backends::libp2p::Libp2p; use nomos_network::backends::libp2p::Libp2p;
use nomos_storage::backends::StorageSerde; use nomos_storage::backends::StorageSerde;
@ -53,7 +52,7 @@ pub struct AxumBackend<T, S, const SIZE: usize> {
da_status, da_status,
), ),
components( components(
schemas(Status<BlockId>, MempoolMetrics) schemas(Status<HeaderId>, MempoolMetrics)
), ),
tags( tags(
(name = "da", description = "data availibility related APIs") (name = "da", description = "data availibility related APIs")
@ -255,8 +254,8 @@ where
#[derive(Deserialize)] #[derive(Deserialize)]
struct QueryParams { struct QueryParams {
from: Option<BlockId>, from: Option<HeaderId>,
to: Option<BlockId>, to: Option<HeaderId>,
} }
#[utoipa::path( #[utoipa::path(
@ -300,7 +299,7 @@ async fn libp2p_info(State(handle): State<OverwatchHandle>) -> Response {
(status = 500, description = "Internal server error", body = String), (status = 500, description = "Internal server error", body = String),
) )
)] )]
async fn block<S, Tx>(State(handle): State<OverwatchHandle>, Json(id): Json<BlockId>) -> Response async fn block<S, Tx>(State(handle): State<OverwatchHandle>, Json(id): Json<HeaderId>) -> Response
where where
Tx: serde::Serialize + serde::de::DeserializeOwned + Clone + Eq + core::hash::Hash, Tx: serde::Serialize + serde::de::DeserializeOwned + Clone + Eq + core::hash::Hash,
S: StorageSerde + Send + Sync + 'static, S: StorageSerde + Send + Sync + 'static,

View File

@ -15,8 +15,8 @@ use bytes::Bytes;
use carnot_consensus::CarnotConsensus; use carnot_consensus::CarnotConsensus;
use nomos_api::ApiService; use nomos_api::ApiService;
use nomos_core::{ use nomos_core::{
block::BlockId,
da::{blob, certificate}, da::{blob, certificate},
header::HeaderId,
tx::Transaction, tx::Transaction,
wire, wire,
}; };
@ -59,10 +59,10 @@ const MB16: usize = 1024 * 1024 * 16;
pub type Carnot = CarnotConsensus< pub type Carnot = CarnotConsensus<
ConsensusLibp2pAdapter, ConsensusLibp2pAdapter,
MockPool<BlockId, Tx, <Tx as Transaction>::Hash>, MockPool<HeaderId, Tx, <Tx as Transaction>::Hash>,
MempoolLibp2pAdapter<Tx, <Tx as Transaction>::Hash>, MempoolLibp2pAdapter<Tx, <Tx as Transaction>::Hash>,
MockPool< MockPool<
BlockId, HeaderId,
Certificate, Certificate,
<<Certificate as certificate::Certificate>::Blob as blob::Blob>::Hash, <<Certificate as certificate::Certificate>::Blob as blob::Blob>::Hash,
>, >,
@ -82,7 +82,7 @@ pub type DataAvailability = DataAvailabilityService<
DaLibp2pAdapter<Blob, Attestation>, DaLibp2pAdapter<Blob, Attestation>,
>; >;
type Mempool<K, V, D> = MempoolService<MempoolLibp2pAdapter<K, V>, MockPool<BlockId, K, V>, D>; type Mempool<K, V, D> = MempoolService<MempoolLibp2pAdapter<K, V>, MockPool<HeaderId, K, V>, D>;
#[derive(Services)] #[derive(Services)]
pub struct Nomos { pub struct Nomos {

View File

@ -1,6 +1,7 @@
use super::CLIENT; use super::CLIENT;
use carnot_consensus::CarnotInfo; use carnot_consensus::CarnotInfo;
use carnot_engine::{Block, BlockId}; use carnot_engine::Block;
use nomos_core::header::HeaderId;
use reqwest::Url; use reqwest::Url;
pub async fn carnot_info(node: &Url) -> Result<CarnotInfo, reqwest::Error> { pub async fn carnot_info(node: &Url) -> Result<CarnotInfo, reqwest::Error> {
@ -15,9 +16,9 @@ pub async fn carnot_info(node: &Url) -> Result<CarnotInfo, reqwest::Error> {
pub async fn get_blocks_info( pub async fn get_blocks_info(
node: &Url, node: &Url,
from: Option<BlockId>, from: Option<HeaderId>,
to: Option<BlockId>, to: Option<HeaderId>,
) -> Result<Vec<Block>, reqwest::Error> { ) -> Result<Vec<Block<HeaderId>>, reqwest::Error> {
const NODE_CARNOT_INFO_PATH: &str = "carnot/blocks"; const NODE_CARNOT_INFO_PATH: &str = "carnot/blocks";
let mut req = CLIENT.get(node.join(NODE_CARNOT_INFO_PATH).unwrap()); let mut req = CLIENT.get(node.join(NODE_CARNOT_INFO_PATH).unwrap());
if let Some(from) = from { if let Some(from) = from {

View File

@ -1,13 +1,13 @@
use super::CLIENT; use super::CLIENT;
use carnot_engine::BlockId;
use full_replication::Certificate; use full_replication::Certificate;
use nomos_core::block::Block; use nomos_core::block::Block;
use nomos_core::header::HeaderId;
use nomos_node::Tx; use nomos_node::Tx;
use reqwest::Url; use reqwest::Url;
pub async fn get_block_contents( pub async fn get_block_contents(
node: &Url, node: &Url,
block: &BlockId, block: &HeaderId,
) -> Result<Option<Block<Tx, Certificate>>, reqwest::Error> { ) -> Result<Option<Block<Tx, Certificate>>, reqwest::Error> {
const BLOCK_PATH: &str = "storage/block"; const BLOCK_PATH: &str = "storage/block";
CLIENT CLIENT

View File

@ -18,7 +18,7 @@ use full_replication::{
AbsoluteNumber, Attestation, Certificate, FullReplication, Settings as DaSettings, AbsoluteNumber, Attestation, Certificate, FullReplication, Settings as DaSettings,
}; };
use futures::{stream, StreamExt}; use futures::{stream, StreamExt};
use nomos_core::{block::BlockId, da::DaProtocol, wire}; use nomos_core::{da::DaProtocol, header::HeaderId, wire};
use nomos_log::{LoggerBackend, LoggerSettings, SharedWriter}; use nomos_log::{LoggerBackend, LoggerSettings, SharedWriter};
use nomos_network::{backends::libp2p::Libp2p, NetworkService}; use nomos_network::{backends::libp2p::Libp2p, NetworkService};
use overwatch_rs::{overwatch::OverwatchRunner, services::ServiceData}; use overwatch_rs::{overwatch::OverwatchRunner, services::ServiceData};
@ -266,7 +266,7 @@ struct ChatMessage {
#[tokio::main] #[tokio::main]
async fn check_for_messages(sender: Sender<Vec<ChatMessage>>, node: Url) { async fn check_for_messages(sender: Sender<Vec<ChatMessage>>, node: Url) {
// Should ask for the genesis block to be more robust // Should ask for the genesis block to be more robust
let mut last_tip = BlockId::zeros(); let mut last_tip = [0; 32].into();
loop { loop {
if let Ok((new_tip, messages)) = fetch_new_messages(&last_tip, &node).await { if let Ok((new_tip, messages)) = fetch_new_messages(&last_tip, &node).await {
@ -280,7 +280,7 @@ async fn check_for_messages(sender: Sender<Vec<ChatMessage>>, node: Url) {
// Process a single block's blobs and return chat messages // Process a single block's blobs and return chat messages
async fn process_block_blobs( async fn process_block_blobs(
node: Url, node: Url,
block_id: &BlockId, block_id: &HeaderId,
da_settings: DaSettings, da_settings: DaSettings,
) -> Result<Vec<ChatMessage>, Box<dyn std::error::Error>> { ) -> Result<Vec<ChatMessage>, Box<dyn std::error::Error>> {
let blobs = get_block_blobs(&node, block_id).await?; let blobs = get_block_blobs(&node, block_id).await?;
@ -304,9 +304,9 @@ async fn process_block_blobs(
// Fetch new messages since the last tip // Fetch new messages since the last tip
async fn fetch_new_messages( async fn fetch_new_messages(
last_tip: &BlockId, last_tip: &HeaderId,
node: &Url, node: &Url,
) -> Result<(BlockId, Vec<ChatMessage>), Box<dyn std::error::Error>> { ) -> Result<(HeaderId, Vec<ChatMessage>), Box<dyn std::error::Error>> {
// By only specifying the 'to' parameter we get all the blocks since the last tip // By only specifying the 'to' parameter we get all the blocks since the last tip
let mut new_blocks = get_blocks_info(node, None, Some(*last_tip)) let mut new_blocks = get_blocks_info(node, None, Some(*last_tip))
.await? .await?

View File

@ -1,6 +1,6 @@
use carnot_engine::BlockId;
use full_replication::Blob; use full_replication::Blob;
use nomos_core::da::certificate::Certificate; use nomos_core::da::certificate::Certificate;
use nomos_core::header::HeaderId;
use reqwest::Url; use reqwest::Url;
use thiserror::Error; use thiserror::Error;
@ -15,7 +15,7 @@ pub enum Error {
} }
/// Return the blobs whose certificate has been included in the provided block. /// Return the blobs whose certificate has been included in the provided block.
pub async fn get_block_blobs(node: &Url, block: &BlockId) -> Result<Vec<Blob>, Error> { pub async fn get_block_blobs(node: &Url, block: &HeaderId) -> Result<Vec<Blob>, Error> {
let block = get_block_contents(node, block) let block = get_block_contents(node, block)
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;

View File

@ -13,6 +13,8 @@ async-trait = { version = "0.1" }
blake2 = { version = "0.10" } blake2 = { version = "0.10" }
bytes = "1.3" bytes = "1.3"
carnot-engine = { path = "../consensus/carnot-engine", features = ["serde"]} carnot-engine = { path = "../consensus/carnot-engine", features = ["serde"]}
cryptarchia-engine = { path = "../consensus/cryptarchia-engine", features = ["serde"]}
cryptarchia-ledger = { path = "../ledger/cryptarchia-ledger", features = ["serde"]}
futures = "0.3" futures = "0.3"
raptorq = { version = "1.7", optional = true } raptorq = { version = "1.7", optional = true }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
@ -20,6 +22,7 @@ thiserror = "1.0"
bincode = "1.3" bincode = "1.3"
once_cell = "1.0" once_cell = "1.0"
indexmap = { version = "1.9", features = ["serde"] } indexmap = { version = "1.9", features = ["serde"] }
const-hex = "1"
[dev-dependencies] [dev-dependencies]
rand = "0.8" rand = "0.8"

View File

@ -1,16 +1,22 @@
// std // std
use indexmap::IndexSet;
use std::hash::Hash; use std::hash::Hash;
// crates // crates
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde::Serialize; use serde::Serialize;
// internal // internal
use crate::block::Block; use crate::block::Block;
use crate::crypto::Blake2b;
use crate::da::certificate::BlobCertificateSelect; use crate::da::certificate::BlobCertificateSelect;
use crate::da::certificate::Certificate; use crate::da::certificate::Certificate;
use crate::header::{
carnot::Builder as CarnotBuilder, cryptarchia::Builder as CryptarchiaBuilder, Header, HeaderId,
};
use crate::tx::{Transaction, TxSelect}; use crate::tx::{Transaction, TxSelect};
use crate::wire;
use blake2::digest::Digest;
use carnot_engine::overlay::RandomBeaconState; use carnot_engine::overlay::RandomBeaconState;
use carnot_engine::{NodeId, Qc, View}; use carnot_engine::{LeaderProof, Qc, View};
/// Wrapper over a block building `new` method than holds intermediary state and can be /// Wrapper over a block building `new` method than holds intermediary state and can be
/// passed around. It also compounds the transaction selection and blob selection heuristics to be /// passed around. It also compounds the transaction selection and blob selection heuristics to be
/// used for transaction and blob selection. /// used for transaction and blob selection.
@ -20,10 +26,6 @@ use carnot_engine::{NodeId, Qc, View};
/// use nomos_core::block::builder::BlockBuilder; /// use nomos_core::block::builder::BlockBuilder;
/// let builder: BlockBuilder<(), (), FirstTx, FirstBlob> = { /// let builder: BlockBuilder<(), (), FirstTx, FirstBlob> = {
/// BlockBuilder::new( FirstTx::default(), FirstBlob::default()) /// BlockBuilder::new( FirstTx::default(), FirstBlob::default())
/// .with_view(View::from(0))
/// .with_parent_qc(qc)
/// .with_proposer(proposer)
/// .with_beacon_state(beacon)
/// .with_transactions([tx1].into_iter()) /// .with_transactions([tx1].into_iter())
/// .with_blobs([blob1].into_iter()) /// .with_blobs([blob1].into_iter())
/// }; /// };
@ -32,14 +34,33 @@ use carnot_engine::{NodeId, Qc, View};
pub struct BlockBuilder<Tx, Blob, TxSelector, BlobSelector> { pub struct BlockBuilder<Tx, Blob, TxSelector, BlobSelector> {
tx_selector: TxSelector, tx_selector: TxSelector,
blob_selector: BlobSelector, blob_selector: BlobSelector,
view: Option<View>, carnot_header_builder: Option<CarnotBuilder>,
parent_qc: Option<Qc>, cryptarchia_header_builder: Option<CryptarchiaBuilder>,
proposer: Option<NodeId>,
beacon: Option<RandomBeaconState>,
txs: Option<Box<dyn Iterator<Item = Tx>>>, txs: Option<Box<dyn Iterator<Item = Tx>>>,
blobs: Option<Box<dyn Iterator<Item = Blob>>>, blobs: Option<Box<dyn Iterator<Item = Blob>>>,
} }
impl<Tx, C, TxSelector, BlobSelector> BlockBuilder<Tx, C, TxSelector, BlobSelector>
where
Tx: Clone + Eq + Hash,
C: Clone + Eq + Hash,
{
pub fn empty_carnot(
beacon: RandomBeaconState,
view: View,
parent_qc: Qc<HeaderId>,
leader_proof: LeaderProof,
) -> Block<Tx, C> {
Block {
header: Header::Carnot(
CarnotBuilder::new(beacon, view, parent_qc, leader_proof).build([0; 32].into(), 0),
),
cl_transactions: IndexSet::new(),
bl_blobs: IndexSet::new(),
}
}
}
impl<Tx, C, TxSelector, BlobSelector> BlockBuilder<Tx, C, TxSelector, BlobSelector> impl<Tx, C, TxSelector, BlobSelector> BlockBuilder<Tx, C, TxSelector, BlobSelector>
where where
Tx: Transaction + Clone + Eq + Hash + Serialize + DeserializeOwned, Tx: Transaction + Clone + Eq + Hash + Serialize + DeserializeOwned,
@ -51,36 +72,25 @@ where
Self { Self {
tx_selector, tx_selector,
blob_selector, blob_selector,
view: None, carnot_header_builder: None,
parent_qc: None, cryptarchia_header_builder: None,
proposer: None,
beacon: None,
txs: None, txs: None,
blobs: None, blobs: None,
} }
} }
#[must_use] #[must_use]
pub fn with_view(mut self, view: View) -> Self { pub fn with_carnot_builder(mut self, carnot_header_builder: CarnotBuilder) -> Self {
self.view = Some(view); self.carnot_header_builder = Some(carnot_header_builder);
self self
} }
#[must_use] #[must_use]
pub fn with_parent_qc(mut self, qc: Qc) -> Self { pub fn with_cryptarchia_builder(
self.parent_qc = Some(qc); mut self,
self cryptarchia_header_builder: CryptarchiaBuilder,
} ) -> Self {
self.cryptarchia_header_builder = Some(cryptarchia_header_builder);
#[must_use]
pub fn with_proposer(mut self, proposer: NodeId) -> Self {
self.proposer = Some(proposer);
self
}
#[must_use]
pub fn with_beacon_state(mut self, beacon: RandomBeaconState) -> Self {
self.beacon = Some(beacon);
self self
} }
@ -100,28 +110,48 @@ where
} }
#[allow(clippy::result_large_err)] #[allow(clippy::result_large_err)]
pub fn build(self) -> Result<Block<Tx, C>, Self> { pub fn build(self) -> Result<Block<Tx, C>, String> {
if let Self { if let Self {
tx_selector, tx_selector,
blob_selector, blob_selector,
view: Some(view), carnot_header_builder: carnot_builder,
parent_qc: Some(parent_qc), cryptarchia_header_builder: cryptarchia_builder,
proposer: Some(proposer),
beacon: Some(beacon),
txs: Some(txs), txs: Some(txs),
blobs: Some(blobs), blobs: Some(blobs),
} = self } = self
{ {
Ok(Block::new( let txs = tx_selector.select_tx_from(txs).collect::<IndexSet<_>>();
view, let blobs = blob_selector
parent_qc, .select_blob_from(blobs)
tx_selector.select_tx_from(txs), .collect::<IndexSet<_>>();
blob_selector.select_blob_from(blobs),
proposer, let serialized_content = wire::serialize(&(&txs, &blobs)).unwrap();
beacon, let content_size = u32::try_from(serialized_content.len()).map_err(|_| {
)) format!(
"Content is too big: {} out of {} max",
serialized_content.len(),
u32::MAX
)
})?;
let content_id = <[u8; 32]>::from(Blake2b::digest(&serialized_content)).into();
let header = match (carnot_builder, cryptarchia_builder) {
(Some(carnot_builder), None) => {
Header::Carnot(carnot_builder.build(content_id, content_size))
}
(None, Some(cryptarchia_builder)) => {
Header::Cryptarchia(cryptarchia_builder.build(content_id, content_size))
}
_ => return Err("Exactly one header builder should be set".to_string()),
};
Ok(Block {
header,
cl_transactions: txs,
bl_blobs: blobs,
})
} else { } else {
Err(self) Err("incomplete block".to_string())
} }
} }
} }

View File

@ -1,68 +1,27 @@
pub mod builder; pub mod builder;
use carnot_engine::overlay::RandomBeaconState;
use indexmap::IndexSet; use indexmap::IndexSet;
// std // std
use core::hash::Hash; use core::hash::Hash;
// crates // crates
use crate::header::Header;
use crate::wire; use crate::wire;
use ::serde::{ use ::serde::{de::DeserializeOwned, Deserialize, Serialize};
de::{DeserializeOwned, Deserializer},
Deserialize, Serialize, Serializer,
};
use bytes::Bytes; use bytes::Bytes;
pub use carnot_engine::BlockId;
use carnot_engine::{LeaderProof, NodeId, Qc, View};
// internal // internal
pub type TxHash = [u8; 32]; pub type TxHash = [u8; 32];
/// A block /// A block
#[derive(Clone, Debug)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Block<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> { pub struct Block<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> {
header: carnot_engine::Block, header: Header,
beacon: RandomBeaconState,
cl_transactions: IndexSet<Tx>, cl_transactions: IndexSet<Tx>,
bl_blobs: IndexSet<BlobCertificate>, bl_blobs: IndexSet<BlobCertificate>,
} }
impl<
Tx: Clone + Eq + Hash + Serialize + DeserializeOwned,
BlobCertificate: Clone + Eq + Hash + Serialize + DeserializeOwned,
> Block<Tx, BlobCertificate>
{
pub fn new(
view: View,
parent_qc: Qc,
txs: impl Iterator<Item = Tx>,
blobs: impl Iterator<Item = BlobCertificate>,
proposer: NodeId,
beacon: RandomBeaconState,
) -> Self {
let transactions = txs.collect();
let blobs = blobs.collect();
let header = carnot_engine::Block {
id: BlockId::zeros(),
view,
parent_qc,
leader_proof: LeaderProof::LeaderId {
leader_id: proposer,
},
};
let mut s = Self {
header,
beacon,
cl_transactions: transactions,
bl_blobs: blobs,
};
let id = block_id_from_wire_content(&s);
s.header.id = id;
s
}
}
impl<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> Block<Tx, BlobCertificate> { impl<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> Block<Tx, BlobCertificate> {
pub fn header(&self) -> &carnot_engine::Block { pub fn header(&self) -> &Header {
&self.header &self.header
} }
@ -73,24 +32,6 @@ impl<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> Block<Tx, BlobCe
pub fn blobs(&self) -> impl Iterator<Item = &BlobCertificate> + '_ { pub fn blobs(&self) -> impl Iterator<Item = &BlobCertificate> + '_ {
self.bl_blobs.iter() self.bl_blobs.iter()
} }
pub fn beacon(&self) -> &RandomBeaconState {
&self.beacon
}
}
pub fn block_id_from_wire_content<
Tx: Clone + Eq + Hash + Serialize + DeserializeOwned,
BlobCertificate: Clone + Eq + Hash + Serialize + DeserializeOwned,
>(
block: &Block<Tx, BlobCertificate>,
) -> carnot_engine::BlockId {
use blake2::digest::{consts::U32, Digest};
use blake2::Blake2b;
let bytes = block.as_bytes();
let mut hasher = Blake2b::<U32>::new();
hasher.update(bytes);
BlockId::new(hasher.finalize().into())
} }
impl< impl<
@ -104,87 +45,6 @@ impl<
} }
pub fn from_bytes(bytes: &[u8]) -> Self { pub fn from_bytes(bytes: &[u8]) -> Self {
let mut result: Self = wire::deserialize(bytes).unwrap(); wire::deserialize(bytes).unwrap()
result.header.id = block_id_from_wire_content(&result);
result
}
}
mod serde {
use super::*;
// use ::serde::{de::Deserializer, Deserialize, Serialize};
/// consensus_engine::Block but without the id field, which will be computed
/// from the rest of the block.
#[derive(Serialize, Deserialize)]
struct StrippedHeader {
pub view: View,
pub parent_qc: Qc,
pub leader_proof: LeaderProof,
}
#[derive(Serialize, Deserialize)]
struct StrippedBlock<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> {
header: StrippedHeader,
beacon: RandomBeaconState,
cl_transactions: IndexSet<Tx>,
bl_blobs: IndexSet<BlobCertificate>,
}
impl<
'de,
Tx: Clone + Eq + Hash + Serialize + DeserializeOwned,
BlobCertificate: Clone + Eq + Hash + Serialize + DeserializeOwned,
> Deserialize<'de> for Block<Tx, BlobCertificate>
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let StrippedBlock {
header,
beacon,
cl_transactions,
bl_blobs,
} = StrippedBlock::deserialize(deserializer)?;
let header = carnot_engine::Block {
id: BlockId::zeros(),
view: header.view,
parent_qc: header.parent_qc,
leader_proof: header.leader_proof,
};
let mut block = Block {
beacon,
cl_transactions,
bl_blobs,
header,
};
block.header.id = block_id_from_wire_content(&block);
Ok(block)
}
}
impl<
Tx: Clone + Eq + Hash + Serialize + DeserializeOwned,
BlobCertificate: Clone + Eq + Hash + Serialize + DeserializeOwned,
> Serialize for Block<Tx, BlobCertificate>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// TODO: zero copy serialization
let block = StrippedBlock {
header: StrippedHeader {
view: self.header.view,
parent_qc: self.header.parent_qc.clone(),
leader_proof: self.header.leader_proof.clone(),
},
beacon: self.beacon.clone(),
cl_transactions: self.cl_transactions.clone(),
bl_blobs: self.bl_blobs.clone(),
};
block.serialize(serializer)
}
} }
} }

View File

@ -1,3 +1,7 @@
use blake2::digest::typenum::U32;
pub type PublicKey = [u8; 32]; pub type PublicKey = [u8; 32];
pub type PrivateKey = [u8; 32]; pub type PrivateKey = [u8; 32];
pub type Signature = [u8; 32]; pub type Signature = [u8; 32];
pub(crate) type Blake2b = blake2::Blake2b<U32>;

View File

@ -0,0 +1,116 @@
use super::{ContentId, HeaderId};
use crate::crypto::Blake2b;
use crate::wire;
use blake2::Digest;
use serde::{Deserialize, Serialize};
use carnot_engine::overlay::RandomBeaconState;
use carnot_engine::{LeaderProof, Qc, View};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Header {
beacon: RandomBeaconState,
view: View,
parent_qc: Qc<HeaderId>,
leader_proof: LeaderProof,
content_id: ContentId,
content_size: u32,
}
impl Header {
pub fn new(
beacon: RandomBeaconState,
view: View,
parent_qc: Qc<HeaderId>,
leader_proof: LeaderProof,
content_id: ContentId,
content_size: u32,
) -> Self {
Self {
beacon,
view,
parent_qc,
leader_proof,
content_id,
content_size,
}
}
pub fn beacon(&self) -> &RandomBeaconState {
&self.beacon
}
pub fn id(&self) -> HeaderId {
let mut h = Blake2b::new();
let bytes = wire::serialize(&self).unwrap();
h.update(&bytes);
HeaderId(h.finalize().into())
}
pub fn parent_qc(&self) -> &Qc<HeaderId> {
&self.parent_qc
}
pub fn leader_proof(&self) -> &LeaderProof {
&self.leader_proof
}
pub fn content_id(&self) -> ContentId {
self.content_id
}
pub fn content_size(&self) -> u32 {
self.content_size
}
pub fn view(&self) -> View {
self.view
}
pub fn parent(&self) -> HeaderId {
self.parent_qc.block()
}
pub fn to_carnot_block(&self) -> carnot_engine::Block<HeaderId> {
carnot_engine::Block {
id: self.id(),
parent_qc: self.parent_qc.clone(),
view: self.view(),
leader_proof: self.leader_proof().clone(),
}
}
}
pub struct Builder {
beacon: RandomBeaconState,
view: View,
parent_qc: Qc<HeaderId>,
leader_proof: LeaderProof,
}
impl Builder {
pub fn new(
beacon: RandomBeaconState,
view: View,
parent_qc: Qc<HeaderId>,
leader_proof: LeaderProof,
) -> Self {
Self {
beacon,
view,
parent_qc,
leader_proof,
}
}
pub fn build(self, content_id: ContentId, content_size: u32) -> Header {
Header::new(
self.beacon,
self.view,
self.parent_qc,
self.leader_proof,
content_id,
content_size,
)
}
}

View File

@ -1,24 +1,22 @@
use crate::{crypto::Blake2b, leader_proof::LeaderProof}; use super::{ContentId, HeaderId};
use crate::crypto::Blake2b;
use blake2::Digest; use blake2::Digest;
use cryptarchia_engine::Slot; use cryptarchia_engine::Slot;
use cryptarchia_ledger::LeaderProof;
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)] use serde::{Deserialize, Serialize};
pub struct HeaderId([u8; 32]);
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct ContentId([u8; 32]);
#[derive(Clone, Debug, Eq, PartialEq, Copy)] #[derive(Clone, Debug, Eq, PartialEq, Copy)]
pub struct Nonce([u8; 32]); pub struct Nonce([u8; 32]);
#[derive(Clone, Debug, Eq, PartialEq, Hash)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Header { pub struct Header {
parent: HeaderId, parent: HeaderId,
slot: Slot,
// TODO: move this to common header fields
// length of block contents in bytes // length of block contents in bytes
content_size: u32, content_size: u32,
// id of block contents // id of block contents
content_id: ContentId, content_id: ContentId,
slot: Slot,
leader_proof: LeaderProof, leader_proof: LeaderProof,
orphaned_leader_proofs: Vec<Header>, orphaned_leader_proofs: Vec<Header>,
} }
@ -85,40 +83,36 @@ impl Header {
} }
} }
// ----------- conversions pub struct Builder {
parent: HeaderId,
impl From<[u8; 32]> for Nonce { slot: Slot,
fn from(nonce: [u8; 32]) -> Self { leader_proof: LeaderProof,
Self(nonce) orphaned_leader_proofs: Vec<Header>,
}
} }
impl From<Nonce> for [u8; 32] { impl Builder {
fn from(nonce: Nonce) -> [u8; 32] { pub fn new(parent: HeaderId, slot: Slot, leader_proof: LeaderProof) -> Self {
nonce.0 Self {
parent,
slot,
leader_proof,
orphaned_leader_proofs: vec![],
}
} }
}
impl From<[u8; 32]> for HeaderId { pub fn with_orphaned_proofs(mut self, orphaned_leader_proofs: Vec<Header>) -> Self {
fn from(id: [u8; 32]) -> Self { self.orphaned_leader_proofs = orphaned_leader_proofs;
Self(id) self
} }
}
impl From<HeaderId> for [u8; 32] { pub fn build(self, content_id: ContentId, content_size: u32) -> Header {
fn from(id: HeaderId) -> Self { Header {
id.0 parent: self.parent,
} slot: self.slot,
} content_size,
content_id,
impl From<[u8; 32]> for ContentId { leader_proof: self.leader_proof,
fn from(id: [u8; 32]) -> Self { orphaned_leader_proofs: self.orphaned_leader_proofs,
Self(id) }
}
}
impl From<ContentId> for [u8; 32] {
fn from(id: ContentId) -> Self {
id.0
} }
} }

View File

@ -0,0 +1,89 @@
use serde::{Deserialize, Serialize};
use crate::utils::{display_hex_bytes_newtype, serde_bytes_newtype};
pub mod carnot;
pub mod cryptarchia;
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
pub struct HeaderId([u8; 32]);
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct ContentId([u8; 32]);
// This lint is a false positive?
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Header {
Cryptarchia(cryptarchia::Header),
Carnot(carnot::Header),
}
impl Header {
pub fn cryptarchia(&self) -> &cryptarchia::Header {
match self {
Self::Cryptarchia(header) => header,
Self::Carnot(_) => panic!("Header is not a Cryptarchia header"),
}
}
pub fn carnot(&self) -> &carnot::Header {
match self {
Self::Carnot(header) => header,
Self::Cryptarchia(_) => panic!("Header is not a Carnot header"),
}
}
pub fn id(&self) -> HeaderId {
match self {
Self::Cryptarchia(header) => header.id(),
Self::Carnot(header) => header.id(),
}
}
pub fn parent(&self) -> HeaderId {
match self {
Self::Cryptarchia(header) => header.parent(),
Self::Carnot(header) => header.parent(),
}
}
}
impl From<[u8; 32]> for HeaderId {
fn from(id: [u8; 32]) -> Self {
Self(id)
}
}
impl From<HeaderId> for [u8; 32] {
fn from(id: HeaderId) -> Self {
id.0
}
}
impl From<[u8; 32]> for ContentId {
fn from(id: [u8; 32]) -> Self {
Self(id)
}
}
impl From<ContentId> for [u8; 32] {
fn from(id: ContentId) -> Self {
id.0
}
}
display_hex_bytes_newtype!(HeaderId);
display_hex_bytes_newtype!(ContentId);
serde_bytes_newtype!(HeaderId, 32);
serde_bytes_newtype!(ContentId, 32);
#[test]
fn test_serde() {
assert_eq!(
crate::wire::deserialize::<HeaderId>(&crate::wire::serialize(&HeaderId([0; 32])).unwrap())
.unwrap(),
HeaderId([0; 32])
);
}

View File

@ -2,6 +2,7 @@ pub mod account;
pub mod block; pub mod block;
pub mod crypto; pub mod crypto;
pub mod da; pub mod da;
pub mod header;
pub mod staking; pub mod staking;
pub mod tx; pub mod tx;
pub mod utils; pub mod utils;

View File

@ -1 +1,53 @@
pub mod select; pub mod select;
macro_rules! display_hex_bytes_newtype {
($newtype:ty) => {
impl core::fmt::Display for $newtype {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "0x")?;
for v in self.0 {
write!(f, "{:02x}", v)?;
}
Ok(())
}
}
};
}
macro_rules! serde_bytes_newtype {
($newtype:ty, $len:expr) => {
impl serde::Serialize for $newtype {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
if serializer.is_human_readable() {
const_hex::const_encode::<$len, false>(&self.0)
.as_str()
.serialize(serializer)
} else {
self.0.serialize(serializer)
}
}
}
impl<'de> serde::Deserialize<'de> for $newtype {
fn deserialize<D>(deserializer: D) -> Result<$newtype, D::Error>
where
D: serde::Deserializer<'de>,
{
if deserializer.is_human_readable() {
let s = <&str>::deserialize(deserializer)?;
const_hex::decode_to_array(s)
.map(Self)
.map_err(serde::de::Error::custom)
} else {
<[u8; $len]>::deserialize(deserializer).map(Self)
}
}
}
};
}
pub(crate) use display_hex_bytes_newtype;
pub(crate) use serde_bytes_newtype;

View File

@ -1,5 +1,6 @@
// std // std
// crates // crates
use crate::header::HeaderId;
use carnot_engine::{Block, View}; use carnot_engine::{Block, View};
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -49,7 +50,7 @@ impl Tally for MockTally {
type Vote = MockVote; type Vote = MockVote;
type Qc = MockQc; type Qc = MockQc;
type Outcome = (); type Outcome = ();
type Subject = Block; type Subject = Block<HeaderId>;
type TallyError = Error; type TallyError = Error;
type Settings = MockTallySettings; type Settings = MockTallySettings;
@ -60,7 +61,7 @@ impl Tally for MockTally {
async fn tally<S: Stream<Item = Self::Vote> + Unpin + Send>( async fn tally<S: Stream<Item = Self::Vote> + Unpin + Send>(
&self, &self,
block: Block, block: Block<HeaderId>,
mut vote_stream: S, mut vote_stream: S,
) -> Result<(Self::Qc, Self::Outcome), Self::TallyError> { ) -> Result<(Self::Qc, Self::Outcome), Self::TallyError> {
let mut count_votes = 0; let mut count_votes = 0;

View File

@ -1,6 +1,6 @@
use core::{fmt::Debug, hash::Hash}; use core::{fmt::Debug, hash::Hash};
use nomos_core::block::BlockId; use nomos_core::header::HeaderId;
use nomos_core::tx::Transaction; use nomos_core::tx::Transaction;
use nomos_mempool::{ use nomos_mempool::{
backend::mockpool::MockPool, backend::mockpool::MockPool,
@ -13,7 +13,7 @@ use tokio::sync::oneshot;
type ClMempoolService<T> = MempoolService< type ClMempoolService<T> = MempoolService<
Libp2pAdapter<T, <T as Transaction>::Hash>, Libp2pAdapter<T, <T as Transaction>::Hash>,
MockPool<BlockId, T, <T as Transaction>::Hash>, MockPool<HeaderId, T, <T as Transaction>::Hash>,
TxDiscriminant, TxDiscriminant,
>; >;
@ -47,7 +47,7 @@ where
pub async fn cl_mempool_status<T>( pub async fn cl_mempool_status<T>(
handle: &overwatch_rs::overwatch::handle::OverwatchHandle, handle: &overwatch_rs::overwatch::handle::OverwatchHandle,
items: Vec<<T as Transaction>::Hash>, items: Vec<<T as Transaction>::Hash>,
) -> Result<Vec<Status<BlockId>>, super::DynError> ) -> Result<Vec<Status<HeaderId>>, super::DynError>
where where
T: Transaction T: Transaction
+ Clone + Clone

View File

@ -10,7 +10,7 @@ use carnot_consensus::{
}; };
use carnot_engine::{ use carnot_engine::{
overlay::{RandomBeaconState, RoundRobin, TreeOverlay}, overlay::{RandomBeaconState, RoundRobin, TreeOverlay},
Block, BlockId, Block,
}; };
use full_replication::Certificate; use full_replication::Certificate;
use nomos_core::{ use nomos_core::{
@ -18,6 +18,7 @@ use nomos_core::{
blob, blob,
certificate::{self, select::FillSize as FillSizeWithBlobsCertificate}, certificate::{self, select::FillSize as FillSizeWithBlobsCertificate},
}, },
header::HeaderId,
tx::{select::FillSize as FillSizeWithTx, Transaction}, tx::{select::FillSize as FillSizeWithTx, Transaction},
}; };
use nomos_mempool::{ use nomos_mempool::{
@ -27,10 +28,10 @@ use nomos_storage::backends::{sled::SledBackend, StorageSerde};
pub type Carnot<Tx, SS, const SIZE: usize> = CarnotConsensus< pub type Carnot<Tx, SS, const SIZE: usize> = CarnotConsensus<
ConsensusLibp2pAdapter, ConsensusLibp2pAdapter,
MockPool<BlockId, Tx, <Tx as Transaction>::Hash>, MockPool<HeaderId, Tx, <Tx as Transaction>::Hash>,
MempoolLibp2pAdapter<Tx, <Tx as Transaction>::Hash>, MempoolLibp2pAdapter<Tx, <Tx as Transaction>::Hash>,
MockPool< MockPool<
BlockId, HeaderId,
Certificate, Certificate,
<<Certificate as certificate::Certificate>::Blob as blob::Blob>::Hash, <<Certificate as certificate::Certificate>::Blob as blob::Blob>::Hash,
>, >,
@ -64,9 +65,9 @@ where
pub async fn carnot_blocks<Tx, SS, const SIZE: usize>( pub async fn carnot_blocks<Tx, SS, const SIZE: usize>(
handle: &OverwatchHandle, handle: &OverwatchHandle,
from: Option<BlockId>, from: Option<HeaderId>,
to: Option<BlockId>, to: Option<HeaderId>,
) -> Result<Vec<Block>, super::DynError> ) -> Result<Vec<Block<HeaderId>>, super::DynError>
where where
Tx: Transaction + Clone + Debug + Hash + Serialize + DeserializeOwned + Send + Sync + 'static, Tx: Transaction + Clone + Debug + Hash + Serialize + DeserializeOwned + Send + Sync + 'static,
<Tx as Transaction>::Hash: std::cmp::Ord + Debug + Send + Sync + 'static, <Tx as Transaction>::Hash: std::cmp::Ord + Debug + Send + Sync + 'static,

View File

@ -1,6 +1,6 @@
use full_replication::{AbsoluteNumber, Attestation, Blob, Certificate, FullReplication}; use full_replication::{AbsoluteNumber, Attestation, Blob, Certificate, FullReplication};
use nomos_core::block::BlockId;
use nomos_core::da::blob; use nomos_core::da::blob;
use nomos_core::header::HeaderId;
use nomos_da::{ use nomos_da::{
backend::memory_cache::BlobCache, network::adapters::libp2p::Libp2pAdapter as DaLibp2pAdapter, backend::memory_cache::BlobCache, network::adapters::libp2p::Libp2pAdapter as DaLibp2pAdapter,
DaMsg, DataAvailabilityService, DaMsg, DataAvailabilityService,
@ -15,7 +15,7 @@ use tokio::sync::oneshot;
pub type DaMempoolService = MempoolService< pub type DaMempoolService = MempoolService<
Libp2pAdapter<Certificate, <Blob as blob::Blob>::Hash>, Libp2pAdapter<Certificate, <Blob as blob::Blob>::Hash>,
MockPool<BlockId, Certificate, <Blob as blob::Blob>::Hash>, MockPool<HeaderId, Certificate, <Blob as blob::Blob>::Hash>,
CertDiscriminant, CertDiscriminant,
>; >;
@ -43,7 +43,7 @@ pub async fn da_mempool_metrics(
pub async fn da_mempool_status( pub async fn da_mempool_status(
handle: &overwatch_rs::overwatch::handle::OverwatchHandle, handle: &overwatch_rs::overwatch::handle::OverwatchHandle,
items: Vec<<Blob as blob::Blob>::Hash>, items: Vec<<Blob as blob::Blob>::Hash>,
) -> Result<Vec<Status<BlockId>>, super::DynError> { ) -> Result<Vec<Status<HeaderId>>, super::DynError> {
let relay = handle.relay::<DaMempoolService>().connect().await?; let relay = handle.relay::<DaMempoolService>().connect().await?;
let (sender, receiver) = oneshot::channel(); let (sender, receiver) = oneshot::channel();
relay relay

View File

@ -1,5 +1,5 @@
use core::{fmt::Debug, hash::Hash}; use core::{fmt::Debug, hash::Hash};
use nomos_core::block::BlockId; use nomos_core::header::HeaderId;
use nomos_mempool::{ use nomos_mempool::{
backend::mockpool::MockPool, network::NetworkAdapter, Discriminant, MempoolMsg, MempoolService, backend::mockpool::MockPool, network::NetworkAdapter, Discriminant, MempoolMsg, MempoolService,
}; };
@ -20,7 +20,7 @@ where
Key: Clone + Debug + Ord + Hash + 'static, Key: Clone + Debug + Ord + Hash + 'static,
{ {
let relay = handle let relay = handle
.relay::<MempoolService<A, MockPool<BlockId, Item, Key>, D>>() .relay::<MempoolService<A, MockPool<HeaderId, Item, Key>, D>>()
.connect() .connect()
.await?; .await?;
let (sender, receiver) = oneshot::channel(); let (sender, receiver) = oneshot::channel();

View File

@ -1,5 +1,5 @@
use carnot_engine::BlockId;
use nomos_core::block::Block; use nomos_core::block::Block;
use nomos_core::header::HeaderId;
use nomos_storage::{ use nomos_storage::{
backends::{sled::SledBackend, StorageSerde}, backends::{sled::SledBackend, StorageSerde},
StorageMsg, StorageService, StorageMsg, StorageService,
@ -7,7 +7,7 @@ use nomos_storage::{
pub async fn block_req<S, Tx>( pub async fn block_req<S, Tx>(
handle: &overwatch_rs::overwatch::handle::OverwatchHandle, handle: &overwatch_rs::overwatch::handle::OverwatchHandle,
id: BlockId, id: HeaderId,
) -> Result<Option<Block<Tx, full_replication::Certificate>>, super::DynError> ) -> Result<Option<Block<Tx, full_replication::Certificate>>, super::DynError>
where where
Tx: serde::Serialize + serde::de::DeserializeOwned + Clone + Eq + core::hash::Hash, Tx: serde::Serialize + serde::de::DeserializeOwned + Clone + Eq + core::hash::Hash,

View File

@ -6,10 +6,10 @@ use std::hash::Hash;
// crates // crates
// internal // internal
use crate::TimeoutQc;
use carnot_engine::overlay::{ use carnot_engine::overlay::{
CommitteeMembership, Error as RandomBeaconError, FreezeMembership, RandomBeaconState, CommitteeMembership, Error as RandomBeaconError, FreezeMembership, RandomBeaconState,
}; };
use carnot_engine::TimeoutQc;
use nomos_core::block::Block; use nomos_core::block::Block;
pub trait UpdateableCommitteeMembership: CommitteeMembership { pub trait UpdateableCommitteeMembership: CommitteeMembership {
@ -44,7 +44,10 @@ impl UpdateableCommitteeMembership for RandomBeaconState {
&self, &self,
block: &Block<Tx, Blob>, block: &Block<Tx, Blob>,
) -> Result<Self, Self::Error> { ) -> Result<Self, Self::Error> {
self.check_advance_happy(block.beacon().clone(), block.header().parent_qc.view()) self.check_advance_happy(
block.header().carnot().beacon().clone(),
block.header().carnot().parent_qc().view(),
)
} }
fn on_timeout_qc_received(&self, qc: &TimeoutQc) -> Result<Self, Self::Error> { fn on_timeout_qc_received(&self, qc: &TimeoutQc) -> Result<Self, Self::Error> {

View File

@ -1,8 +1,6 @@
use crate::TimeoutQc;
use carnot_engine::overlay::RoundRobin; use carnot_engine::overlay::RoundRobin;
use carnot_engine::{ use carnot_engine::overlay::{Error as RandomBeaconError, LeaderSelection, RandomBeaconState};
overlay::{Error as RandomBeaconError, LeaderSelection, RandomBeaconState},
TimeoutQc,
};
use nomos_core::block::Block; use nomos_core::block::Block;
use std::{convert::Infallible, error::Error, hash::Hash}; use std::{convert::Infallible, error::Error, hash::Hash};
@ -38,7 +36,10 @@ impl UpdateableLeaderSelection for RandomBeaconState {
&self, &self,
block: &Block<Tx, Blob>, block: &Block<Tx, Blob>,
) -> Result<Self, Self::Error> { ) -> Result<Self, Self::Error> {
self.check_advance_happy(block.beacon().clone(), block.header().parent_qc.view()) self.check_advance_happy(
block.header().carnot().beacon().clone(),
block.header().carnot().parent_qc().view(),
)
// TODO: check random beacon public keys is leader id // TODO: check random beacon public keys is leader id
} }

View File

@ -29,8 +29,7 @@ use crate::tally::{
happy::CarnotTally, timeout::TimeoutTally, unhappy::NewViewTally, CarnotTallySettings, happy::CarnotTally, timeout::TimeoutTally, unhappy::NewViewTally, CarnotTallySettings,
}; };
use carnot_engine::{ use carnot_engine::{
overlay::RandomBeaconState, AggregateQc, BlockId, Carnot, Committee, LeaderProof, NewView, overlay::RandomBeaconState, Carnot, Committee, LeaderProof, Overlay, Payload, View,
Overlay, Payload, Qc, StandardQc, Timeout, TimeoutQc, View, Vote,
}; };
use task_manager::TaskManager; use task_manager::TaskManager;
@ -38,6 +37,7 @@ use crate::committee_membership::UpdateableCommitteeMembership;
use nomos_core::block::builder::BlockBuilder; use nomos_core::block::builder::BlockBuilder;
use nomos_core::block::Block; use nomos_core::block::Block;
use nomos_core::da::certificate::{BlobCertificateSelect, Certificate}; use nomos_core::da::certificate::{BlobCertificateSelect, Certificate};
use nomos_core::header::{carnot::Builder, HeaderId};
use nomos_core::tx::{Transaction, TxSelect}; use nomos_core::tx::{Transaction, TxSelect};
use nomos_core::vote::Tally; use nomos_core::vote::Tally;
use nomos_mempool::{ use nomos_mempool::{
@ -65,6 +65,13 @@ fn default_timeout() -> Duration {
// Random seed for each round provided by the protocol // Random seed for each round provided by the protocol
pub type Seed = [u8; 32]; pub type Seed = [u8; 32];
type TimeoutQc = carnot_engine::TimeoutQc<HeaderId>;
type NewView = carnot_engine::NewView<HeaderId>;
type AggregateQc = carnot_engine::AggregateQc<HeaderId>;
type Qc = carnot_engine::Qc<HeaderId>;
type StandardQc = carnot_engine::StandardQc<HeaderId>;
type Vote = carnot_engine::Vote<HeaderId>;
type Timeout = carnot_engine::Timeout<HeaderId>;
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
pub struct CarnotSettings<O: Overlay, Ts, Bs> { pub struct CarnotSettings<O: Overlay, Ts, Bs> {
@ -113,8 +120,8 @@ pub struct CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, T
where where
A: NetworkAdapter, A: NetworkAdapter,
ClPoolAdapter: MempoolAdapter<Item = ClPool::Item, Key = ClPool::Key>, ClPoolAdapter: MempoolAdapter<Item = ClPool::Item, Key = ClPool::Key>,
ClPool: MemPool<BlockId = BlockId>, ClPool: MemPool<BlockId = HeaderId>,
DaPool: MemPool<BlockId = BlockId>, DaPool: MemPool<BlockId = HeaderId>,
DaPoolAdapter: MempoolAdapter<Item = DaPool::Item, Key = DaPool::Key>, DaPoolAdapter: MempoolAdapter<Item = DaPool::Item, Key = DaPool::Key>,
O: Overlay + Debug, O: Overlay + Debug,
ClPool::Item: Debug + 'static, ClPool::Item: Debug + 'static,
@ -140,10 +147,10 @@ impl<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage> Servi
for CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage> for CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage>
where where
A: NetworkAdapter, A: NetworkAdapter,
ClPool: MemPool<BlockId = BlockId>, ClPool: MemPool<BlockId = HeaderId>,
ClPool::Item: Debug, ClPool::Item: Debug,
ClPool::Key: Debug, ClPool::Key: Debug,
DaPool: MemPool<BlockId = BlockId>, DaPool: MemPool<BlockId = HeaderId>,
DaPool::Item: Debug, DaPool::Item: Debug,
DaPool::Key: Debug, DaPool::Key: Debug,
ClPoolAdapter: MempoolAdapter<Item = ClPool::Item, Key = ClPool::Key>, ClPoolAdapter: MempoolAdapter<Item = ClPool::Item, Key = ClPool::Key>,
@ -165,9 +172,9 @@ impl<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage> Servi
for CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage> for CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage>
where where
A: NetworkAdapter + Clone + Send + Sync + 'static, A: NetworkAdapter + Clone + Send + Sync + 'static,
ClPool: MemPool<BlockId = BlockId> + Send + Sync + 'static, ClPool: MemPool<BlockId = HeaderId> + Send + Sync + 'static,
ClPool::Settings: Send + Sync + 'static, ClPool::Settings: Send + Sync + 'static,
DaPool: MemPool<BlockId = BlockId> + Send + Sync + 'static, DaPool: MemPool<BlockId = HeaderId> + Send + Sync + 'static,
DaPool::Settings: Send + Sync + 'static, DaPool::Settings: Send + Sync + 'static,
ClPool::Item: Transaction<Hash = ClPool::Key> ClPool::Item: Transaction<Hash = ClPool::Key>
+ Debug + Debug
@ -252,9 +259,9 @@ where
let overlay = O::new(overlay_settings); let overlay = O::new(overlay_settings);
let genesis = carnot_engine::Block { let genesis = carnot_engine::Block {
id: BlockId::zeros(), id: [0; 32].into(),
view: View::new(0), view: View::new(0),
parent_qc: Qc::Standard(StandardQc::genesis()), parent_qc: Qc::Standard(StandardQc::genesis([0; 32].into())),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: NodeId::new([0; 32]), leader_id: NodeId::new([0; 32]),
}, },
@ -299,6 +306,7 @@ where
); );
if carnot.is_next_leader() { if carnot.is_next_leader() {
tracing::info!("is next leader, gathering vores");
let network_adapter = adapter.clone(); let network_adapter = adapter.clone();
task_manager.push(genesis_block.view.next(), async move { task_manager.push(genesis_block.view.next(), async move {
let Event::Approve { qc, .. } = Self::gather_votes( let Event::Approve { qc, .. } = Self::gather_votes(
@ -312,6 +320,7 @@ where
tracing::debug!("Failed to gather initial votes"); tracing::debug!("Failed to gather initial votes");
return Event::None; return Event::None;
}; };
tracing::info!("got enough votes");
Event::ProposeBlock { qc } Event::ProposeBlock { qc }
}); });
} }
@ -351,7 +360,7 @@ where
#[derive(Debug)] #[derive(Debug)]
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
enum Output<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> { enum Output<Tx: Clone + Eq + Hash, BlobCertificate: Clone + Eq + Hash> {
Send(carnot_engine::Send), Send(carnot_engine::Send<HeaderId>),
BroadcastTimeoutQc { BroadcastTimeoutQc {
timeout_qc: TimeoutQc, timeout_qc: TimeoutQc,
}, },
@ -364,9 +373,9 @@ impl<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage>
CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage> CarnotConsensus<A, ClPool, ClPoolAdapter, DaPool, DaPoolAdapter, O, TxS, BS, Storage>
where where
A: NetworkAdapter + Clone + Send + Sync + 'static, A: NetworkAdapter + Clone + Send + Sync + 'static,
ClPool: MemPool<BlockId = BlockId> + Send + Sync + 'static, ClPool: MemPool<BlockId = HeaderId> + Send + Sync + 'static,
ClPool::Settings: Send + Sync + 'static, ClPool::Settings: Send + Sync + 'static,
DaPool: MemPool<BlockId = BlockId> + Send + Sync + 'static, DaPool: MemPool<BlockId = HeaderId> + Send + Sync + 'static,
DaPool::Settings: Send + Sync + 'static, DaPool::Settings: Send + Sync + 'static,
ClPool::Item: Transaction<Hash = ClPool::Key> ClPool::Item: Transaction<Hash = ClPool::Key>
+ Debug + Debug
@ -414,7 +423,7 @@ where
} }
} }
fn process_message(carnot: &Carnot<O>, msg: ConsensusMsg) { fn process_message(carnot: &Carnot<O, HeaderId>, msg: ConsensusMsg) {
match msg { match msg {
ConsensusMsg::Info { tx } => { ConsensusMsg::Info { tx } => {
let info = CarnotInfo { let info = CarnotInfo {
@ -457,18 +466,18 @@ where
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn process_carnot_event( async fn process_carnot_event(
mut carnot: Carnot<O>, mut carnot: Carnot<O, HeaderId>,
event: Event<ClPool::Item, DaPool::Item>, event: Event<ClPool::Item, DaPool::Item>,
task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>, task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>,
adapter: A, adapter: A,
private_key: PrivateKey, private_key: PrivateKey,
cl_mempool_relay: OutboundRelay<MempoolMsg<BlockId, ClPool::Item, ClPool::Key>>, cl_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, ClPool::Item, ClPool::Key>>,
da_mempool_relay: OutboundRelay<MempoolMsg<BlockId, DaPool::Item, DaPool::Key>>, da_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, DaPool::Item, DaPool::Key>>,
storage_relay: OutboundRelay<StorageMsg<Storage>>, storage_relay: OutboundRelay<StorageMsg<Storage>>,
tx_selector: TxS, tx_selector: TxS,
blobl_selector: BS, blobl_selector: BS,
timeout: Duration, timeout: Duration,
) -> Carnot<O> { ) -> Carnot<O, HeaderId> {
let mut output = None; let mut output = None;
let prev_view = carnot.current_view(); let prev_view = carnot.current_view();
match event { match event {
@ -571,24 +580,26 @@ where
) )
)] )]
async fn process_block( async fn process_block(
mut carnot: Carnot<O>, mut carnot: Carnot<O, HeaderId>,
block: Block<ClPool::Item, DaPool::Item>, block: Block<ClPool::Item, DaPool::Item>,
mut stream: Pin<Box<dyn Stream<Item = Block<ClPool::Item, DaPool::Item>> + Send>>, mut stream: Pin<Box<dyn Stream<Item = Block<ClPool::Item, DaPool::Item>> + Send>>,
task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>, task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>,
adapter: A, adapter: A,
storage_relay: OutboundRelay<StorageMsg<Storage>>, storage_relay: OutboundRelay<StorageMsg<Storage>>,
cl_mempool_relay: OutboundRelay<MempoolMsg<BlockId, ClPool::Item, ClPool::Key>>, cl_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, ClPool::Item, ClPool::Key>>,
da_mempool_relay: OutboundRelay<MempoolMsg<BlockId, DaPool::Item, DaPool::Key>>, da_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, DaPool::Item, DaPool::Key>>,
) -> (Carnot<O>, Option<Output<ClPool::Item, DaPool::Item>>) { ) -> (
Carnot<O, HeaderId>,
Option<Output<ClPool::Item, DaPool::Item>>,
) {
tracing::debug!("received proposal {:?}", block); tracing::debug!("received proposal {:?}", block);
if carnot.highest_voted_view() >= block.header().view { let original_block = block;
tracing::debug!("already voted for view {}", block.header().view); let block = original_block.header().carnot().clone();
if carnot.highest_voted_view() >= block.view() {
tracing::debug!("already voted for view {}", block.view());
return (carnot, None); return (carnot, None);
} }
let original_block = block;
let block = original_block.header().clone();
let self_committee = carnot.self_committee(); let self_committee = carnot.self_committee();
let leader_committee = [carnot.id()].into_iter().collect(); let leader_committee = [carnot.id()].into_iter().collect();
@ -602,10 +613,10 @@ where
participating_nodes: carnot.root_committee(), participating_nodes: carnot.root_committee(),
}; };
match carnot.receive_block(block.clone()) { match carnot.receive_block(block.to_carnot_block()) {
Ok(mut new_state) => { Ok(mut new_state) => {
let new_view = new_state.current_view(); let new_view = new_state.current_view();
let msg = <StorageMsg<_>>::new_store_message(block.id, original_block.clone()); let msg = <StorageMsg<_>>::new_store_message(block.id(), original_block.clone());
if let Err((e, _msg)) = storage_relay.send(msg).await { if let Err((e, _msg)) = storage_relay.send(msg).await {
tracing::error!("Could not send block to storage: {e}"); tracing::error!("Could not send block to storage: {e}");
} }
@ -614,24 +625,24 @@ where
mark_in_block( mark_in_block(
cl_mempool_relay, cl_mempool_relay,
original_block.transactions().map(Transaction::hash), original_block.transactions().map(Transaction::hash),
block.id, block.id(),
) )
.await; .await;
mark_in_block( mark_in_block(
da_mempool_relay, da_mempool_relay,
original_block.blobs().map(Certificate::hash), original_block.blobs().map(Certificate::hash),
block.id, block.id(),
) )
.await; .await;
if new_view != carnot.current_view() { if new_view != carnot.current_view() {
task_manager.push( task_manager.push(
block.view, block.view(),
Self::gather_votes( Self::gather_votes(
adapter.clone(), adapter.clone(),
self_committee, self_committee,
block.clone(), block.to_carnot_block(),
tally_settings, tally_settings,
), ),
); );
@ -643,7 +654,7 @@ where
}, },
); );
} else { } else {
task_manager.push(block.view, async move { task_manager.push(block.view(), async move {
if let Some(block) = stream.next().await { if let Some(block) = stream.next().await {
Event::Proposal { block, stream } Event::Proposal { block, stream }
} else { } else {
@ -657,10 +668,14 @@ where
} }
if carnot.is_next_leader() { if carnot.is_next_leader() {
task_manager.push(block.view, async move { task_manager.push(block.view(), async move {
let Event::Approve { qc, .. } = let Event::Approve { qc, .. } = Self::gather_votes(
Self::gather_votes(adapter, leader_committee, block, leader_tally_settings) adapter,
.await leader_committee,
block.to_carnot_block(),
leader_tally_settings,
)
.await
else { else {
unreachable!() unreachable!()
}; };
@ -674,12 +689,15 @@ where
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[instrument(level = "debug", skip(task_manager, adapter))] #[instrument(level = "debug", skip(task_manager, adapter))]
async fn approve_new_view( async fn approve_new_view(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
timeout_qc: TimeoutQc, timeout_qc: TimeoutQc,
new_views: HashSet<NewView>, new_views: HashSet<NewView>,
task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>, task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>,
adapter: A, adapter: A,
) -> (Carnot<O>, Option<Output<ClPool::Item, DaPool::Item>>) { ) -> (
Carnot<O, HeaderId>,
Option<Output<ClPool::Item, DaPool::Item>>,
) {
let leader_committee = [carnot.id()].into_iter().collect(); let leader_committee = [carnot.id()].into_iter().collect();
let leader_tally_settings = CarnotTallySettings { let leader_tally_settings = CarnotTallySettings {
threshold: carnot.leader_super_majority_threshold(), threshold: carnot.leader_super_majority_threshold(),
@ -713,11 +731,14 @@ where
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[instrument(level = "debug", skip(task_manager, adapter))] #[instrument(level = "debug", skip(task_manager, adapter))]
async fn receive_timeout_qc( async fn receive_timeout_qc(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
timeout_qc: TimeoutQc, timeout_qc: TimeoutQc,
task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>, task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>,
adapter: A, adapter: A,
) -> (Carnot<O>, Option<Output<ClPool::Item, DaPool::Item>>) { ) -> (
Carnot<O, HeaderId>,
Option<Output<ClPool::Item, DaPool::Item>>,
) {
let mut new_state = carnot.receive_timeout_qc(timeout_qc.clone()); let mut new_state = carnot.receive_timeout_qc(timeout_qc.clone());
let self_committee = carnot.self_committee(); let self_committee = carnot.self_committee();
let tally_settings = CarnotTallySettings { let tally_settings = CarnotTallySettings {
@ -741,9 +762,12 @@ where
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[instrument(level = "debug")] #[instrument(level = "debug")]
async fn process_root_timeout( async fn process_root_timeout(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
timeouts: HashSet<Timeout>, timeouts: HashSet<Timeout>,
) -> (Carnot<O>, Option<Output<ClPool::Item, DaPool::Item>>) { ) -> (
Carnot<O, HeaderId>,
Option<Output<ClPool::Item, DaPool::Item>>,
) {
// we might have received a timeout_qc sent by some other node and advanced the view // we might have received a timeout_qc sent by some other node and advanced the view
// already, in which case we should ignore the timeout // already, in which case we should ignore the timeout
if carnot.current_view() if carnot.current_view()
@ -793,8 +817,8 @@ where
qc: Qc, qc: Qc,
tx_selector: TxS, tx_selector: TxS,
blob_selector: BS, blob_selector: BS,
cl_mempool_relay: OutboundRelay<MempoolMsg<BlockId, ClPool::Item, ClPool::Key>>, cl_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, ClPool::Item, ClPool::Key>>,
da_mempool_relay: OutboundRelay<MempoolMsg<BlockId, DaPool::Item, DaPool::Key>>, da_mempool_relay: OutboundRelay<MempoolMsg<HeaderId, DaPool::Item, DaPool::Key>>,
) -> Option<Output<ClPool::Item, DaPool::Item>> { ) -> Option<Output<ClPool::Item, DaPool::Item>> {
let mut output = None; let mut output = None;
let cl_txs = get_mempool_contents(cl_mempool_relay); let cl_txs = get_mempool_contents(cl_mempool_relay);
@ -804,10 +828,12 @@ where
(Ok(cl_txs), Ok(da_certs)) => { (Ok(cl_txs), Ok(da_certs)) => {
let beacon = RandomBeaconState::generate_happy(qc.view(), &private_key); let beacon = RandomBeaconState::generate_happy(qc.view(), &private_key);
let Ok(proposal) = BlockBuilder::new(tx_selector, blob_selector) let Ok(proposal) = BlockBuilder::new(tx_selector, blob_selector)
.with_view(qc.view().next()) .with_carnot_builder(Builder::new(
.with_parent_qc(qc) beacon,
.with_proposer(id) qc.view().next(),
.with_beacon_state(beacon) qc,
LeaderProof::LeaderId { leader_id: id },
))
.with_transactions(cl_txs) .with_transactions(cl_txs)
.with_blobs_certificates(da_certs) .with_blobs_certificates(da_certs)
.build() .build()
@ -823,7 +849,7 @@ where
} }
async fn process_view_change( async fn process_view_change(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
prev_view: View, prev_view: View,
task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>, task_manager: &mut TaskManager<View, Event<ClPool::Item, DaPool::Item>>,
adapter: A, adapter: A,
@ -883,7 +909,7 @@ where
async fn gather_votes( async fn gather_votes(
adapter: A, adapter: A,
committee: Committee, committee: Committee,
block: carnot_engine::Block, block: carnot_engine::Block<HeaderId>,
tally: CarnotTallySettings, tally: CarnotTallySettings,
) -> Event<ClPool::Item, DaPool::Item> { ) -> Event<ClPool::Item, DaPool::Item> {
let tally = CarnotTally::new(tally); let tally = CarnotTally::new(tally);
@ -947,7 +973,7 @@ where
.filter_map(move |msg| { .filter_map(move |msg| {
async move { async move {
let proposal = Block::from_bytes(&msg.data); let proposal = Block::from_bytes(&msg.data);
if proposal.header().id == msg.proposal { if proposal.header().id() == msg.proposal {
// TODO: Leader is faulty? what should we do? // TODO: Leader is faulty? what should we do?
Some(proposal) Some(proposal)
} else { } else {
@ -967,9 +993,9 @@ where
E: std::error::Error, E: std::error::Error,
Fl: FnOnce(O::LeaderSelection) -> Result<O::LeaderSelection, E>, Fl: FnOnce(O::LeaderSelection) -> Result<O::LeaderSelection, E>,
>( >(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
leader_selection_f: Fl, leader_selection_f: Fl,
) -> Carnot<O> { ) -> Carnot<O, HeaderId> {
carnot carnot
.update_overlay(|overlay| overlay.update_leader_selection(leader_selection_f)) .update_overlay(|overlay| overlay.update_leader_selection(leader_selection_f))
.unwrap() .unwrap()
@ -979,9 +1005,9 @@ where
E: std::error::Error, E: std::error::Error,
Fm: FnOnce(O::CommitteeMembership) -> Result<O::CommitteeMembership, E>, Fm: FnOnce(O::CommitteeMembership) -> Result<O::CommitteeMembership, E>,
>( >(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
committee_membership_f: Fm, committee_membership_f: Fm,
) -> Carnot<O> { ) -> Carnot<O, HeaderId> {
carnot carnot
.update_overlay(|overlay| overlay.update_committees(committee_membership_f)) .update_overlay(|overlay| overlay.update_committees(committee_membership_f))
.unwrap() .unwrap()
@ -993,10 +1019,10 @@ where
Fl: FnOnce(O::LeaderSelection) -> Result<O::LeaderSelection, El>, Fl: FnOnce(O::LeaderSelection) -> Result<O::LeaderSelection, El>,
Fm: FnOnce(O::CommitteeMembership) -> Result<O::CommitteeMembership, Em>, Fm: FnOnce(O::CommitteeMembership) -> Result<O::CommitteeMembership, Em>,
>( >(
carnot: Carnot<O>, carnot: Carnot<O, HeaderId>,
leader_selection_f: Fl, leader_selection_f: Fl,
committee_membership_f: Fm, committee_membership_f: Fm,
) -> Carnot<O> { ) -> Carnot<O, HeaderId> {
let carnot = Self::update_leader_selection(carnot, leader_selection_f); let carnot = Self::update_leader_selection(carnot, leader_selection_f);
Self::update_committee_membership(carnot, committee_membership_f) Self::update_committee_membership(carnot, committee_membership_f)
} }
@ -1048,9 +1074,9 @@ where
Output::BroadcastProposal { proposal } => { Output::BroadcastProposal { proposal } => {
adapter adapter
.broadcast(NetworkMessage::Proposal(ProposalMsg { .broadcast(NetworkMessage::Proposal(ProposalMsg {
proposal: proposal.header().id, proposal: proposal.header().id(),
data: proposal.as_bytes().to_vec().into_boxed_slice(), data: proposal.as_bytes().to_vec().into_boxed_slice(),
view: proposal.header().view, view: proposal.header().carnot().view(),
})) }))
.await; .await;
} }
@ -1074,7 +1100,7 @@ enum Event<Tx: Clone + Hash + Eq, BlobCertificate: Clone + Eq + Hash> {
#[allow(dead_code)] #[allow(dead_code)]
Approve { Approve {
qc: Qc, qc: Qc,
block: carnot_engine::Block, block: carnot_engine::Block<HeaderId>,
votes: HashSet<Vote>, votes: HashSet<Vote>,
}, },
LocalTimeout { LocalTimeout {
@ -1105,9 +1131,9 @@ pub enum ConsensusMsg {
/// 'to' (the oldest block). If 'from' is None, the tip of the chain is used as a starting /// 'to' (the oldest block). If 'from' is None, the tip of the chain is used as a starting
/// point. If 'to' is None or not known to the node, the genesis block is used as an end point. /// point. If 'to' is None or not known to the node, the genesis block is used as an end point.
GetBlocks { GetBlocks {
from: Option<BlockId>, from: Option<HeaderId>,
to: Option<BlockId>, to: Option<HeaderId>,
tx: Sender<Vec<carnot_engine::Block>>, tx: Sender<Vec<carnot_engine::Block<HeaderId>>>,
}, },
} }
@ -1121,19 +1147,19 @@ pub struct CarnotInfo {
pub current_view: View, pub current_view: View,
pub highest_voted_view: View, pub highest_voted_view: View,
pub local_high_qc: StandardQc, pub local_high_qc: StandardQc,
pub tip: carnot_engine::Block, pub tip: carnot_engine::Block<HeaderId>,
pub last_view_timeout_qc: Option<TimeoutQc>, pub last_view_timeout_qc: Option<TimeoutQc>,
pub last_committed_block: carnot_engine::Block, pub last_committed_block: carnot_engine::Block<HeaderId>,
} }
async fn get_mempool_contents<Item, Key>( async fn get_mempool_contents<Item, Key>(
mempool: OutboundRelay<MempoolMsg<BlockId, Item, Key>>, mempool: OutboundRelay<MempoolMsg<HeaderId, Item, Key>>,
) -> Result<Box<dyn Iterator<Item = Item> + Send>, tokio::sync::oneshot::error::RecvError> { ) -> Result<Box<dyn Iterator<Item = Item> + Send>, tokio::sync::oneshot::error::RecvError> {
let (reply_channel, rx) = tokio::sync::oneshot::channel(); let (reply_channel, rx) = tokio::sync::oneshot::channel();
mempool mempool
.send(MempoolMsg::View { .send(MempoolMsg::View {
ancestor_hint: BlockId::zeros(), ancestor_hint: [0; 32].into(),
reply_channel, reply_channel,
}) })
.await .await
@ -1143,9 +1169,9 @@ async fn get_mempool_contents<Item, Key>(
} }
async fn mark_in_block<Item, Key>( async fn mark_in_block<Item, Key>(
mempool: OutboundRelay<MempoolMsg<BlockId, Item, Key>>, mempool: OutboundRelay<MempoolMsg<HeaderId, Item, Key>>,
ids: impl Iterator<Item = Key>, ids: impl Iterator<Item = Key>,
block: BlockId, block: HeaderId,
) { ) {
mempool mempool
.send(MempoolMsg::MarkInBlock { .send(MempoolMsg::MarkInBlock {
@ -1170,14 +1196,14 @@ mod tests {
highest_voted_view: View::new(-1), highest_voted_view: View::new(-1),
local_high_qc: StandardQc { local_high_qc: StandardQc {
view: View::new(0), view: View::new(0),
id: BlockId::zeros(), id: [0; 32].into(),
}, },
tip: Block { tip: Block {
id: BlockId::zeros(), id: [0; 32].into(),
view: View::new(0), view: View::new(0),
parent_qc: Qc::Standard(StandardQc { parent_qc: Qc::Standard(StandardQc {
view: View::new(0), view: View::new(0),
id: BlockId::zeros(), id: [0; 32].into(),
}), }),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: NodeId::new([0; 32]), leader_id: NodeId::new([0; 32]),
@ -1185,11 +1211,11 @@ mod tests {
}, },
last_view_timeout_qc: None, last_view_timeout_qc: None,
last_committed_block: Block { last_committed_block: Block {
id: BlockId::zeros(), id: [0; 32].into(),
view: View::new(0), view: View::new(0),
parent_qc: Qc::Standard(StandardQc { parent_qc: Qc::Standard(StandardQc {
view: View::new(0), view: View::new(0),
id: BlockId::zeros(), id: [0; 32].into(),
}), }),
leader_proof: LeaderProof::LeaderId { leader_proof: LeaderProof::LeaderId {
leader_id: NodeId::new([0; 32]), leader_id: NodeId::new([0; 32]),

View File

@ -13,8 +13,8 @@ use crate::network::{
messages::{NetworkMessage, ProposalMsg, VoteMsg}, messages::{NetworkMessage, ProposalMsg, VoteMsg},
BoxedStream, NetworkAdapter, BoxedStream, NetworkAdapter,
}; };
use carnot_engine::{BlockId, Committee, CommitteeId, View}; use carnot_engine::{Committee, CommitteeId, View};
use nomos_core::wire; use nomos_core::{header::HeaderId, wire};
use nomos_network::{ use nomos_network::{
backends::libp2p::{Command, Event, EventKind, Libp2p}, backends::libp2p::{Command, Event, EventKind, Libp2p},
NetworkMsg, NetworkService, NetworkMsg, NetworkService,
@ -94,7 +94,7 @@ impl<T> Spsc<T> {
#[derive(Default)] #[derive(Default)]
struct Messages { struct Messages {
proposal_chunks: Spsc<ProposalMsg>, proposal_chunks: Spsc<ProposalMsg>,
votes: HashMap<CommitteeId, HashMap<BlockId, Spsc<VoteMsg>>>, votes: HashMap<CommitteeId, HashMap<HeaderId, Spsc<VoteMsg>>>,
new_views: HashMap<CommitteeId, Spsc<NewViewMsg>>, new_views: HashMap<CommitteeId, Spsc<NewViewMsg>>,
timeouts: HashMap<CommitteeId, Spsc<TimeoutMsg>>, timeouts: HashMap<CommitteeId, Spsc<TimeoutMsg>>,
timeout_qcs: Spsc<TimeoutQcMsg>, timeout_qcs: Spsc<TimeoutQcMsg>,
@ -153,7 +153,7 @@ impl MessageCache {
&self, &self,
view: View, view: View,
committee_id: CommitteeId, committee_id: CommitteeId,
proposal_id: BlockId, proposal_id: HeaderId,
) -> Option<Receiver<VoteMsg>> { ) -> Option<Receiver<VoteMsg>> {
self.cache.lock().unwrap().get_mut(&view).map(|m| { self.cache.lock().unwrap().get_mut(&view).map(|m| {
m.votes m.votes
@ -264,7 +264,7 @@ impl NetworkAdapter for Libp2pAdapter {
} }
} }
NetworkMessage::Vote(msg) => { NetworkMessage::Vote(msg) => {
tracing::debug!("received vote"); tracing::debug!("received vote {:?}", msg);
let mut cache = cache.cache.lock().unwrap(); let mut cache = cache.cache.lock().unwrap();
let view = msg.vote.view; let view = msg.vote.view;
if let Some(messages) = cache.get_mut(&view) { if let Some(messages) = cache.get_mut(&view) {
@ -356,7 +356,7 @@ impl NetworkAdapter for Libp2pAdapter {
&self, &self,
committee: &Committee, committee: &Committee,
view: View, view: View,
proposal_id: BlockId, proposal_id: HeaderId,
) -> BoxedStream<VoteMsg> { ) -> BoxedStream<VoteMsg> {
self.message_cache self.message_cache
.get_votes(view, committee.id::<blake2::Blake2s256>(), proposal_id) .get_votes(view, committee.id::<blake2::Blake2s256>(), proposal_id)

View File

@ -3,13 +3,15 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
// internal // internal
use crate::NodeId; use crate::NodeId;
use carnot_engine::{BlockId, NewView, Qc, Timeout, TimeoutQc, View, Vote}; use crate::{NewView, Qc, Timeout, TimeoutQc, Vote};
use carnot_engine::View;
use nomos_core::header::HeaderId;
use nomos_core::wire; use nomos_core::wire;
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, Hash)] #[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, Hash)]
pub struct ProposalMsg { pub struct ProposalMsg {
pub data: Box<[u8]>, pub data: Box<[u8]>,
pub proposal: BlockId, pub proposal: HeaderId,
pub view: View, pub view: View,
} }
@ -84,7 +86,7 @@ impl TimeoutQcMsg {
} }
} }
#[derive(Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub enum NetworkMessage { pub enum NetworkMessage {
Timeout(TimeoutMsg), Timeout(TimeoutMsg),
TimeoutQc(TimeoutQcMsg), TimeoutQc(TimeoutQcMsg),

View File

@ -4,11 +4,12 @@ pub mod messages;
// std // std
// crates // crates
use futures::Stream; use futures::Stream;
use nomos_core::header::HeaderId;
// internal // internal
use crate::network::messages::{ use crate::network::messages::{
NetworkMessage, NewViewMsg, ProposalMsg, TimeoutMsg, TimeoutQcMsg, VoteMsg, NetworkMessage, NewViewMsg, ProposalMsg, TimeoutMsg, TimeoutQcMsg, VoteMsg,
}; };
use carnot_engine::{BlockId, Committee, View}; use carnot_engine::{Committee, View};
use nomos_network::backends::NetworkBackend; use nomos_network::backends::NetworkBackend;
use nomos_network::NetworkService; use nomos_network::NetworkService;
use overwatch_rs::services::relay::OutboundRelay; use overwatch_rs::services::relay::OutboundRelay;
@ -33,7 +34,7 @@ pub trait NetworkAdapter {
&self, &self,
committee: &Committee, committee: &Committee,
view: View, view: View,
proposal_id: BlockId, proposal_id: HeaderId,
) -> BoxedStream<VoteMsg>; ) -> BoxedStream<VoteMsg>;
async fn new_view_stream(&self, committee: &Committee, view: View) -> BoxedStream<NewViewMsg>; async fn new_view_stream(&self, committee: &Committee, view: View) -> BoxedStream<NewViewMsg>;
async fn send(&self, message: NetworkMessage, committee: &Committee); async fn send(&self, message: NetworkMessage, committee: &Committee);

View File

@ -4,15 +4,17 @@
use std::collections::HashSet; use std::collections::HashSet;
// crates // crates
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use nomos_core::header::HeaderId;
// internal // internal
use super::CarnotTallySettings; use super::CarnotTallySettings;
use crate::network::messages::VoteMsg; use crate::network::messages::VoteMsg;
use carnot_engine::{Block, Qc, StandardQc, Vote}; use crate::{Qc, StandardQc, Vote};
use nomos_core::crypto::PublicKey; use nomos_core::crypto::PublicKey;
use nomos_core::vote::Tally; use nomos_core::vote::Tally;
pub type NodeId = PublicKey; pub type NodeId = PublicKey;
type Block = carnot_engine::Block<HeaderId>;
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum CarnotTallyError { pub enum CarnotTallyError {
@ -82,7 +84,6 @@ impl Tally for CarnotTally {
)); ));
} }
} }
Err(CarnotTallyError::StreamEnded) Err(CarnotTallyError::StreamEnded)
} }
} }

View File

@ -5,7 +5,8 @@ use futures::{Stream, StreamExt};
// internal // internal
use super::CarnotTallySettings; use super::CarnotTallySettings;
use crate::network::messages::TimeoutMsg; use crate::network::messages::TimeoutMsg;
use carnot_engine::{Timeout, View}; use crate::Timeout;
use carnot_engine::View;
use nomos_core::vote::Tally; use nomos_core::vote::Tally;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]

View File

@ -6,9 +6,10 @@ use serde::{Deserialize, Serialize};
// internal // internal
use super::CarnotTallySettings; use super::CarnotTallySettings;
use crate::network::messages::NewViewMsg; use crate::network::messages::NewViewMsg;
use carnot_engine::{NewView, TimeoutQc};
use nomos_core::vote::Tally; use nomos_core::vote::Tally;
use crate::{NewView, TimeoutQc};
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum NewViewTallyError { pub enum NewViewTallyError {
#[error("Did not receive enough votes")] #[error("Did not receive enough votes")]

View File

@ -1,5 +1,5 @@
use nomos_core::{ use nomos_core::{
block::BlockId, header::HeaderId,
tx::mock::{MockTransaction, MockTxId}, tx::mock::{MockTransaction, MockTxId},
}; };
use nomos_log::{Logger, LoggerSettings}; use nomos_log::{Logger, LoggerSettings};
@ -23,7 +23,7 @@ struct MockPoolNode {
mockpool: ServiceHandle< mockpool: ServiceHandle<
MempoolService< MempoolService<
MockAdapter, MockAdapter,
MockPool<BlockId, MockTransaction<MockMessage>, MockTxId>, MockPool<HeaderId, MockTransaction<MockMessage>, MockTxId>,
Transaction, Transaction,
>, >,
>, >,
@ -80,7 +80,7 @@ fn test_mockmempool() {
let network = app.handle().relay::<NetworkService<Mock>>(); let network = app.handle().relay::<NetworkService<Mock>>();
let mempool = app.handle().relay::<MempoolService< let mempool = app.handle().relay::<MempoolService<
MockAdapter, MockAdapter,
MockPool<BlockId, MockTransaction<MockMessage>, MockTxId>, MockPool<HeaderId, MockTransaction<MockMessage>, MockTxId>,
Transaction, Transaction,
>>(); >>();
@ -102,7 +102,7 @@ fn test_mockmempool() {
let (mtx, mrx) = tokio::sync::oneshot::channel(); let (mtx, mrx) = tokio::sync::oneshot::channel();
mempool_outbound mempool_outbound
.send(MempoolMsg::View { .send(MempoolMsg::View {
ancestor_hint: BlockId::default(), ancestor_hint: [0; 32].into(),
reply_channel: mtx, reply_channel: mtx,
}) })
.await .await

View File

@ -6,9 +6,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
// crates // crates
use anyhow::Ok; use anyhow::Ok;
use carnot_engine::overlay::RandomBeaconState; use carnot_engine::overlay::RandomBeaconState;
use carnot_engine::{Block, View}; use carnot_engine::{Block, LeaderProof, View};
use clap::Parser; use clap::Parser;
use crossbeam::channel; use crossbeam::channel;
use nomos_core::block::builder::BlockBuilder;
use parking_lot::Mutex; use parking_lot::Mutex;
use rand::rngs::SmallRng; use rand::rngs::SmallRng;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
@ -129,15 +130,13 @@ impl SimulationApp {
let leader = nodes.first().copied().unwrap(); let leader = nodes.first().copied().unwrap();
// FIXME: Actually use a proposer and a key to generate random beacon state // FIXME: Actually use a proposer and a key to generate random beacon state
let genesis = nomos_core::block::Block::new( let genesis = <BlockBuilder<_, _, (), ()>>::empty_carnot(
View::new(0),
Block::genesis().parent_qc,
[].into_iter(),
[].into_iter(),
leader,
RandomBeaconState::Sad { RandomBeaconState::Sad {
entropy: Box::new([0; 32]), entropy: Box::new([0; 32]),
}, },
View::new(0),
Block::genesis([0; 32].into()).parent_qc,
LeaderProof::LeaderId { leader_id: leader },
); );
let mut rng = SmallRng::seed_from_u64(seed); let mut rng = SmallRng::seed_from_u64(seed);
overlay_node::to_overlay_node( overlay_node::to_overlay_node(

View File

@ -1,10 +1,10 @@
use crate::node::carnot::{messages::CarnotMessage, tally::Tally, timeout::TimeoutHandler}; use crate::node::carnot::{messages::CarnotMessage, tally::Tally, timeout::TimeoutHandler};
use crate::node::carnot::{AggregateQc, Carnot, NewView, Qc, StandardQc, Timeout, TimeoutQc, Vote};
use carnot_consensus::network::messages::{NewViewMsg, TimeoutMsg, VoteMsg}; use carnot_consensus::network::messages::{NewViewMsg, TimeoutMsg, VoteMsg};
use carnot_consensus::NodeId; use carnot_consensus::NodeId;
use carnot_engine::{ use carnot_engine::{Overlay, View};
AggregateQc, Carnot, NewView, Overlay, Qc, StandardQc, Timeout, TimeoutQc, View, Vote,
};
use nomos_core::block::Block; use nomos_core::block::Block;
use nomos_core::header::HeaderId;
use std::collections::HashSet; use std::collections::HashSet;
use std::hash::Hash; use std::hash::Hash;
use std::time::Duration; use std::time::Duration;
@ -97,8 +97,8 @@ impl EventBuilder {
tracing::info!( tracing::info!(
node=%self.id, node=%self.id,
current_view = %engine.current_view(), current_view = %engine.current_view(),
block_view=%block.header().view, block_view=%block.header().carnot().view(),
block=?block.header().id, block=?block.header().id(),
parent_block=?block.header().parent(), parent_block=?block.header().parent(),
"receive proposal message", "receive proposal message",
); );
@ -236,7 +236,7 @@ pub enum Event<Tx: Clone + Hash + Eq> {
#[allow(dead_code)] #[allow(dead_code)]
Approve { Approve {
qc: Qc, qc: Qc,
block: carnot_engine::Block, block: carnot_engine::Block<HeaderId>,
votes: HashSet<Vote>, votes: HashSet<Vote>,
}, },
ProposeBlock { ProposeBlock {

View File

@ -4,6 +4,8 @@ mod event_builder;
mod message_cache; mod message_cache;
pub mod messages; pub mod messages;
mod state; mod state;
use nomos_core::block::builder::BlockBuilder;
use nomos_core::header::HeaderId;
pub use state::*; pub use state::*;
mod serde_util; mod serde_util;
mod tally; mod tally;
@ -36,9 +38,18 @@ use carnot_consensus::{
network::messages::{NewViewMsg, TimeoutMsg, VoteMsg}, network::messages::{NewViewMsg, TimeoutMsg, VoteMsg},
}; };
use carnot_engine::overlay::RandomBeaconState; use carnot_engine::overlay::RandomBeaconState;
use carnot_engine::{ use carnot_engine::{Committee, LeaderProof, Overlay, View};
Block, BlockId, Carnot, Committee, Overlay, Payload, Qc, StandardQc, TimeoutQc, View, Vote,
}; type Block = carnot_engine::Block<HeaderId>;
type AggregateQc = carnot_engine::AggregateQc<HeaderId>;
type Carnot<O> = carnot_engine::Carnot<O, HeaderId>;
type Payload = carnot_engine::Payload<HeaderId>;
type TimeoutQc = carnot_engine::TimeoutQc<HeaderId>;
type Vote = carnot_engine::Vote<HeaderId>;
type Qc = carnot_engine::Qc<HeaderId>;
type StandardQc = carnot_engine::StandardQc<HeaderId>;
type NewView = carnot_engine::NewView<HeaderId>;
type Timeout = carnot_engine::Timeout<HeaderId>;
static RECORD_SETTINGS: std::sync::OnceLock<BTreeMap<String, bool>> = std::sync::OnceLock::new(); static RECORD_SETTINGS: std::sync::OnceLock<BTreeMap<String, bool>> = std::sync::OnceLock::new();
@ -95,7 +106,7 @@ impl<
rng: &mut R, rng: &mut R,
) -> Self { ) -> Self {
let overlay = O::new(overlay_settings); let overlay = O::new(overlay_settings);
let engine = Carnot::from_genesis(id, genesis.header().clone(), overlay); let engine = Carnot::from_genesis(id, genesis.header().carnot().to_carnot_block(), overlay);
let state = CarnotState::from(&engine); let state = CarnotState::from(&engine);
let timeout = settings.timeout; let timeout = settings.timeout;
RECORD_SETTINGS.get_or_init(|| settings.record_settings.clone()); RECORD_SETTINGS.get_or_init(|| settings.record_settings.clone());
@ -179,8 +190,8 @@ impl<
self.network_interface self.network_interface
.broadcast(CarnotMessage::Proposal(ProposalMsg { .broadcast(CarnotMessage::Proposal(ProposalMsg {
data: proposal.as_bytes().to_vec().into(), data: proposal.as_bytes().to_vec().into(),
proposal: proposal.header().id, proposal: proposal.header().id(),
view: proposal.header().view, view: proposal.header().carnot().view(),
})) }))
} }
} }
@ -195,12 +206,15 @@ impl<
node=%self.id, node=%self.id,
last_committed_view=%self.engine.latest_committed_view(), last_committed_view=%self.engine.latest_committed_view(),
current_view = %current_view, current_view = %current_view,
block_view = %block.header().view, block_view = %block.header().carnot().view(),
block = %block.header().id, block = %block.header().id(),
parent_block=%block.header().parent(), parent_block=%block.header().parent(),
"receive block proposal", "receive block proposal",
); );
match self.engine.receive_block(block.header().clone()) { match self
.engine
.receive_block(block.header().carnot().to_carnot_block())
{
Ok(mut new) => { Ok(mut new) => {
if self.engine.current_view() != new.current_view() { if self.engine.current_view() != new.current_view() {
new = Self::update_overlay_with_block(new, &block); new = Self::update_overlay_with_block(new, &block);
@ -211,7 +225,7 @@ impl<
tracing::error!( tracing::error!(
node = %self.id, node = %self.id,
current_view = %self.engine.current_view(), current_view = %self.engine.current_view(),
block_view = %block.header().view, block = %block.header().id, block_view = %block.header().carnot().view(), block = %block.header().id(),
"receive block proposal, but is invalid", "receive block proposal, but is invalid",
); );
} }
@ -230,7 +244,7 @@ impl<
to, to,
payload: Payload::Vote(Vote { payload: Payload::Vote(Vote {
view: self.engine.current_view(), view: self.engine.current_view(),
block: block.header().id, block: block.header().id(),
}), }),
})) }))
} }
@ -265,13 +279,13 @@ impl<
} }
Event::ProposeBlock { qc } => { Event::ProposeBlock { qc } => {
output = Some(Output::BroadcastProposal { output = Some(Output::BroadcastProposal {
proposal: nomos_core::block::Block::new( proposal: <BlockBuilder<_, _, (), ()>>::empty_carnot(
qc.view().next(),
qc.clone(),
[].into_iter(),
[].into_iter(),
self.id,
RandomBeaconState::generate_happy(qc.view().next(), &self.random_beacon_pk), RandomBeaconState::generate_happy(qc.view().next(), &self.random_beacon_pk),
qc.view().next(),
qc,
LeaderProof::LeaderId {
leader_id: [0; 32].into(),
},
), ),
}); });
} }
@ -440,7 +454,7 @@ impl<
#[derive(Debug)] #[derive(Debug)]
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
enum Output<Tx: Clone + Eq + Hash, Blob: Clone + Eq + Hash> { enum Output<Tx: Clone + Eq + Hash, Blob: Clone + Eq + Hash> {
Send(carnot_engine::Send), Send(carnot_engine::Send<HeaderId>),
BroadcastTimeoutQc { BroadcastTimeoutQc {
timeout_qc: TimeoutQc, timeout_qc: TimeoutQc,
}, },

View File

@ -10,7 +10,8 @@ use self::{
standard_qc::StandardQcHelper, standard_qc::StandardQcHelper,
timeout_qc::TimeoutQcHelper, timeout_qc::TimeoutQcHelper,
}; };
use carnot_engine::{AggregateQc, Block, BlockId, Committee, Qc, StandardQc, TimeoutQc, View}; use crate::node::carnot::{AggregateQc, Block, Committee, Qc, StandardQc, TimeoutQc};
use carnot_engine::View;
const NODE_ID: &str = "node_id"; const NODE_ID: &str = "node_id";
const CURRENT_VIEW: &str = "current_view"; const CURRENT_VIEW: &str = "current_view";
@ -238,16 +239,24 @@ pub(crate) mod timeout_qc {
} }
pub(crate) mod serde_id { pub(crate) mod serde_id {
use carnot_engine::{BlockId, NodeId}; use carnot_engine::NodeId;
use nomos_core::header::HeaderId;
use super::*; use super::*;
#[derive(Serialize)] #[derive(Serialize)]
pub(crate) struct BlockIdHelper<'a>(#[serde(with = "serde_array32")] &'a [u8; 32]); pub(crate) struct BlockIdHelper<'a> {
#[serde(with = "serde_array32")]
header: [u8; 32],
_marker: std::marker::PhantomData<&'a HeaderId>,
}
impl<'a> From<&'a BlockId> for BlockIdHelper<'a> { impl<'a> From<&'a HeaderId> for BlockIdHelper<'a> {
fn from(val: &'a BlockId) -> Self { fn from(val: &'a HeaderId) -> Self {
Self(val.into()) Self {
header: (*val).into(),
_marker: std::marker::PhantomData,
}
} }
} }

View File

@ -1,4 +1,5 @@
use super::*; use super::*;
use nomos_core::header::HeaderId;
use serde_block::BlockHelper; use serde_block::BlockHelper;
serializer!(CarnotStateCsvSerializer); serializer!(CarnotStateCsvSerializer);
@ -76,10 +77,10 @@ impl<'a> From<&'a StandardQc> for LocalHighQcHelper<'a> {
} }
} }
struct SafeBlocksHelper<'a>(&'a HashMap<BlockId, Block>); struct SafeBlocksHelper<'a>(&'a HashMap<HeaderId, Block>);
impl<'a> From<&'a HashMap<BlockId, Block>> for SafeBlocksHelper<'a> { impl<'a> From<&'a HashMap<HeaderId, Block>> for SafeBlocksHelper<'a> {
fn from(val: &'a HashMap<BlockId, Block>) -> Self { fn from(val: &'a HashMap<HeaderId, Block>) -> Self {
Self(val) Self(val)
} }
} }
@ -142,10 +143,10 @@ impl<'a> Serialize for CommitteesHelper<'a> {
} }
} }
struct CommittedBlockHelper<'a>(&'a [BlockId]); struct CommittedBlockHelper<'a>(&'a [HeaderId]);
impl<'a> From<&'a [BlockId]> for CommittedBlockHelper<'a> { impl<'a> From<&'a [HeaderId]> for CommittedBlockHelper<'a> {
fn from(val: &'a [BlockId]) -> Self { fn from(val: &'a [HeaderId]) -> Self {
Self(val) Self(val)
} }
} }

View File

@ -1,4 +1,5 @@
use super::*; use super::*;
use nomos_core::header::HeaderId;
use serde_block::BlockHelper; use serde_block::BlockHelper;
serializer!(CarnotStateJsonSerializer); serializer!(CarnotStateJsonSerializer);
@ -50,10 +51,10 @@ pub(crate) mod serde_block {
} }
} }
struct SafeBlocksHelper<'a>(&'a HashMap<BlockId, Block>); struct SafeBlocksHelper<'a>(&'a HashMap<HeaderId, Block>);
impl<'a> From<&'a HashMap<BlockId, Block>> for SafeBlocksHelper<'a> { impl<'a> From<&'a HashMap<HeaderId, Block>> for SafeBlocksHelper<'a> {
fn from(val: &'a HashMap<BlockId, Block>) -> Self { fn from(val: &'a HashMap<HeaderId, Block>) -> Self {
Self(val) Self(val)
} }
} }
@ -115,10 +116,10 @@ impl<'a> Serialize for CommitteesHelper<'a> {
} }
} }
struct CommittedBlockHelper<'a>(&'a [BlockId]); struct CommittedBlockHelper<'a>(&'a [HeaderId]);
impl<'a> From<&'a [BlockId]> for CommittedBlockHelper<'a> { impl<'a> From<&'a [HeaderId]> for CommittedBlockHelper<'a> {
fn from(val: &'a [BlockId]) -> Self { fn from(val: &'a [HeaderId]) -> Self {
Self(val) Self(val)
} }
} }

View File

@ -8,14 +8,14 @@ pub struct CarnotState {
pub(crate) current_view: View, pub(crate) current_view: View,
pub(crate) highest_voted_view: View, pub(crate) highest_voted_view: View,
pub(crate) local_high_qc: StandardQc, pub(crate) local_high_qc: StandardQc,
pub(crate) safe_blocks: HashMap<BlockId, Block>, pub(crate) safe_blocks: HashMap<HeaderId, Block>,
pub(crate) last_view_timeout_qc: Option<TimeoutQc>, pub(crate) last_view_timeout_qc: Option<TimeoutQc>,
pub(crate) latest_committed_block: Block, pub(crate) latest_committed_block: Block,
pub(crate) latest_committed_view: View, pub(crate) latest_committed_view: View,
pub(crate) root_committee: Committee, pub(crate) root_committee: Committee,
pub(crate) parent_committee: Option<Committee>, pub(crate) parent_committee: Option<Committee>,
pub(crate) child_committees: Vec<Committee>, pub(crate) child_committees: Vec<Committee>,
pub(crate) committed_blocks: Vec<BlockId>, pub(crate) committed_blocks: Vec<HeaderId>,
pub(super) step_duration: Duration, pub(super) step_duration: Duration,
/// Step id for this state /// Step id for this state

View File

@ -7,9 +7,9 @@ use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
use crate::{adjust_timeout, get_available_port, ConsensusConfig, Node, SpawnConfig}; use crate::{adjust_timeout, get_available_port, ConsensusConfig, Node, SpawnConfig};
use carnot_consensus::{CarnotInfo, CarnotSettings}; use carnot_consensus::{CarnotInfo, CarnotSettings};
use carnot_engine::overlay::{RandomBeaconState, RoundRobin, TreeOverlay, TreeOverlaySettings}; use carnot_engine::overlay::{RandomBeaconState, RoundRobin, TreeOverlay, TreeOverlaySettings};
use carnot_engine::{BlockId, NodeId, Overlay}; use carnot_engine::{NodeId, Overlay};
use full_replication::Certificate; use full_replication::Certificate;
use nomos_core::block::Block; use nomos_core::{block::Block, header::HeaderId};
use nomos_libp2p::{Multiaddr, Swarm}; use nomos_libp2p::{Multiaddr, Swarm};
use nomos_log::{LoggerBackend, LoggerFormat}; use nomos_log::{LoggerBackend, LoggerFormat};
use nomos_mempool::MempoolMetrics; use nomos_mempool::MempoolMetrics;
@ -112,7 +112,7 @@ impl NomosNode {
format!("http://{}", self.addr).parse().unwrap() format!("http://{}", self.addr).parse().unwrap()
} }
pub async fn get_block(&self, id: BlockId) -> Option<Block<Tx, Certificate>> { pub async fn get_block(&self, id: HeaderId) -> Option<Block<Tx, Certificate>> {
CLIENT CLIENT
.post(&format!("http://{}/{}", self.addr, STORAGE_BLOCKS_API)) .post(&format!("http://{}/{}", self.addr, STORAGE_BLOCKS_API))
.header("Content-Type", "application/json") .header("Content-Type", "application/json")
@ -146,9 +146,9 @@ impl NomosNode {
pub async fn get_blocks_info( pub async fn get_blocks_info(
&self, &self,
from: Option<BlockId>, from: Option<HeaderId>,
to: Option<BlockId>, to: Option<HeaderId>,
) -> Vec<carnot_engine::Block> { ) -> Vec<carnot_engine::Block<HeaderId>> {
let mut req = CLIENT.get(format!("http://{}/{}", self.addr, GET_BLOCKS_INFO)); let mut req = CLIENT.get(format!("http://{}/{}", self.addr, GET_BLOCKS_INFO));
if let Some(from) = from { if let Some(from) = from {
@ -162,7 +162,7 @@ impl NomosNode {
req.send() req.send()
.await .await
.unwrap() .unwrap()
.json::<Vec<carnot_engine::Block>>() .json::<Vec<carnot_engine::Block<_>>>()
.await .await
.unwrap() .unwrap()
} }

View File

@ -1,13 +1,17 @@
use carnot_consensus::CarnotInfo; use carnot_consensus::CarnotInfo;
use carnot_engine::{Block, NodeId, TimeoutQc, View}; use carnot_engine::{NodeId, View};
use fraction::Fraction; use fraction::Fraction;
use futures::stream::{self, StreamExt}; use futures::stream::{self, StreamExt};
use nomos_core::header::HeaderId;
use std::{collections::HashSet, time::Duration}; use std::{collections::HashSet, time::Duration};
use tests::{adjust_timeout, ConsensusConfig, Node, NomosNode, SpawnConfig}; use tests::{adjust_timeout, ConsensusConfig, Node, NomosNode, SpawnConfig};
const TARGET_VIEW: View = View::new(20); const TARGET_VIEW: View = View::new(20);
const DUMMY_NODE_ID: NodeId = NodeId::new([0u8; 32]); const DUMMY_NODE_ID: NodeId = NodeId::new([0u8; 32]);
type Block = carnot_engine::Block<HeaderId>;
type TimeoutQc = carnot_engine::TimeoutQc<HeaderId>;
#[tokio::test] #[tokio::test]
async fn ten_nodes_one_down() { async fn ten_nodes_one_down() {
let mut nodes = NomosNode::spawn_nodes(SpawnConfig::Chain { let mut nodes = NomosNode::spawn_nodes(SpawnConfig::Chain {