Cryptarchia engine (#583)
* add cryptarchia engine
* address comments
* split into separate files
* clarify comment
parent fde0d29860
commit 2730c2f579
Cargo.toml
@@ -22,6 +22,7 @@ members = [
"nodes/mixnode",
"simulations",
"consensus/carnot-engine",
"consensus/cryptarchia-engine",
"tests",
"mixnet/node",
"mixnet/client",
consensus/cryptarchia-engine/Cargo.toml
@@ -0,0 +1,11 @@
[package]
name = "cryptarchia-engine"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
blake2 = "0.10"
rpds = "1"
thiserror = "1"
consensus/cryptarchia-engine/src/block.rs
@@ -0,0 +1,142 @@
use crate::{crypto::Blake2b, leader_proof::LeaderProof, time::Slot};
use blake2::Digest;

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct HeaderId([u8; 32]);

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct ContentId([u8; 32]);

#[derive(Clone, Debug, Eq, PartialEq, Copy)]
pub struct Nonce([u8; 32]);

#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Header {
    parent: HeaderId,
    // length of block contents in bytes
    content_size: u32,
    // id of block contents
    content_id: ContentId,
    slot: Slot,
    leader_proof: LeaderProof,
    orphaned_leader_proofs: Vec<Header>,
}

impl Header {
    pub fn parent(&self) -> HeaderId {
        self.parent
    }

    fn update_hasher(&self, h: &mut Blake2b) {
        h.update(b"\x01");
        h.update(self.content_size.to_be_bytes());
        h.update(self.content_id.0);
        h.update(self.slot.to_be_bytes());
        h.update(self.parent.0);

        h.update(self.leader_proof.commitment());
        h.update(self.leader_proof.nullifier());
        h.update(self.leader_proof.evolved_commitment());

        for proof in &self.orphaned_leader_proofs {
            proof.update_hasher(h)
        }
    }

    pub fn id(&self) -> HeaderId {
        let mut h = Blake2b::new();
        self.update_hasher(&mut h);
        HeaderId(h.finalize().into())
    }

    pub fn leader_proof(&self) -> &LeaderProof {
        &self.leader_proof
    }

    pub fn slot(&self) -> Slot {
        self.slot
    }

    pub fn orphaned_proofs(&self) -> &[Header] {
        &self.orphaned_leader_proofs
    }

    pub fn new(
        parent: HeaderId,
        content_size: u32,
        content_id: ContentId,
        slot: Slot,
        leader_proof: LeaderProof,
    ) -> Self {
        Self {
            parent,
            content_size,
            content_id,
            slot,
            leader_proof,
            orphaned_leader_proofs: vec![],
        }
    }

    pub fn with_orphaned_proofs(mut self, orphaned_leader_proofs: Vec<Header>) -> Self {
        self.orphaned_leader_proofs = orphaned_leader_proofs;
        self
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Block {
    header: Header,
    _contents: (),
}

impl Block {
    pub fn header(&self) -> &Header {
        &self.header
    }

    pub fn new(header: Header) -> Self {
        Self {
            header,
            _contents: (),
        }
    }
}

// ----------- conversions

impl From<[u8; 32]> for Nonce {
    fn from(nonce: [u8; 32]) -> Self {
        Self(nonce)
    }
}

impl From<Nonce> for [u8; 32] {
    fn from(nonce: Nonce) -> [u8; 32] {
        nonce.0
    }
}

impl From<[u8; 32]> for HeaderId {
    fn from(id: [u8; 32]) -> Self {
        Self(id)
    }
}

impl From<HeaderId> for [u8; 32] {
    fn from(id: HeaderId) -> Self {
        id.0
    }
}

impl From<[u8; 32]> for ContentId {
    fn from(id: [u8; 32]) -> Self {
        Self(id)
    }
}

impl From<ContentId> for [u8; 32] {
    fn from(id: ContentId) -> Self {
        id.0
    }
}
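The header id commits to every field hashed in `update_hasher`, including any orphaned leader proofs attached via `with_orphaned_proofs`. A minimal sketch of that behaviour from outside the crate; the `demo` helper and its arguments are illustrative and not part of this commit:

```rust
use cryptarchia_engine::{Header, HeaderId, LeaderProof};

// Hypothetical helper: the caller supplies a parent id, a leader proof and an
// already validated orphan header; none of these values come from this commit.
fn demo(parent: HeaderId, proof: LeaderProof, orphan: Header) {
    let header = Header::new(parent, 0, [0; 32].into(), 1.into(), proof);
    // attaching orphaned proofs feeds extra bytes to the hasher, so the id changes
    let plain_id = header.id();
    let with_orphans_id = header.clone().with_orphaned_proofs(vec![orphan]).id();
    assert_ne!(plain_id, with_orphans_id);
}
```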
consensus/cryptarchia-engine/src/config.rs
@@ -0,0 +1,63 @@
use crate::{Epoch, Slot};

#[derive(Clone, Debug, PartialEq)]
pub struct TimeConfig {
    // How long a slot lasts in seconds
    pub slot_duration: u64,
    // Start of the first epoch, in unix timestamp second precision
    pub chain_start_time: u64,
}

#[derive(Clone, Debug, PartialEq)]
pub struct Config {
    // The k parameter in the Common Prefix property.
    // Blocks deeper than k are generally considered stable and forks deeper than that
    // trigger the additional fork selection rule, which is however only expected to be used
    // during bootstrapping.
    pub security_param: u32,
    // f, the rate of occupied slots
    pub active_slot_coeff: f64,
    // The stake distribution is always taken at the beginning of the previous epoch.
    // This parameter controls how many slots to wait for it to be stabilized.
    // The value is computed as epoch_stake_distribution_stabilization * int(floor(k / f))
    pub epoch_stake_distribution_stabilization: u8,
    // This parameter controls how many slots we wait after the stake distribution
    // snapshot has stabilized to take the nonce snapshot.
    pub epoch_period_nonce_buffer: u8,
    // This parameter controls how many slots we wait for the nonce snapshot to be considered
    // stabilized
    pub epoch_period_nonce_stabilization: u8,
    pub time: TimeConfig,
}

impl Config {
    pub fn time_config(&self) -> &TimeConfig {
        &self.time
    }

    pub fn base_period_length(&self) -> u64 {
        (f64::from(self.security_param) / self.active_slot_coeff).floor() as u64
    }

    // returns the number of slots required to have high confidence that at least k blocks
    // have been produced
    pub fn s(&self) -> u64 {
        self.base_period_length() * 3
    }

    pub fn epoch_length(&self) -> u64 {
        (self.epoch_stake_distribution_stabilization as u64
            + self.epoch_period_nonce_buffer as u64
            + self.epoch_period_nonce_stabilization as u64)
            * self.base_period_length()
    }

    pub fn nonce_snapshot(&self, epoch: Epoch) -> Slot {
        let offset = self.base_period_length()
            * (self.epoch_period_nonce_buffer + self.epoch_stake_distribution_stabilization) as u64;
        let base = u32::from(epoch) as u64 * self.epoch_length();
        (base + offset).into()
    }

    pub fn stake_distribution_snapshot(&self, epoch: Epoch) -> Slot {
        (u32::from(epoch) as u64 * self.epoch_length()).into()
    }
}
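A sketch of how these parameters turn into a concrete epoch schedule, using the same values as the test config at the bottom of this commit (k = 1, f = 1.0 and 4/3/3 stabilization periods, i.e. 10-slot epochs); the `main` wrapper is illustrative and not part of the commit:

```rust
use cryptarchia_engine::{Config, TimeConfig};

fn main() {
    let config = Config {
        security_param: 1,
        active_slot_coeff: 1.0,
        epoch_stake_distribution_stabilization: 4,
        epoch_period_nonce_buffer: 3,
        epoch_period_nonce_stabilization: 3,
        time: TimeConfig {
            slot_duration: 1,
            chain_start_time: 0,
        },
    };

    // base period = floor(k / f) = 1 slot, so an epoch spans (4 + 3 + 3) * 1 = 10 slots
    assert_eq!(config.base_period_length(), 1);
    assert_eq!(config.epoch_length(), 10);

    // the stake distribution snapshot for epoch 1 is taken at its first slot,
    // the nonce snapshot (4 + 3) * 1 = 7 slots later
    assert_eq!(config.stake_distribution_snapshot(1.into()), 10.into());
    assert_eq!(config.nonce_snapshot(1.into()), 17.into());
}
```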
consensus/cryptarchia-engine/src/crypto.rs
@@ -0,0 +1,3 @@
use blake2::digest::typenum::U32;

pub(crate) type Blake2b = blake2::Blake2b<U32>;
consensus/cryptarchia-engine/src/leader_proof.rs
@@ -0,0 +1,93 @@
use crate::time::Slot;

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct LeaderProof {
    commitment: Commitment,
    nullifier: Nullifier,
    slot: Slot,
    evolved_commitment: Commitment,
}

impl LeaderProof {
    pub fn commitment(&self) -> &Commitment {
        &self.commitment
    }

    pub fn nullifier(&self) -> &Nullifier {
        &self.nullifier
    }

    pub fn slot(&self) -> Slot {
        self.slot
    }

    pub fn evolved_commitment(&self) -> &Commitment {
        &self.evolved_commitment
    }

    #[cfg(test)]
    pub fn dummy(slot: Slot) -> Self {
        Self {
            commitment: Commitment([0; 32]),
            nullifier: Nullifier([0; 32]),
            slot,
            evolved_commitment: Commitment([0; 32]),
        }
    }

    pub fn new(
        commitment: Commitment,
        nullifier: Nullifier,
        slot: Slot,
        evolved_commitment: Commitment,
    ) -> Self {
        Self {
            commitment,
            nullifier,
            slot,
            evolved_commitment,
        }
    }
}

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct Commitment([u8; 32]);

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
pub struct Nullifier([u8; 32]);

impl From<[u8; 32]> for Commitment {
    fn from(commitment: [u8; 32]) -> Self {
        Self(commitment)
    }
}

impl From<Commitment> for [u8; 32] {
    fn from(commitment: Commitment) -> Self {
        commitment.0
    }
}

impl From<[u8; 32]> for Nullifier {
    fn from(nullifier: [u8; 32]) -> Self {
        Self(nullifier)
    }
}

impl From<Nullifier> for [u8; 32] {
    fn from(nullifier: Nullifier) -> Self {
        nullifier.0
    }
}

impl AsRef<[u8]> for Nullifier {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl AsRef<[u8]> for Commitment {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
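A proof as modeled here simply bundles the commitment being used, its nullifier and the evolved commitment for a given slot; the `Coin` helper in this commit's tests derives all three from a secret key. A hedged sketch of building one from raw bytes, with the `proof_from_parts` function being illustrative only:

```rust
use cryptarchia_engine::{Commitment, LeaderProof, Nullifier, Slot};

// Illustrative only: real proofs would be produced by a wallet/leader component,
// not assembled from raw byte arrays like this.
fn proof_from_parts(
    commitment: [u8; 32],
    nullifier: [u8; 32],
    evolved_commitment: [u8; 32],
    slot: Slot,
) -> LeaderProof {
    LeaderProof::new(
        Commitment::from(commitment),
        Nullifier::from(nullifier),
        slot,
        Commitment::from(evolved_commitment),
    )
}
```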
consensus/cryptarchia-engine/src/ledger.rs
@@ -0,0 +1,502 @@
use crate::{
    crypto::Blake2b, Commitment, Config, Epoch, Header, HeaderId, LeaderProof, Nonce, Nullifier,
    Slot,
};
use blake2::Digest;
use rpds::HashTrieSet;
use std::collections::HashMap;
use thiserror::Error;

#[derive(Clone, Debug, Error)]
pub enum LedgerError {
    #[error("Commitment not found in the ledger state")]
    CommitmentNotFound,
    #[error("Nullifier already exists in the ledger state")]
    NullifierExists,
    #[error("Commitment already exists in the ledger state")]
    CommitmentExists,
    #[error("Invalid block slot {block:?} for parent slot {parent:?}")]
    InvalidSlot { parent: Slot, block: Slot },
    #[error("Parent block not found: {0:?}")]
    ParentNotFound(HeaderId),
    #[error("Orphan block missing: {0:?}. Importing leader proofs requires the block to be validated first")]
    OrphanMissing(HeaderId),
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct EpochState {
    // The epoch this snapshot is for
    epoch: Epoch,
    // value of the ledger nonce after 'epoch_period_nonce_buffer' slots from the beginning of the epoch
    nonce: Nonce,
    // stake distribution snapshot taken at the beginning of the epoch
    // (in practice, this is equivalent to the coins that are spendable at the beginning of the epoch)
    commitments: HashTrieSet<Commitment>,
}

impl EpochState {
    fn update_from_ledger(self, ledger: &LedgerState, config: &Config) -> Self {
        let nonce_snapshot_slot = config.nonce_snapshot(self.epoch);
        let nonce = if ledger.slot < nonce_snapshot_slot {
            ledger.nonce
        } else {
            self.nonce
        };

        let stake_snapshot_slot = config.stake_distribution_snapshot(self.epoch);
        let commitments = if ledger.slot < stake_snapshot_slot {
            ledger.lead_commitments.clone()
        } else {
            self.commitments
        };
        Self {
            epoch: self.epoch,
            nonce,
            commitments,
        }
    }

    fn is_eligible_leader(&self, commitment: &Commitment) -> bool {
        self.commitments.contains(commitment)
    }
}

#[derive(Clone, Debug, PartialEq)]
pub struct Ledger {
    states: HashMap<HeaderId, LedgerState>,
    config: Config,
}

impl Ledger {
    pub fn from_genesis(id: HeaderId, state: LedgerState, config: Config) -> Self {
        Self {
            states: [(id, state)].into_iter().collect(),
            config,
        }
    }

    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn try_apply_header(&self, header: &Header) -> Result<Self, LedgerError> {
        let parent_id = header.parent();
        let parent_state = self
            .states
            .get(&parent_id)
            .ok_or(LedgerError::ParentNotFound(parent_id))?;
        let config = self.config.clone();

        let new_state = parent_state
            .clone()
            .try_apply_header(header, &self.config)?;

        let mut states = self.states.clone();

        states.insert(header.id(), new_state);

        Ok(Self { states, config })
    }

    pub fn state(&self, header_id: &HeaderId) -> Option<&LedgerState> {
        self.states.get(header_id)
    }
}
#[derive(Clone, Eq, PartialEq)]
pub struct LedgerState {
    // commitments to coins that can be used to propose new blocks
    lead_commitments: HashTrieSet<Commitment>,
    // commitments to coins that can be spent, this is a superset of lead_commitments
    spend_commitments: HashTrieSet<Commitment>,
    nullifiers: HashTrieSet<Nullifier>,
    // randomness contribution
    nonce: Nonce,
    slot: Slot,
    // rolling snapshot of the state for the next epoch, used for epoch transitions
    next_epoch_state: EpochState,
    epoch_state: EpochState,
}

impl LedgerState {
    fn try_apply_header(self, header: &Header, config: &Config) -> Result<Self, LedgerError> {
        // TODO: import leader proofs
        self.update_epoch_state(header.slot(), config)?
            .try_apply_leadership(header, config)
    }

    fn update_epoch_state(self, slot: Slot, config: &Config) -> Result<Self, LedgerError> {
        if slot <= self.slot {
            return Err(LedgerError::InvalidSlot {
                parent: self.slot,
                block: slot,
            });
        }

        let current_epoch = self.slot.epoch(config);
        let new_epoch = slot.epoch(config);

        // there are 3 cases to consider:
        // 1. we are in the same epoch as the parent state
        //    update the next epoch state
        // 2. we are in the next epoch
        //    use the next epoch state as the current epoch state and reset next epoch state
        // 3. we are in the next-next or later epoch:
        //    use the parent state as the epoch state and reset next epoch state

        if current_epoch == new_epoch {
            // case 1)
            let next_epoch_state = self
                .next_epoch_state
                .clone()
                .update_from_ledger(&self, config);
            Ok(Self {
                slot,
                next_epoch_state,
                ..self
            })
        } else if new_epoch == current_epoch + 1 {
            // case 2)
            let epoch_state = self.next_epoch_state.clone();
            let next_epoch_state = EpochState {
                epoch: new_epoch + 1,
                nonce: self.nonce,
                commitments: self.spend_commitments.clone(),
            };
            Ok(Self {
                slot,
                next_epoch_state,
                epoch_state,
                ..self
            })
        } else {
            // case 3)
            let epoch_state = EpochState {
                epoch: new_epoch,
                nonce: self.nonce,
                commitments: self.spend_commitments.clone(),
            };
            let next_epoch_state = EpochState {
                epoch: new_epoch + 1,
                nonce: self.nonce,
                commitments: self.spend_commitments.clone(),
            };
            Ok(Self {
                slot,
                next_epoch_state,
                epoch_state,
                ..self
            })
        }
    }

    fn try_apply_proof(self, proof: &LeaderProof, config: &Config) -> Result<Self, LedgerError> {
        assert_eq!(proof.slot().epoch(config), self.epoch_state.epoch);
        // The leadership coin either has to be in the state snapshot or be derived from
        // a coin that is in the state snapshot (i.e. be in the lead coins commitments)
        if !self.can_lead(proof.commitment())
            && !self.epoch_state.is_eligible_leader(proof.commitment())
        {
            return Err(LedgerError::CommitmentNotFound);
        }

        if self.is_nullified(proof.nullifier()) {
            return Err(LedgerError::NullifierExists);
        }

        if self.is_committed(proof.evolved_commitment()) {
            return Err(LedgerError::CommitmentExists);
        }

        let lead_commitments = self.lead_commitments.insert(*proof.evolved_commitment());
        let spend_commitments = self.spend_commitments.insert(*proof.evolved_commitment());
        let nullifiers = self.nullifiers.insert(*proof.nullifier());

        Ok(Self {
            lead_commitments,
            spend_commitments,
            nullifiers,
            ..self
        })
    }

    fn try_apply_leadership(
        mut self,
        header: &Header,
        config: &Config,
    ) -> Result<Self, LedgerError> {
        self = self
            .try_apply_proof(header.leader_proof(), config)?
            .update_nonce(header.leader_proof());

        Ok(self)
    }

    pub fn can_spend(&self, commitment: &Commitment) -> bool {
        self.spend_commitments.contains(commitment)
    }

    pub fn can_lead(&self, commitment: &Commitment) -> bool {
        self.lead_commitments.contains(commitment)
    }

    pub fn is_nullified(&self, nullifier: &Nullifier) -> bool {
        self.nullifiers.contains(nullifier)
    }

    pub fn is_committed(&self, commitment: &Commitment) -> bool {
        // spendable coins are a superset of coins that can lead, so it's sufficient to check only one set
        self.spend_commitments.contains(commitment)
    }

    fn update_nonce(self, proof: &LeaderProof) -> Self {
        Self {
            nonce: <[u8; 32]>::from(
                Blake2b::new_with_prefix("epoch-nonce".as_bytes())
                    .chain_update(<[u8; 32]>::from(self.nonce))
                    .chain_update(proof.nullifier())
                    .chain_update(proof.slot().to_be_bytes())
                    .finalize(),
            )
            .into(),
            ..self
        }
    }
}

impl core::fmt::Debug for LedgerState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LedgerState")
            .field(
                "lead_commitment",
                &self.lead_commitments.iter().collect::<Vec<_>>(),
            )
            .field(
                "spend_commitments",
                &self.spend_commitments.iter().collect::<Vec<_>>(),
            )
            .field("nullifiers", &self.nullifiers.iter().collect::<Vec<_>>())
            .field("nonce", &self.nonce)
            .field("slot", &self.slot)
            .finish()
    }
}
#[cfg(test)]
pub mod tests {

    use crate::{ledger::LedgerError, Commitment, Header};

    use super::{
        super::tests::{config, genesis_header, header, Coin},
        EpochState, Ledger, LedgerState,
    };

    pub fn genesis_state(commitments: &[Commitment]) -> LedgerState {
        LedgerState {
            lead_commitments: commitments.iter().cloned().collect(),
            spend_commitments: commitments.iter().cloned().collect(),
            nullifiers: Default::default(),
            nonce: [0; 32].into(),
            slot: 0.into(),
            next_epoch_state: EpochState {
                epoch: 1.into(),
                nonce: [0; 32].into(),
                commitments: commitments.iter().cloned().collect(),
            },
            epoch_state: EpochState {
                epoch: 0.into(),
                nonce: [0; 32].into(),
                commitments: commitments.iter().cloned().collect(),
            },
        }
    }

    fn ledger(commitments: &[Commitment]) -> (Ledger, Header) {
        let genesis_state = genesis_state(commitments);
        let genesis_header = genesis_header();
        (
            Ledger::from_genesis(genesis_header.id(), genesis_state, config()),
            genesis_header,
        )
    }

    fn apply_and_add_coin(mut ledger: Ledger, header: Header, coin: Coin) -> Ledger {
        let header_id = header.id();
        ledger = ledger.try_apply_header(&header).unwrap();
        // we still don't have transactions, so the only way to add a commitment to spendable commitments and
        // test epoch snapshotting is by doing this manually
        let mut block_state = ledger.states[&header_id].clone();
        block_state.spend_commitments = block_state.spend_commitments.insert(coin.commitment());
        ledger.states.insert(header_id, block_state);
        ledger
    }

    #[test]
    fn test_ledger_state_prevents_coin_reuse() {
        let coin = Coin::new(0);
        let (mut ledger, genesis) = ledger(&[coin.commitment()]);
        let h = header(1, genesis.id(), coin);
        ledger = ledger.try_apply_header(&h).unwrap();

        // reusing the same coin should be prevented
        assert!(matches!(
            ledger.try_apply_header(&header(2, h.id(), coin)),
            Err(LedgerError::NullifierExists),
        ));
    }

    #[test]
    fn test_ledger_state_uncommited_coin() {
        let coin = Coin::new(0);
        let (ledger, genesis) = ledger(&[]);
        let h = header(1, genesis.id(), coin);
        assert!(matches!(
            ledger.try_apply_header(&h),
            Err(LedgerError::CommitmentNotFound),
        ));
    }

    #[test]
    fn test_ledger_state_is_properly_updated_on_reorg() {
        let coin_1 = Coin::new(0);
        let coin_2 = Coin::new(1);
        let coin_3 = Coin::new(2);

        let (mut ledger, genesis) = ledger(&[
            coin_1.commitment(),
            coin_2.commitment(),
            coin_3.commitment(),
        ]);

        // coin_1 & coin_2 both concurrently win slot 0
        let h_1 = header(1, genesis.id(), coin_1);
        let h_2 = header(1, genesis.id(), coin_2);

        ledger = ledger.try_apply_header(&h_1).unwrap();
        ledger = ledger.try_apply_header(&h_2).unwrap();

        // then coin_3 wins slot 1 and chooses to extend from block_2
        let h_3 = header(2, h_2.id(), coin_3);
        ledger = ledger.try_apply_header(&h_3).unwrap();
        // coin 1 is not spent in the chain that ends with block_3
        assert!(!ledger.states[&h_3.id()].is_nullified(&coin_1.nullifier()));
    }

    #[test]
    fn test_epoch_transition() {
        let coins = (0..4).map(Coin::new).collect::<Vec<_>>();
        let coin_4 = Coin::new(4);
        let coin_5 = Coin::new(5);
        let (mut ledger, genesis) =
            ledger(&coins.iter().map(|c| c.commitment()).collect::<Vec<_>>());

        // An epoch will be 10 slots long, with stake distribution snapshot taken at the start of the epoch
        // and nonce snapshot before slot 7

        let h_1 = header(1, genesis.id(), coins[0]);
        ledger = ledger.try_apply_header(&h_1).unwrap();
        assert_eq!(ledger.states[&h_1.id()].epoch_state.epoch, 0.into());

        let h_2 = header(6, h_1.id(), coins[1]);
        ledger = ledger.try_apply_header(&h_2).unwrap();

        let h_3 = header(9, h_2.id(), coins[2]);
        ledger = apply_and_add_coin(ledger, h_3.clone(), coin_4);

        // test epoch jump
        let h_4 = header(20, h_3.id(), coins[3]);
        ledger = ledger.try_apply_header(&h_4).unwrap();
        // nonce for epoch 2 should be taken at the end of slot 16, but in our case the last block is at slot 9
        assert_eq!(
            ledger.states[&h_4.id()].epoch_state.nonce,
            ledger.states[&h_3.id()].nonce,
        );
        // stake distribution snapshot should be taken at the end of slot 9
        assert_eq!(
            ledger.states[&h_4.id()].epoch_state.commitments,
            ledger.states[&h_3.id()].spend_commitments,
        );

        // nonce for epoch 1 should be taken at the end of slot 6
        let h_5 = header(10, h_3.id(), coins[3]);
        ledger = apply_and_add_coin(ledger, h_5.clone(), coin_5);
        assert_eq!(
            ledger.states[&h_5.id()].epoch_state.nonce,
            ledger.states[&h_2.id()].nonce,
        );

        let h_6 = header(20, h_5.id(), coins[3].evolve());
        ledger = ledger.try_apply_header(&h_6).unwrap();
        // stake distribution snapshot should be taken at the end of slot 9, check that changes in slot 10
        // are ignored
        assert_eq!(
            ledger.states[&h_6.id()].epoch_state.commitments,
            ledger.states[&h_3.id()].spend_commitments,
        );
    }

    #[test]
    fn test_evolved_coin_is_eligible_for_leadership() {
        let coin = Coin::new(0);
        let (mut ledger, genesis) = ledger(&[coin.commitment()]);
        let h = header(1, genesis.id(), coin);
        ledger = ledger.try_apply_header(&h).unwrap();

        // reusing the same coin should be prevented
        assert!(matches!(
            ledger.try_apply_header(&header(2, h.id(), coin)),
            Err(LedgerError::NullifierExists),
        ));

        // the evolved coin is not eligible before block 2 as it has not appeared on the ledger yet
        assert!(matches!(
            ledger.try_apply_header(&header(2, genesis.id(), coin.evolve())),
            Err(LedgerError::CommitmentNotFound),
        ));

        // the evolved coin is eligible after coin 1 is spent
        assert!(ledger
            .try_apply_header(&header(2, h.id(), coin.evolve()))
            .is_ok());
    }

    #[test]
    fn test_new_coins_becoming_eligible_after_stake_distribution_stabilizes() {
        let coin = Coin::new(0);
        let coin_1 = Coin::new(1);
        let (mut ledger, genesis) = ledger(&[coin.commitment()]);

        // EPOCH 0
        let h_0_1 = header(1, genesis.id(), coin);
        // mint a new coin to be used for leader elections in upcoming epochs
        ledger = apply_and_add_coin(ledger, h_0_1.clone(), coin_1);

        let h_0_2 = header(2, h_0_1.id(), coin_1);
        // the new coin is not yet eligible for leader elections
        assert!(matches!(
            ledger.try_apply_header(&h_0_2),
            Err(LedgerError::CommitmentNotFound),
        ));

        // but the evolved coin can
        let h_0_2 = header(2, h_0_1.id(), coin.evolve());
        ledger = ledger.try_apply_header(&h_0_2).unwrap();

        // EPOCH 1
        for i in 10..20 {
            // the newly minted coin is still not eligible in the following epoch since the
            // stake distribution snapshot is taken at the beginning of the previous epoch
            assert!(matches!(
                ledger.try_apply_header(&header(i, h_0_2.id(), coin_1)),
                Err(LedgerError::CommitmentNotFound),
            ));
        }

        // EPOCH 2
        // the coin is finally eligible 2 epochs after it was first minted
        let h_2_0 = header(20, h_0_2.id(), coin_1);
        ledger = ledger.try_apply_header(&h_2_0).unwrap();

        // and now the minted coin can freely use the evolved coin for subsequent blocks
        let h_2_1 = header(21, h_2_0.id(), coin_1.evolve());
        ledger.try_apply_header(&h_2_1).unwrap();
    }
}
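As the tests above exercise, `Ledger::try_apply_header` is persistent: it returns a new `Ledger` keyed by the new header's id and leaves the original untouched (the commitment and nullifier sets are `rpds` tries, so forks share structure). A small external usage sketch; the `apply_fork` helper is illustrative, not part of this commit:

```rust
use cryptarchia_engine::ledger::{Ledger, LedgerError};
use cryptarchia_engine::Header;

// Illustrative helper: `a` and `b` are two blocks built on the same parent,
// whose state is already tracked by `ledger`.
fn apply_fork(ledger: &Ledger, a: &Header, b: &Header) -> Result<(Ledger, Ledger), LedgerError> {
    let fork_a = ledger.try_apply_header(a)?;
    let fork_b = ledger.try_apply_header(b)?;
    // `ledger` itself is unchanged; each fork can keep being extended independently
    Ok((fork_a, fork_b))
}
```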
consensus/cryptarchia-engine/src/lib.rs
@@ -0,0 +1,393 @@
pub mod block;
pub mod config;
pub mod crypto;
pub mod leader_proof;
pub mod ledger;
pub mod time;

pub use block::*;
pub use config::*;
pub use leader_proof::*;
use ledger::{Ledger, LedgerState};
use std::collections::{HashMap, HashSet};
use thiserror::Error;
pub use time::*;

#[derive(Clone, Debug)]
pub struct Cryptarchia {
    local_chain: Branch,
    branches: Branches,
    ledger: Ledger,
    config: Config,
    genesis: HeaderId,
}

#[derive(Clone, Debug)]
pub struct Branches {
    branches: HashMap<HeaderId, Branch>,
    tips: HashSet<HeaderId>,
}

#[derive(Clone, Debug)]
pub struct Branch {
    header: Header,
    // chain length
    length: u64,
}

impl Branches {
    pub fn from_genesis(genesis: &Header) -> Self {
        let mut branches = HashMap::new();
        branches.insert(
            genesis.id(),
            Branch {
                header: genesis.clone(),
                length: 0,
            },
        );
        let tips = HashSet::from([genesis.id()]);
        Self { branches, tips }
    }

    #[must_use]
    fn apply_header(&self, header: Header) -> Self {
        let mut branches = self.branches.clone();
        let mut tips = self.tips.clone();
        // if the parent was the head of a branch, remove it as it has been superseded by the new header
        tips.remove(&header.parent());
        let length = branches[&header.parent()].length + 1;
        tips.insert(header.id());
        branches.insert(header.id(), Branch { header, length });

        Self { branches, tips }
    }

    pub fn branches(&self) -> Vec<Branch> {
        self.tips
            .iter()
            .map(|id| self.branches[id].clone())
            .collect()
    }

    // find the lowest common ancestor of two branches
    pub fn lca<'a>(&'a self, mut b1: &'a Branch, mut b2: &'a Branch) -> Branch {
        // first reduce branches to the same length
        while b1.length > b2.length {
            b1 = &self.branches[&b1.header.parent()];
        }

        while b2.length > b1.length {
            b2 = &self.branches[&b2.header.parent()];
        }

        // then walk up the chain until we find the common ancestor
        while b1.header.id() != b2.header.id() {
            b1 = &self.branches[&b1.header.parent()];
            b2 = &self.branches[&b2.header.parent()];
        }

        b1.clone()
    }

    pub fn get(&self, id: &HeaderId) -> Option<&Branch> {
        self.branches.get(id)
    }

    // Walk back the chain until the target slot
    fn walk_back_before(&self, branch: &Branch, slot: Slot) -> Branch {
        let mut current = branch;
        while current.header.slot() > slot {
            current = &self.branches[&current.header.parent()];
        }
        current.clone()
    }
}

#[derive(Debug, Clone, Error)]
pub enum Error {
    #[error("Ledger error: {0}")]
    LedgerError(#[from] ledger::LedgerError),
    #[error("Parent block: {0:?} is not known to this node")]
    ParentMissing(HeaderId),
    #[error("Orphan proof was not found in the ledger: {0:?}, can't import it")]
    OrphanMissing(HeaderId),
}
impl Cryptarchia {
    pub fn from_genesis(header: Header, state: LedgerState, config: Config) -> Self {
        assert_eq!(header.slot(), Slot::genesis());
        Self {
            ledger: Ledger::from_genesis(header.id(), state, config.clone()),
            branches: Branches::from_genesis(&header),
            local_chain: Branch {
                header: header.clone(),
                length: 0,
            },
            config,
            genesis: header.id(),
        }
    }

    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn receive_block(&self, block: Block) -> Result<Self, Error> {
        let header = block.header();

        let mut new: Self = self.clone();
        new.branches = new.branches.apply_header(header.clone());
        new.ledger = new.ledger.try_apply_header(header)?;
        new.local_chain = new.fork_choice();

        Ok(new)
    }

    pub fn fork_choice(&self) -> Branch {
        let k = self.config.security_param as u64;
        let s = self.config.s();
        Self::maxvalid_bg(self.local_chain.clone(), &self.branches, k, s)
    }

    pub fn tip(&self) -> &Header {
        &self.local_chain.header
    }

    pub fn tip_id(&self) -> HeaderId {
        self.local_chain.header.id()
    }

    // prune all states deeper than 'depth' with regard to the current
    // local chain except for states belonging to the local chain
    pub fn prune_forks(&mut self, _depth: u64) {
        todo!()
    }

    pub fn genesis(&self) -> &HeaderId {
        &self.genesis
    }

    pub fn branches(&self) -> &Branches {
        &self.branches
    }

    // Implementation of the fork choice rule as defined in the Ouroboros Genesis paper
    // k defines the forking depth of a chain we accept without more analysis
    // s defines the length of time (in slots) after the fork that we inspect for chain density
    fn maxvalid_bg(local_chain: Branch, branches: &Branches, k: u64, s: u64) -> Branch {
        let mut cmax = local_chain;
        let forks = branches.branches();
        for chain in forks {
            let lowest_common_ancestor = branches.lca(&cmax, &chain);
            let m = cmax.length - lowest_common_ancestor.length;
            if m <= k {
                // Classic longest chain rule with parameter k
                if cmax.length < chain.length {
                    cmax = chain;
                } else {
                    println!(
                        "shorter {:?} {} {}",
                        chain.header.id(),
                        cmax.length,
                        chain.length
                    )
                }
            } else {
                // The chain is forking too much, we need to pay a bit more attention
                // In particular, select the chain that is the densest after the fork
                let density_slot = Slot::from(u64::from(lowest_common_ancestor.header.slot()) + s);
                let cmax_density = branches.walk_back_before(&cmax, density_slot).length;
                let candidate_density = branches.walk_back_before(&chain, density_slot).length;
                if cmax_density < candidate_density {
                    cmax = chain;
                } else {
                    println!(
                        "less dense {:?} {} {}",
                        chain.header.id(),
                        cmax_density,
                        candidate_density
                    )
                }
            }
        }
        cmax
    }
}
#[cfg(test)]
pub mod tests {
    use crate::{
        crypto::Blake2b, Block, Commitment, Config, Header, HeaderId, LeaderProof, Nullifier, Slot,
        TimeConfig,
    };
    use blake2::Digest;
    use std::hash::{DefaultHasher, Hash, Hasher};

    use super::{ledger::tests::genesis_state, Cryptarchia};

    pub fn header(slot: impl Into<Slot>, parent: HeaderId, coin: Coin) -> Header {
        let slot = slot.into();
        Header::new(parent, 0, [0; 32].into(), slot, coin.to_proof(slot))
    }

    pub fn block(slot: impl Into<Slot>, parent: HeaderId, coin: Coin) -> Block {
        Block::new(header(slot, parent, coin))
    }

    pub fn propose_and_evolve(
        slot: impl Into<Slot>,
        parent: HeaderId,
        coin: &mut Coin,
        eng: &mut Cryptarchia,
    ) -> HeaderId {
        let b = block(slot, parent, *coin);
        let id = b.header().id();
        *eng = eng.receive_block(b).unwrap();
        *coin = coin.evolve();
        id
    }

    pub fn genesis_header() -> Header {
        Header::new(
            [0; 32].into(),
            0,
            [0; 32].into(),
            0.into(),
            LeaderProof::dummy(0.into()),
        )
    }

    fn engine(commitments: &[Commitment]) -> Cryptarchia {
        Cryptarchia::from_genesis(genesis_header(), genesis_state(commitments), config())
    }

    pub fn config() -> Config {
        Config {
            security_param: 1,
            active_slot_coeff: 1.0,
            epoch_stake_distribution_stabilization: 4,
            epoch_period_nonce_buffer: 3,
            epoch_period_nonce_stabilization: 3,
            time: TimeConfig {
                slot_duration: 1,
                chain_start_time: 0,
            },
        }
    }

    #[derive(Debug, Clone, Copy)]
    pub struct Coin {
        sk: u64,
        nonce: u64,
    }

    impl Coin {
        pub fn new(sk: u64) -> Self {
            Self { sk, nonce: 0 }
        }

        pub fn commitment(&self) -> Commitment {
            <[u8; 32]>::from(
                Blake2b::new_with_prefix("commitment".as_bytes())
                    .chain_update(self.sk.to_be_bytes())
                    .chain_update(self.nonce.to_be_bytes())
                    .finalize(),
            )
            .into()
        }

        pub fn nullifier(&self) -> Nullifier {
            <[u8; 32]>::from(
                Blake2b::new_with_prefix("nullifier".as_bytes())
                    .chain_update(self.sk.to_be_bytes())
                    .chain_update(self.nonce.to_be_bytes())
                    .finalize(),
            )
            .into()
        }

        pub fn evolve(&self) -> Self {
            let mut h = DefaultHasher::new();
            self.nonce.hash(&mut h);
            let nonce = h.finish();
            Self { sk: self.sk, nonce }
        }

        pub fn to_proof(&self, slot: Slot) -> LeaderProof {
            LeaderProof::new(
                self.commitment(),
                self.nullifier(),
                slot,
                self.evolve().commitment(),
            )
        }
    }

    #[test]
    fn test_fork_choice() {
        let mut long_coin = Coin::new(0);
        let mut short_coin = Coin::new(1);
        let mut long_dense_coin = Coin::new(2);
        // TODO: use cryptarchia
        let mut engine = engine(&[
            long_coin.commitment(),
            short_coin.commitment(),
            long_dense_coin.commitment(),
        ]);
        // by setting a low k we trigger the density choice rule, and the shorter chain is denser after
        // the fork
        engine.config.security_param = 10;

        let mut parent = *engine.genesis();
        for i in 1..50 {
            parent = propose_and_evolve(i, parent, &mut long_coin, &mut engine);
            println!("{:?}", engine.tip());
        }
        println!("{:?}", engine.tip());
        assert_eq!(engine.tip_id(), parent);

        let mut long_p = parent;
        let mut short_p = parent;
        // the node sees the short chain first
        for slot in 50..70 {
            short_p = propose_and_evolve(slot, short_p, &mut short_coin, &mut engine);
        }

        assert_eq!(engine.tip_id(), short_p);

        // then it receives a longer chain which is however less dense after the fork
        for slot in 50..70 {
            if slot % 2 == 0 {
                long_p = propose_and_evolve(slot, long_p, &mut long_coin, &mut engine);
            }
            assert_eq!(engine.tip_id(), short_p);
        }
        // even if the long chain is much longer, it will never be accepted as it's not dense enough
        for slot in 70..100 {
            long_p = propose_and_evolve(slot, long_p, &mut long_coin, &mut engine);
            assert_eq!(engine.tip_id(), short_p);
        }

        let bs = engine.branches().branches();
        let long_branch = bs.iter().find(|b| b.header.id() == long_p).unwrap();
        let short_branch = bs.iter().find(|b| b.header.id() == short_p).unwrap();
        assert!(long_branch.length > short_branch.length);

        // however, if we set k to the fork length, it will be accepted
        let k = long_branch.length;
        assert_eq!(
            Cryptarchia::maxvalid_bg(
                short_branch.clone(),
                engine.branches(),
                k,
                engine.config.s()
            )
            .header
            .id(),
            long_p
        );

        // a longer chain which is equally dense after the fork will be selected as the main tip
        for slot in 50..71 {
            parent = propose_and_evolve(slot, parent, &mut long_dense_coin, &mut engine);
        }
        assert_eq!(engine.tip_id(), parent);
    }
}
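Putting it together: `Cryptarchia::receive_block` validates the header against the ledger, records the new branch and re-runs the fork choice (`maxvalid_bg`), returning an updated instance. A usage sketch from outside the crate; the `import` function is illustrative, not part of this commit:

```rust
use cryptarchia_engine::{Block, Cryptarchia, Error};

// Illustrative: feed a batch of blocks to an engine and follow the selected tip.
fn import(engine: &Cryptarchia, blocks: Vec<Block>) -> Result<Cryptarchia, Error> {
    let mut engine = engine.clone();
    for block in blocks {
        engine = engine.receive_block(block)?;
        println!("tip after import: {:?}", engine.tip_id());
    }
    Ok(engine)
}
```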
consensus/cryptarchia-engine/src/time.rs
@@ -0,0 +1,62 @@
use crate::config::Config;
use std::ops::Add;

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
pub struct Slot(u64);

#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
pub struct Epoch(u32);

impl Slot {
    pub fn to_be_bytes(&self) -> [u8; 8] {
        self.0.to_be_bytes()
    }

    pub fn genesis() -> Self {
        Self(0)
    }

    pub fn epoch(&self, config: &Config) -> Epoch {
        Epoch((self.0 / config.epoch_length()) as u32)
    }
}

impl From<u32> for Epoch {
    fn from(epoch: u32) -> Self {
        Self(epoch)
    }
}

impl From<Epoch> for u32 {
    fn from(epoch: Epoch) -> Self {
        epoch.0
    }
}

impl From<u64> for Slot {
    fn from(slot: u64) -> Self {
        Self(slot)
    }
}

impl From<Slot> for u64 {
    fn from(slot: Slot) -> Self {
        slot.0
    }
}

impl Add<u64> for Slot {
    type Output = Slot;

    fn add(self, rhs: u64) -> Self::Output {
        Slot(self.0 + rhs)
    }
}

impl Add<u32> for Epoch {
    type Output = Epoch;

    fn add(self, rhs: u32) -> Self::Output {
        Epoch(self.0 + rhs)
    }
}