Merge branch 'master' into block-explorer
commit a5fcd4e688
@@ -23,6 +23,7 @@ members = [
|
|||
"nodes/explorer",
|
||||
"simulations",
|
||||
"consensus/carnot-engine",
|
||||
"consensus/cryptarchia-engine",
|
||||
"tests",
|
||||
"mixnet/node",
|
||||
"mixnet/client",
|
||||
|
|
|
@@ -1,6 +1,6 @@
|
|||
# BUILD IMAGE ---------------------------------------------------------
|
||||
|
||||
FROM rust:1.75.0-slim-bullseye AS builder
|
||||
FROM rust:1.76.0-slim-bullseye AS builder
|
||||
|
||||
# Using backports for go 1.19
|
||||
RUN echo 'deb http://deb.debian.org/debian bullseye-backports main' \
|
||||
|
|
|
@@ -1,4 +1,4 @@
|
|||
FROM rust:1.75.0-slim-bullseye
|
||||
FROM rust:1.76.0-slim-bullseye
|
||||
|
||||
LABEL maintainer="augustinas@status.im" \
|
||||
source="https://github.com/logos-co/nomos-node" \
|
||||
|
|
|
@@ -1,3 +1,6 @@
|
|||
#!/usr/bin/env groovy
|
||||
library 'status-jenkins-lib@v1.8.6'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
dockerfile {
|
||||
|
@@ -43,16 +46,16 @@ pipeline {
|
|||
}
|
||||
|
||||
post {
|
||||
failure {
|
||||
always {
|
||||
script {
|
||||
def discord = load "${WORKSPACE}/ci/discord.groovy"
|
||||
discord.sendMessage(header: 'Nightly Fuzztest Failed. Regression files archived as job artifacts')
|
||||
}
|
||||
}
|
||||
success {
|
||||
script {
|
||||
def discord = load "${WORKSPACE}/ci/discord.groovy"
|
||||
discord.sendMessage(header: 'Nightly Fuzztest Passed')
|
||||
discord.send(
|
||||
header: (
|
||||
currentBuild.currentResult == 'SUCCESS' ?
|
||||
'Nightly Fuzztest Passed' :
|
||||
'Nightly Fuzztest Failed. Regression files archived as job artifacts'
|
||||
),
|
||||
cred: 'nomos-node-discord-commits-webhook',
|
||||
)
|
||||
}
|
||||
}
|
||||
cleanup { cleanWs() }
|
||||
|
|
|
@@ -1,3 +1,6 @@
|
|||
#!/usr/bin/env groovy
|
||||
library 'status-jenkins-lib@v1.8.6'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
dockerfile {
|
||||
|
@@ -67,20 +70,13 @@ pipeline {
|
|||
}
|
||||
|
||||
post {
|
||||
failure {
|
||||
script {
|
||||
def report = readFile("${WORKSPACE}/report.txt").trim()
|
||||
def discord = load "${WORKSPACE}/ci/discord.groovy"
|
||||
discord.sendMessage(header: "Nightly Integration Tests Failed: ${report}")
|
||||
}
|
||||
}
|
||||
success {
|
||||
script {
|
||||
def report = readFile('report.txt').trim()
|
||||
def discord = load "${WORKSPACE}/ci/discord.groovy"
|
||||
discord.sendMessage(header: "Nightly Integration Tests Passed: ${report}")
|
||||
}
|
||||
}
|
||||
always { script {
|
||||
def report = readFile("${WORKSPACE}/report.txt").trim()
|
||||
discord.send(
|
||||
header: "Nightly Integration Tests ${currentBuild.currentResult}: ${report}",
|
||||
cred: 'nomos-node-discord-commits-webhook',
|
||||
)
|
||||
} }
|
||||
cleanup { cleanWs() }
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,32 +0,0 @@
|
|||
def sendMessage(Map args=[:]) {
|
||||
def opts = [
|
||||
header: args.header ?: 'Build succeeded',
|
||||
title: args.title ?: "${env.JOB_NAME}#${env.BUILD_NUMBER}",
|
||||
cred: args.cred ?: 'nomos-node-discord-commits-webhook',
|
||||
]
|
||||
def repo = [
|
||||
url: GIT_URL.minus('.git'),
|
||||
branch: GIT_BRANCH.minus('origin/'),
|
||||
commit: GIT_COMMIT.take(8),
|
||||
]
|
||||
withCredentials([
|
||||
string(
|
||||
credentialsId: opts.cred,
|
||||
variable: 'DISCORD_WEBHOOK',
|
||||
),
|
||||
]) {
|
||||
discordSend(
|
||||
link: env.BUILD_URL,
|
||||
result: currentBuild.currentResult,
|
||||
webhookURL: env.DISCORD_WEBHOOK,
|
||||
title: opts.title,
|
||||
description: """
|
||||
${opts.header}
|
||||
Branch: [`${repo.branch}`](${repo.url}/commits/${repo.branch})
|
||||
Commit: [`${repo.commit}`](${repo.url}/commit/${repo.commit})
|
||||
""",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return this
|
|
@@ -139,6 +139,15 @@ services:
|
|||
entrypoint: /usr/bin/mixnode
|
||||
command: /etc/nomos/mixnode_config.yaml
|
||||
|
||||
chatbot:
|
||||
container_name: chatbot
|
||||
build:
|
||||
context: .
|
||||
dockerfile: testnet/Dockerfile
|
||||
volumes:
|
||||
- ./testnet:/etc/nomos
|
||||
entrypoint: /etc/nomos/scripts/run_nomos_bot.sh
|
||||
|
||||
etcd:
|
||||
container_name: etcd
|
||||
image: quay.io/coreos/etcd:v3.4.15
|
||||
|
@@ -148,3 +157,29 @@ services:
|
|||
- /usr/local/bin/etcd
|
||||
- --advertise-client-urls=http://etcd:2379
|
||||
- --listen-client-urls=http://0.0.0.0:2379
|
||||
|
||||
prometheus:
|
||||
container_name: prometheus
|
||||
image: prom/prometheus:latest
|
||||
volumes:
|
||||
- ./testnet/monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:z
|
||||
command:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
- --storage.tsdb.retention.time=7d
|
||||
ports:
|
||||
- 127.0.0.1:9090:9090
|
||||
restart: on-failure
|
||||
|
||||
grafana:
|
||||
container_name: grafana
|
||||
image: grafana/grafana:latest
|
||||
env_file:
|
||||
- ./testnet/monitoring/grafana/plugins.env
|
||||
volumes:
|
||||
- ./testnet/monitoring/grafana/grafana.ini:/etc/grafana/grafana.ini:z
|
||||
- ./testnet/monitoring/grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:z
|
||||
ports:
|
||||
- 9091:3000
|
||||
restart: on-failure
|
||||
depends_on:
|
||||
- prometheus
|
||||
|
|
|
@@ -0,0 +1,11 @@
|
|||
[package]
|
||||
name = "cryptarchia-engine"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
blake2 = "0.10"
|
||||
rpds = "1"
|
||||
thiserror = "1"
|
|
@@ -0,0 +1,142 @@
|
|||
use crate::{crypto::Blake2b, leader_proof::LeaderProof, time::Slot};
|
||||
use blake2::Digest;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
|
||||
pub struct HeaderId([u8; 32]);
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
|
||||
pub struct ContentId([u8; 32]);
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy)]
|
||||
pub struct Nonce([u8; 32]);
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
|
||||
pub struct Header {
|
||||
parent: HeaderId,
|
||||
// length of block contents in bytes
|
||||
content_size: u32,
|
||||
// id of block contents
|
||||
content_id: ContentId,
|
||||
slot: Slot,
|
||||
leader_proof: LeaderProof,
|
||||
orphaned_leader_proofs: Vec<Header>,
|
||||
}
|
||||
|
||||
impl Header {
|
||||
pub fn parent(&self) -> HeaderId {
|
||||
self.parent
|
||||
}
|
||||
|
||||
fn update_hasher(&self, h: &mut Blake2b) {
|
||||
h.update(b"\x01");
|
||||
h.update(self.content_size.to_be_bytes());
|
||||
h.update(self.content_id.0);
|
||||
h.update(self.slot.to_be_bytes());
|
||||
h.update(self.parent.0);
|
||||
|
||||
h.update(self.leader_proof.commitment());
|
||||
h.update(self.leader_proof.nullifier());
|
||||
h.update(self.leader_proof.evolved_commitment());
|
||||
|
||||
for proof in &self.orphaned_leader_proofs {
|
||||
proof.update_hasher(h)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn id(&self) -> HeaderId {
|
||||
let mut h = Blake2b::new();
|
||||
self.update_hasher(&mut h);
|
||||
HeaderId(h.finalize().into())
|
||||
}
|
||||
|
||||
pub fn leader_proof(&self) -> &LeaderProof {
|
||||
&self.leader_proof
|
||||
}
|
||||
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.slot
|
||||
}
|
||||
|
||||
pub fn orphaned_proofs(&self) -> &[Header] {
|
||||
&self.orphaned_leader_proofs
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
parent: HeaderId,
|
||||
content_size: u32,
|
||||
content_id: ContentId,
|
||||
slot: Slot,
|
||||
leader_proof: LeaderProof,
|
||||
) -> Self {
|
||||
Self {
|
||||
parent,
|
||||
content_size,
|
||||
content_id,
|
||||
slot,
|
||||
leader_proof,
|
||||
orphaned_leader_proofs: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_orphaned_proofs(mut self, orphaned_leader_proofs: Vec<Header>) -> Self {
|
||||
self.orphaned_leader_proofs = orphaned_leader_proofs;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Block {
|
||||
header: Header,
|
||||
_contents: (),
|
||||
}
|
||||
|
||||
impl Block {
|
||||
pub fn header(&self) -> &Header {
|
||||
&self.header
|
||||
}
|
||||
|
||||
pub fn new(header: Header) -> Self {
|
||||
Self {
|
||||
header,
|
||||
_contents: (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ----------- conversions
|
||||
|
||||
impl From<[u8; 32]> for Nonce {
|
||||
fn from(nonce: [u8; 32]) -> Self {
|
||||
Self(nonce)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Nonce> for [u8; 32] {
|
||||
fn from(nonce: Nonce) -> [u8; 32] {
|
||||
nonce.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<[u8; 32]> for HeaderId {
|
||||
fn from(id: [u8; 32]) -> Self {
|
||||
Self(id)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HeaderId> for [u8; 32] {
|
||||
fn from(id: HeaderId) -> Self {
|
||||
id.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<[u8; 32]> for ContentId {
|
||||
fn from(id: [u8; 32]) -> Self {
|
||||
Self(id)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ContentId> for [u8; 32] {
|
||||
fn from(id: ContentId) -> Self {
|
||||
id.0
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,63 @@
|
|||
use crate::{Epoch, Slot};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct TimeConfig {
|
||||
// How long a slot lasts in seconds
|
||||
pub slot_duration: u64,
|
||||
// Start of the first epoch, in unix timestamp second precision
|
||||
pub chain_start_time: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct Config {
|
||||
// The k parameter in the Common Prefix property.
|
||||
// Blocks deeper than k are generally considered stable and forks deeper than that
|
||||
// trigger the additional fork selection rule, which, however, is only expected to be used
|
||||
// during bootstrapping.
|
||||
pub security_param: u32,
|
||||
// f, the rate of occupied slots
|
||||
pub active_slot_coeff: f64,
|
||||
// The stake distribution is always taken at the beginning of the previous epoch.
|
||||
// This parameter controls how many slots to wait for it to be stabilized
|
||||
// The value is computed as epoch_stake_distribution_stabilization * int(floor(k / f))
|
||||
pub epoch_stake_distribution_stabilization: u8,
|
||||
// This parameter controls how many slots we wait after the stake distribution
|
||||
// snapshot has stabilized to take the nonce snapshot.
|
||||
pub epoch_period_nonce_buffer: u8,
|
||||
// This parameter controls how many slots we wait for the nonce snapshot to be considered
|
||||
// stabilized
|
||||
pub epoch_period_nonce_stabilization: u8,
|
||||
pub time: TimeConfig,
|
||||
}
|
||||
impl Config {
|
||||
pub fn time_config(&self) -> &TimeConfig {
|
||||
&self.time
|
||||
}
|
||||
|
||||
pub fn base_period_length(&self) -> u64 {
|
||||
(f64::from(self.security_param) / self.active_slot_coeff).floor() as u64
|
||||
}
|
||||
|
||||
// return the number of slots required to have great confidence that at least k blocks have been produced
|
||||
pub fn s(&self) -> u64 {
|
||||
self.base_period_length() * 3
|
||||
}
|
||||
|
||||
pub fn epoch_length(&self) -> u64 {
|
||||
(self.epoch_stake_distribution_stabilization as u64
|
||||
+ self.epoch_period_nonce_buffer as u64
|
||||
+ self.epoch_period_nonce_stabilization as u64)
|
||||
* self.base_period_length()
|
||||
}
|
||||
|
||||
pub fn nonce_snapshot(&self, epoch: Epoch) -> Slot {
|
||||
let offset = self.base_period_length()
|
||||
* (self.epoch_period_nonce_buffer + self.epoch_stake_distribution_stabilization) as u64;
|
||||
let base = u32::from(epoch) as u64 * self.epoch_length();
|
||||
(base + offset).into()
|
||||
}
|
||||
|
||||
pub fn stake_distribution_snapshot(&self, epoch: Epoch) -> Slot {
|
||||
(u32::from(epoch) as u64 * self.epoch_length()).into()
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,3 @@
|
|||
use blake2::digest::typenum::U32;
|
||||
|
||||
pub(crate) type Blake2b = blake2::Blake2b<U32>;
|
|
@@ -0,0 +1,93 @@
|
|||
use crate::time::Slot;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
|
||||
pub struct LeaderProof {
|
||||
commitment: Commitment,
|
||||
nullifier: Nullifier,
|
||||
slot: Slot,
|
||||
evolved_commitment: Commitment,
|
||||
}
|
||||
|
||||
impl LeaderProof {
|
||||
pub fn commitment(&self) -> &Commitment {
|
||||
&self.commitment
|
||||
}
|
||||
|
||||
pub fn nullifier(&self) -> &Nullifier {
|
||||
&self.nullifier
|
||||
}
|
||||
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.slot
|
||||
}
|
||||
|
||||
pub fn evolved_commitment(&self) -> &Commitment {
|
||||
&self.evolved_commitment
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn dummy(slot: Slot) -> Self {
|
||||
Self {
|
||||
commitment: Commitment([0; 32]),
|
||||
nullifier: Nullifier([0; 32]),
|
||||
slot,
|
||||
evolved_commitment: Commitment([0; 32]),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
commitment: Commitment,
|
||||
nullifier: Nullifier,
|
||||
slot: Slot,
|
||||
evolved_commitment: Commitment,
|
||||
) -> Self {
|
||||
Self {
|
||||
commitment,
|
||||
nullifier,
|
||||
slot,
|
||||
evolved_commitment,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
|
||||
pub struct Commitment([u8; 32]);
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
|
||||
pub struct Nullifier([u8; 32]);
|
||||
|
||||
impl From<[u8; 32]> for Commitment {
|
||||
fn from(commitment: [u8; 32]) -> Self {
|
||||
Self(commitment)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Commitment> for [u8; 32] {
|
||||
fn from(commitment: Commitment) -> Self {
|
||||
commitment.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<[u8; 32]> for Nullifier {
|
||||
fn from(nullifier: [u8; 32]) -> Self {
|
||||
Self(nullifier)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Nullifier> for [u8; 32] {
|
||||
fn from(nullifier: Nullifier) -> Self {
|
||||
nullifier.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for Nullifier {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for Commitment {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
&self.0
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,502 @@
|
|||
use crate::{
|
||||
crypto::Blake2b, Commitment, Config, Epoch, Header, HeaderId, LeaderProof, Nonce, Nullifier,
|
||||
Slot,
|
||||
};
|
||||
use blake2::Digest;
|
||||
use rpds::HashTrieSet;
|
||||
use std::collections::HashMap;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
pub enum LedgerError {
|
||||
#[error("Commitment not found in the ledger state")]
|
||||
CommitmentNotFound,
|
||||
#[error("Nullifier already exists in the ledger state")]
|
||||
NullifierExists,
|
||||
#[error("Commitment already exists in the ledger state")]
|
||||
CommitmentExists,
|
||||
#[error("Invalid block slot {block:?} for parent slot {parent:?}")]
|
||||
InvalidSlot { parent: Slot, block: Slot },
|
||||
#[error("Parent block not found: {0:?}")]
|
||||
ParentNotFound(HeaderId),
|
||||
#[error("Orphan block missing: {0:?}. Importing leader proofs requires the block to be validated first")]
|
||||
OrphanMissing(HeaderId),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct EpochState {
|
||||
// The epoch this snapshot is for
|
||||
epoch: Epoch,
|
||||
// value of the ledger nonce after 'epoch_period_nonce_buffer' slots from the beginning of the epoch
|
||||
nonce: Nonce,
|
||||
// stake distribution snapshot taken at the beginning of the epoch
|
||||
// (in practice, this is equivalent to the coins that are spendable at the beginning of the epoch)
|
||||
commitments: HashTrieSet<Commitment>,
|
||||
}
|
||||
|
||||
impl EpochState {
|
||||
fn update_from_ledger(self, ledger: &LedgerState, config: &Config) -> Self {
|
||||
let nonce_snapshot_slot = config.nonce_snapshot(self.epoch);
|
||||
let nonce = if ledger.slot < nonce_snapshot_slot {
|
||||
ledger.nonce
|
||||
} else {
|
||||
self.nonce
|
||||
};
|
||||
|
||||
let stake_snapshot_slot = config.stake_distribution_snapshot(self.epoch);
|
||||
let commitments = if ledger.slot < stake_snapshot_slot {
|
||||
ledger.lead_commitments.clone()
|
||||
} else {
|
||||
self.commitments
|
||||
};
|
||||
Self {
|
||||
epoch: self.epoch,
|
||||
nonce,
|
||||
commitments,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_eligible_leader(&self, commitment: &Commitment) -> bool {
|
||||
self.commitments.contains(commitment)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct Ledger {
|
||||
states: HashMap<HeaderId, LedgerState>,
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl Ledger {
|
||||
pub fn from_genesis(id: HeaderId, state: LedgerState, config: Config) -> Self {
|
||||
Self {
|
||||
states: [(id, state)].into_iter().collect(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use = "this returns the result of the operation, without modifying the original"]
|
||||
pub fn try_apply_header(&self, header: &Header) -> Result<Self, LedgerError> {
|
||||
let parent_id = header.parent();
|
||||
let parent_state = self
|
||||
.states
|
||||
.get(&parent_id)
|
||||
.ok_or(LedgerError::ParentNotFound(parent_id))?;
|
||||
let config = self.config.clone();
|
||||
|
||||
let new_state = parent_state
|
||||
.clone()
|
||||
.try_apply_header(header, &self.config)?;
|
||||
|
||||
let mut states = self.states.clone();
|
||||
|
||||
states.insert(header.id(), new_state);
|
||||
|
||||
Ok(Self { states, config })
|
||||
}
|
||||
|
||||
pub fn state(&self, header_id: &HeaderId) -> Option<&LedgerState> {
|
||||
self.states.get(header_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq)]
|
||||
pub struct LedgerState {
|
||||
// commitments to coins that can be used to propose new blocks
|
||||
lead_commitments: HashTrieSet<Commitment>,
|
||||
// commitments to coins that can be spent, this is a superset of lead_commitments
|
||||
spend_commitments: HashTrieSet<Commitment>,
|
||||
nullifiers: HashTrieSet<Nullifier>,
|
||||
// randomness contribution
|
||||
nonce: Nonce,
|
||||
slot: Slot,
|
||||
// rolling snapshot of the state for the next epoch, used for epoch transitions
|
||||
next_epoch_state: EpochState,
|
||||
epoch_state: EpochState,
|
||||
}
|
||||
|
||||
impl LedgerState {
|
||||
fn try_apply_header(self, header: &Header, config: &Config) -> Result<Self, LedgerError> {
|
||||
// TODO: import leader proofs
|
||||
self.update_epoch_state(header.slot(), config)?
|
||||
.try_apply_leadership(header, config)
|
||||
}
|
||||
|
||||
fn update_epoch_state(self, slot: Slot, config: &Config) -> Result<Self, LedgerError> {
|
||||
if slot <= self.slot {
|
||||
return Err(LedgerError::InvalidSlot {
|
||||
parent: self.slot,
|
||||
block: slot,
|
||||
});
|
||||
}
|
||||
|
||||
let current_epoch = self.slot.epoch(config);
|
||||
let new_epoch = slot.epoch(config);
|
||||
|
||||
// there are 3 cases to consider:
|
||||
// 1. we are in the same epoch as the parent state
|
||||
// update the next epoch state
|
||||
// 2. we are in the next epoch
|
||||
// use the next epoch state as the current epoch state and reset next epoch state
|
||||
// 3. we are in the next-next or later epoch:
|
||||
// use the parent state as the epoch state and reset next epoch state
|
||||
|
||||
if current_epoch == new_epoch {
|
||||
// case 1)
|
||||
let next_epoch_state = self
|
||||
.next_epoch_state
|
||||
.clone()
|
||||
.update_from_ledger(&self, config);
|
||||
Ok(Self {
|
||||
slot,
|
||||
next_epoch_state,
|
||||
..self
|
||||
})
|
||||
} else if new_epoch == current_epoch + 1 {
|
||||
// case 2)
|
||||
let epoch_state = self.next_epoch_state.clone();
|
||||
let next_epoch_state = EpochState {
|
||||
epoch: new_epoch + 1,
|
||||
nonce: self.nonce,
|
||||
commitments: self.spend_commitments.clone(),
|
||||
};
|
||||
Ok(Self {
|
||||
slot,
|
||||
next_epoch_state,
|
||||
epoch_state,
|
||||
..self
|
||||
})
|
||||
} else {
|
||||
// case 3)
|
||||
let epoch_state = EpochState {
|
||||
epoch: new_epoch,
|
||||
nonce: self.nonce,
|
||||
commitments: self.spend_commitments.clone(),
|
||||
};
|
||||
let next_epoch_state = EpochState {
|
||||
epoch: new_epoch + 1,
|
||||
nonce: self.nonce,
|
||||
commitments: self.spend_commitments.clone(),
|
||||
};
|
||||
Ok(Self {
|
||||
slot,
|
||||
next_epoch_state,
|
||||
epoch_state,
|
||||
..self
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn try_apply_proof(self, proof: &LeaderProof, config: &Config) -> Result<Self, LedgerError> {
|
||||
assert_eq!(proof.slot().epoch(config), self.epoch_state.epoch);
|
||||
// The leadership coin either has to be in the state snapshot or be derived from
|
||||
// a coin that is in the state snapshot (i.e. be in the lead coins commitments)
|
||||
if !self.can_lead(proof.commitment())
|
||||
&& !self.epoch_state.is_eligible_leader(proof.commitment())
|
||||
{
|
||||
return Err(LedgerError::CommitmentNotFound);
|
||||
}
|
||||
|
||||
if self.is_nullified(proof.nullifier()) {
|
||||
return Err(LedgerError::NullifierExists);
|
||||
}
|
||||
|
||||
if self.is_committed(proof.evolved_commitment()) {
|
||||
return Err(LedgerError::CommitmentExists);
|
||||
}
|
||||
|
||||
let lead_commitments = self.lead_commitments.insert(*proof.evolved_commitment());
|
||||
let spend_commitments = self.spend_commitments.insert(*proof.evolved_commitment());
|
||||
let nullifiers = self.nullifiers.insert(*proof.nullifier());
|
||||
|
||||
Ok(Self {
|
||||
lead_commitments,
|
||||
spend_commitments,
|
||||
nullifiers,
|
||||
..self
|
||||
})
|
||||
}
|
||||
|
||||
fn try_apply_leadership(
|
||||
mut self,
|
||||
header: &Header,
|
||||
config: &Config,
|
||||
) -> Result<Self, LedgerError> {
|
||||
self = self
|
||||
.try_apply_proof(header.leader_proof(), config)?
|
||||
.update_nonce(header.leader_proof());
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
pub fn can_spend(&self, commitment: &Commitment) -> bool {
|
||||
self.spend_commitments.contains(commitment)
|
||||
}
|
||||
|
||||
pub fn can_lead(&self, commitment: &Commitment) -> bool {
|
||||
self.lead_commitments.contains(commitment)
|
||||
}
|
||||
|
||||
pub fn is_nullified(&self, nullifier: &Nullifier) -> bool {
|
||||
self.nullifiers.contains(nullifier)
|
||||
}
|
||||
|
||||
pub fn is_committed(&self, commitment: &Commitment) -> bool {
|
||||
// spendable coins are a superset of coins that can lead, so it's sufficient to check only one set
|
||||
self.spend_commitments.contains(commitment)
|
||||
}
|
||||
|
||||
fn update_nonce(self, proof: &LeaderProof) -> Self {
|
||||
Self {
|
||||
nonce: <[u8; 32]>::from(
|
||||
Blake2b::new_with_prefix("epoch-nonce".as_bytes())
|
||||
.chain_update(<[u8; 32]>::from(self.nonce))
|
||||
.chain_update(proof.nullifier())
|
||||
.chain_update(proof.slot().to_be_bytes())
|
||||
.finalize(),
|
||||
)
|
||||
.into(),
|
||||
..self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::Debug for LedgerState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("LedgerState")
|
||||
.field(
|
||||
"lead_commitment",
|
||||
&self.lead_commitments.iter().collect::<Vec<_>>(),
|
||||
)
|
||||
.field(
|
||||
"spend_commitments",
|
||||
&self.spend_commitments.iter().collect::<Vec<_>>(),
|
||||
)
|
||||
.field("nullifiers", &self.nullifiers.iter().collect::<Vec<_>>())
|
||||
.field("nonce", &self.nonce)
|
||||
.field("slot", &self.slot)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use crate::{ledger::LedgerError, Commitment, Header};
|
||||
|
||||
use super::{
|
||||
super::tests::{config, genesis_header, header, Coin},
|
||||
EpochState, Ledger, LedgerState,
|
||||
};
|
||||
|
||||
pub fn genesis_state(commitments: &[Commitment]) -> LedgerState {
|
||||
LedgerState {
|
||||
lead_commitments: commitments.iter().cloned().collect(),
|
||||
spend_commitments: commitments.iter().cloned().collect(),
|
||||
nullifiers: Default::default(),
|
||||
nonce: [0; 32].into(),
|
||||
slot: 0.into(),
|
||||
next_epoch_state: EpochState {
|
||||
epoch: 1.into(),
|
||||
nonce: [0; 32].into(),
|
||||
commitments: commitments.iter().cloned().collect(),
|
||||
},
|
||||
epoch_state: EpochState {
|
||||
epoch: 0.into(),
|
||||
nonce: [0; 32].into(),
|
||||
commitments: commitments.iter().cloned().collect(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn ledger(commitments: &[Commitment]) -> (Ledger, Header) {
|
||||
let genesis_state = genesis_state(commitments);
|
||||
let genesis_header = genesis_header();
|
||||
(
|
||||
Ledger::from_genesis(genesis_header.id(), genesis_state, config()),
|
||||
genesis_header,
|
||||
)
|
||||
}
|
||||
|
||||
fn apply_and_add_coin(mut ledger: Ledger, header: Header, coin: Coin) -> Ledger {
|
||||
let header_id = header.id();
|
||||
ledger = ledger.try_apply_header(&header).unwrap();
|
||||
// we still don't have transactions, so the only way to add a commitment to spendable commitments and
|
||||
// test epoch snapshotting is by doing this manually
|
||||
let mut block_state = ledger.states[&header_id].clone();
|
||||
block_state.spend_commitments = block_state.spend_commitments.insert(coin.commitment());
|
||||
ledger.states.insert(header_id, block_state);
|
||||
ledger
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ledger_state_prevents_coin_reuse() {
|
||||
let coin = Coin::new(0);
|
||||
let (mut ledger, genesis) = ledger(&[coin.commitment()]);
|
||||
let h = header(1, genesis.id(), coin);
|
||||
ledger = ledger.try_apply_header(&h).unwrap();
|
||||
|
||||
// reusing the same coin should be prevented
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&header(2, h.id(), coin)),
|
||||
Err(LedgerError::NullifierExists),
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ledger_state_uncommited_coin() {
|
||||
let coin = Coin::new(0);
|
||||
let (ledger, genesis) = ledger(&[]);
|
||||
let h = header(1, genesis.id(), coin);
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&h),
|
||||
Err(LedgerError::CommitmentNotFound),
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ledger_state_is_properly_updated_on_reorg() {
|
||||
let coin_1 = Coin::new(0);
|
||||
let coin_2 = Coin::new(1);
|
||||
let coin_3 = Coin::new(2);
|
||||
|
||||
let (mut ledger, genesis) = ledger(&[
|
||||
coin_1.commitment(),
|
||||
coin_2.commitment(),
|
||||
coin_3.commitment(),
|
||||
]);
|
||||
|
||||
// coin_1 & coin_2 both concurrently win slot 0
|
||||
let h_1 = header(1, genesis.id(), coin_1);
|
||||
let h_2 = header(1, genesis.id(), coin_2);
|
||||
|
||||
ledger = ledger.try_apply_header(&h_1).unwrap();
|
||||
ledger = ledger.try_apply_header(&h_2).unwrap();
|
||||
|
||||
// then coin_3 wins slot 1 and chooses to extend from block_2
|
||||
let h_3 = header(2, h_2.id(), coin_3);
|
||||
ledger = ledger.try_apply_header(&h_3).unwrap();
|
||||
// coin 1 is not spent in the chain that ends with block_3
|
||||
assert!(!ledger.states[&h_3.id()].is_nullified(&coin_1.nullifier()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_transition() {
|
||||
let coins = (0..4).map(Coin::new).collect::<Vec<_>>();
|
||||
let coin_4 = Coin::new(4);
|
||||
let coin_5 = Coin::new(5);
|
||||
let (mut ledger, genesis) =
|
||||
ledger(&coins.iter().map(|c| c.commitment()).collect::<Vec<_>>());
|
||||
|
||||
// An epoch will be 10 slots long, with stake distribution snapshot taken at the start of the epoch
|
||||
// and nonce snapshot before slot 7
|
||||
|
||||
let h_1 = header(1, genesis.id(), coins[0]);
|
||||
ledger = ledger.try_apply_header(&h_1).unwrap();
|
||||
assert_eq!(ledger.states[&h_1.id()].epoch_state.epoch, 0.into());
|
||||
|
||||
let h_2 = header(6, h_1.id(), coins[1]);
|
||||
ledger = ledger.try_apply_header(&h_2).unwrap();
|
||||
|
||||
let h_3 = header(9, h_2.id(), coins[2]);
|
||||
ledger = apply_and_add_coin(ledger, h_3.clone(), coin_4);
|
||||
|
||||
// test epoch jump
|
||||
let h_4 = header(20, h_3.id(), coins[3]);
|
||||
ledger = ledger.try_apply_header(&h_4).unwrap();
|
||||
// nonce for epoch 2 should be taken at the end of slot 16, but in our case the last block is at slot 9
|
||||
assert_eq!(
|
||||
ledger.states[&h_4.id()].epoch_state.nonce,
|
||||
ledger.states[&h_3.id()].nonce,
|
||||
);
|
||||
// stake distribution snapshot should be taken at the end of slot 9
|
||||
assert_eq!(
|
||||
ledger.states[&h_4.id()].epoch_state.commitments,
|
||||
ledger.states[&h_3.id()].spend_commitments,
|
||||
);
|
||||
|
||||
// nonce for epoch 1 should be taken at the end of slot 6
|
||||
let h_5 = header(10, h_3.id(), coins[3]);
|
||||
ledger = apply_and_add_coin(ledger, h_5.clone(), coin_5);
|
||||
assert_eq!(
|
||||
ledger.states[&h_5.id()].epoch_state.nonce,
|
||||
ledger.states[&h_2.id()].nonce,
|
||||
);
|
||||
|
||||
let h_6 = header(20, h_5.id(), coins[3].evolve());
|
||||
ledger = ledger.try_apply_header(&h_6).unwrap();
|
||||
// stake distribution snapshot should be taken at the end of slot 9, check that changes in slot 10
|
||||
// are ignored
|
||||
assert_eq!(
|
||||
ledger.states[&h_6.id()].epoch_state.commitments,
|
||||
ledger.states[&h_3.id()].spend_commitments,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evolved_coin_is_eligible_for_leadership() {
|
||||
let coin = Coin::new(0);
|
||||
let (mut ledger, genesis) = ledger(&[coin.commitment()]);
|
||||
let h = header(1, genesis.id(), coin);
|
||||
ledger = ledger.try_apply_header(&h).unwrap();
|
||||
|
||||
// reusing the same coin should be prevented
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&header(2, h.id(), coin)),
|
||||
Err(LedgerError::NullifierExists),
|
||||
));
|
||||
|
||||
// the evolved coin is not eligible before block 2 as it has not appeared on the ledger yet
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&header(2, genesis.id(), coin.evolve())),
|
||||
Err(LedgerError::CommitmentNotFound),
|
||||
));
|
||||
|
||||
// the evolved coin is eligible after coin 1 is spent
|
||||
assert!(ledger
|
||||
.try_apply_header(&header(2, h.id(), coin.evolve()))
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new_coins_becoming_eligible_after_stake_distribution_stabilizes() {
|
||||
let coin = Coin::new(0);
|
||||
let coin_1 = Coin::new(1);
|
||||
let (mut ledger, genesis) = ledger(&[coin.commitment()]);
|
||||
|
||||
// EPOCH 0
|
||||
let h_0_1 = header(1, genesis.id(), coin);
|
||||
// mint a new coin to be used for leader elections in upcoming epochs
|
||||
ledger = apply_and_add_coin(ledger, h_0_1.clone(), coin_1);
|
||||
|
||||
let h_0_2 = header(2, h_0_1.id(), coin_1);
|
||||
// the new coin is not yet eligible for leader elections
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&h_0_2),
|
||||
Err(LedgerError::CommitmentNotFound),
|
||||
));
|
||||
|
||||
// but the evolved coin can
|
||||
let h_0_2 = header(2, h_0_1.id(), coin.evolve());
|
||||
ledger = ledger.try_apply_header(&h_0_2).unwrap();
|
||||
|
||||
// EPOCH 1
|
||||
for i in 10..20 {
|
||||
// the newly minted coin is still not eligible in the following epoch since the
|
||||
// stake distribution snapshot is taken at the beginning of the previous epoch
|
||||
assert!(matches!(
|
||||
ledger.try_apply_header(&header(i, h_0_2.id(), coin_1)),
|
||||
Err(LedgerError::CommitmentNotFound),
|
||||
));
|
||||
}
|
||||
|
||||
// EPOCH 2
|
||||
// the coin is finally eligible 2 epochs after it was first minted
|
||||
let h_2_0 = header(20, h_0_2.id(), coin_1);
|
||||
ledger = ledger.try_apply_header(&h_2_0).unwrap();
|
||||
|
||||
// and now the minted coin can freely use the evolved coin for subsequent blocks
|
||||
let h_2_1 = header(21, h_2_0.id(), coin_1.evolve());
|
||||
ledger.try_apply_header(&h_2_1).unwrap();
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,393 @@
|
|||
pub mod block;
|
||||
pub mod config;
|
||||
pub mod crypto;
|
||||
pub mod leader_proof;
|
||||
pub mod ledger;
|
||||
pub mod time;
|
||||
|
||||
pub use block::*;
|
||||
pub use config::*;
|
||||
pub use leader_proof::*;
|
||||
use ledger::{Ledger, LedgerState};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use thiserror::Error;
|
||||
pub use time::*;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Cryptarchia {
|
||||
local_chain: Branch,
|
||||
branches: Branches,
|
||||
ledger: Ledger,
|
||||
config: Config,
|
||||
genesis: HeaderId,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Branches {
|
||||
branches: HashMap<HeaderId, Branch>,
|
||||
tips: HashSet<HeaderId>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Branch {
|
||||
header: Header,
|
||||
// chain length
|
||||
length: u64,
|
||||
}
|
||||
|
||||
impl Branches {
|
||||
pub fn from_genesis(genesis: &Header) -> Self {
|
||||
let mut branches = HashMap::new();
|
||||
branches.insert(
|
||||
genesis.id(),
|
||||
Branch {
|
||||
header: genesis.clone(),
|
||||
length: 0,
|
||||
},
|
||||
);
|
||||
let tips = HashSet::from([genesis.id()]);
|
||||
Self { branches, tips }
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn apply_header(&self, header: Header) -> Self {
|
||||
let mut branches = self.branches.clone();
|
||||
let mut tips = self.tips.clone();
|
||||
// if the parent was the head of a branch, remove it as it has been superseded by the new header
|
||||
tips.remove(&header.parent());
|
||||
let length = branches[&header.parent()].length + 1;
|
||||
tips.insert(header.id());
|
||||
branches.insert(header.id(), Branch { header, length });
|
||||
|
||||
Self { branches, tips }
|
||||
}
|
||||
|
||||
pub fn branches(&self) -> Vec<Branch> {
|
||||
self.tips
|
||||
.iter()
|
||||
.map(|id| self.branches[id].clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
// find the lowest common ancestor of two branches
|
||||
pub fn lca<'a>(&'a self, mut b1: &'a Branch, mut b2: &'a Branch) -> Branch {
|
||||
// first reduce branches to the same length
|
||||
while b1.length > b2.length {
|
||||
b1 = &self.branches[&b1.header.parent()];
|
||||
}
|
||||
|
||||
while b2.length > b1.length {
|
||||
b2 = &self.branches[&b2.header.parent()];
|
||||
}
|
||||
|
||||
// then walk up the chain until we find the common ancestor
|
||||
while b1.header.id() != b2.header.id() {
|
||||
b1 = &self.branches[&b1.header.parent()];
|
||||
b2 = &self.branches[&b2.header.parent()];
|
||||
}
|
||||
|
||||
b1.clone()
|
||||
}
|
||||
|
||||
pub fn get(&self, id: &HeaderId) -> Option<&Branch> {
|
||||
self.branches.get(id)
|
||||
}
|
||||
|
||||
// Walk back the chain until the target slot
|
||||
fn walk_back_before(&self, branch: &Branch, slot: Slot) -> Branch {
|
||||
let mut current = branch;
|
||||
while current.header.slot() > slot {
|
||||
current = &self.branches[&current.header.parent()];
|
||||
}
|
||||
current.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum Error {
|
||||
#[error("Ledger error: {0}")]
|
||||
LedgerError(#[from] ledger::LedgerError),
|
||||
#[error("Parent block: {0:?} is not know to this node")]
|
||||
ParentMissing(HeaderId),
|
||||
#[error("Orphan proof has was not found in the ledger: {0:?}, can't import it")]
|
||||
OrphanMissing(HeaderId),
|
||||
}
|
||||
|
||||
impl Cryptarchia {
|
||||
pub fn from_genesis(header: Header, state: LedgerState, config: Config) -> Self {
|
||||
assert_eq!(header.slot(), Slot::genesis());
|
||||
Self {
|
||||
ledger: Ledger::from_genesis(header.id(), state, config.clone()),
|
||||
branches: Branches::from_genesis(&header),
|
||||
local_chain: Branch {
|
||||
header: header.clone(),
|
||||
length: 0,
|
||||
},
|
||||
config,
|
||||
genesis: header.id(),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use = "this returns the result of the operation, without modifying the original"]
|
||||
pub fn receive_block(&self, block: Block) -> Result<Self, Error> {
|
||||
let header = block.header();
|
||||
|
||||
let mut new: Self = self.clone();
|
||||
new.branches = new.branches.apply_header(header.clone());
|
||||
new.ledger = new.ledger.try_apply_header(header)?;
|
||||
new.local_chain = new.fork_choice();
|
||||
|
||||
Ok(new)
|
||||
}
|
||||
|
||||
pub fn fork_choice(&self) -> Branch {
|
||||
let k = self.config.security_param as u64;
|
||||
let s = self.config.s();
|
||||
Self::maxvalid_bg(self.local_chain.clone(), &self.branches, k, s)
|
||||
}
|
||||
|
||||
pub fn tip(&self) -> &Header {
|
||||
&self.local_chain.header
|
||||
}
|
||||
|
||||
pub fn tip_id(&self) -> HeaderId {
|
||||
self.local_chain.header.id()
|
||||
}
|
||||
|
||||
// prune all states deeper than 'depth' with regard to the current
|
||||
// local chain except for states belonging to the local chain
|
||||
pub fn prune_forks(&mut self, _depth: u64) {
|
||||
todo!()
|
||||
}
|
||||
|
||||
pub fn genesis(&self) -> &HeaderId {
|
||||
&self.genesis
|
||||
}
|
||||
|
||||
pub fn branches(&self) -> &Branches {
|
||||
&self.branches
|
||||
}
|
||||
|
||||
// Implementation of the fork choice rule as defined in the Ouroboros Genesis paper
|
||||
// k defines the forking depth of chain we accept without more analysis
|
||||
// s defines the length of time (in slots) after the fork happened that we will inspect for chain density
|
||||
fn maxvalid_bg(local_chain: Branch, branches: &Branches, k: u64, s: u64) -> Branch {
|
||||
let mut cmax = local_chain;
|
||||
let forks = branches.branches();
|
||||
for chain in forks {
|
||||
let lowest_common_ancestor = branches.lca(&cmax, &chain);
|
||||
let m = cmax.length - lowest_common_ancestor.length;
|
||||
if m <= k {
|
||||
// Classic longest chain rule with parameter k
|
||||
if cmax.length < chain.length {
|
||||
cmax = chain;
|
||||
} else {
|
||||
println!(
|
||||
"shorter {:?} {} {}",
|
||||
chain.header.id(),
|
||||
cmax.length,
|
||||
chain.length
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// The chain is forking too much, we need to pay a bit more attention
|
||||
// In particular, select the chain that is the densest after the fork
|
||||
let density_slot = Slot::from(u64::from(lowest_common_ancestor.header.slot()) + s);
|
||||
let cmax_density = branches.walk_back_before(&cmax, density_slot).length;
|
||||
let candidate_density = branches.walk_back_before(&chain, density_slot).length;
|
||||
if cmax_density < candidate_density {
|
||||
cmax = chain;
|
||||
} else {
|
||||
println!(
|
||||
"less dense {:?} {} {}",
|
||||
chain.header.id(),
|
||||
cmax_density,
|
||||
candidate_density
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
cmax
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use crate::{
|
||||
crypto::Blake2b, Block, Commitment, Config, Header, HeaderId, LeaderProof, Nullifier, Slot,
|
||||
TimeConfig,
|
||||
};
|
||||
use blake2::Digest;
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
|
||||
use super::{ledger::tests::genesis_state, Cryptarchia};
|
||||
|
||||
pub fn header(slot: impl Into<Slot>, parent: HeaderId, coin: Coin) -> Header {
|
||||
let slot = slot.into();
|
||||
Header::new(parent, 0, [0; 32].into(), slot, coin.to_proof(slot))
|
||||
}
|
||||
|
||||
pub fn block(slot: impl Into<Slot>, parent: HeaderId, coin: Coin) -> Block {
|
||||
Block::new(header(slot, parent, coin))
|
||||
}
|
||||
|
||||
pub fn propose_and_evolve(
|
||||
slot: impl Into<Slot>,
|
||||
parent: HeaderId,
|
||||
coin: &mut Coin,
|
||||
eng: &mut Cryptarchia,
|
||||
) -> HeaderId {
|
||||
let b = block(slot, parent, *coin);
|
||||
let id = b.header().id();
|
||||
*eng = eng.receive_block(b).unwrap();
|
||||
*coin = coin.evolve();
|
||||
id
|
||||
}
|
||||
|
||||
pub fn genesis_header() -> Header {
|
||||
Header::new(
|
||||
[0; 32].into(),
|
||||
0,
|
||||
[0; 32].into(),
|
||||
0.into(),
|
||||
LeaderProof::dummy(0.into()),
|
||||
)
|
||||
}
|
||||
|
||||
fn engine(commitments: &[Commitment]) -> Cryptarchia {
|
||||
Cryptarchia::from_genesis(genesis_header(), genesis_state(commitments), config())
|
||||
}
|
||||
|
||||
pub fn config() -> Config {
|
||||
Config {
|
||||
security_param: 1,
|
||||
active_slot_coeff: 1.0,
|
||||
epoch_stake_distribution_stabilization: 4,
|
||||
epoch_period_nonce_buffer: 3,
|
||||
epoch_period_nonce_stabilization: 3,
|
||||
time: TimeConfig {
|
||||
slot_duration: 1,
|
||||
chain_start_time: 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Coin {
|
||||
sk: u64,
|
||||
nonce: u64,
|
||||
}
|
||||
|
||||
impl Coin {
|
||||
pub fn new(sk: u64) -> Self {
|
||||
Self { sk, nonce: 0 }
|
||||
}
|
||||
|
||||
pub fn commitment(&self) -> Commitment {
|
||||
<[u8; 32]>::from(
|
||||
Blake2b::new_with_prefix("commitment".as_bytes())
|
||||
.chain_update(self.sk.to_be_bytes())
|
||||
.chain_update(self.nonce.to_be_bytes())
|
||||
.finalize(),
|
||||
)
|
||||
.into()
|
||||
}
|
||||
|
||||
pub fn nullifier(&self) -> Nullifier {
|
||||
<[u8; 32]>::from(
|
||||
Blake2b::new_with_prefix("nullifier".as_bytes())
|
||||
.chain_update(self.sk.to_be_bytes())
|
||||
.chain_update(self.nonce.to_be_bytes())
|
||||
.finalize(),
|
||||
)
|
||||
.into()
|
||||
}
|
||||
|
||||
pub fn evolve(&self) -> Self {
|
||||
let mut h = DefaultHasher::new();
|
||||
self.nonce.hash(&mut h);
|
||||
let nonce = h.finish();
|
||||
Self { sk: self.sk, nonce }
|
||||
}
|
||||
|
||||
pub fn to_proof(&self, slot: Slot) -> LeaderProof {
|
||||
LeaderProof::new(
|
||||
self.commitment(),
|
||||
self.nullifier(),
|
||||
slot,
|
||||
self.evolve().commitment(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fork_choice() {
|
||||
let mut long_coin = Coin::new(0);
|
||||
let mut short_coin = Coin::new(1);
|
||||
let mut long_dense_coin = Coin::new(2);
|
||||
// TODO: use cryptarchia
|
||||
let mut engine = engine(&[
|
||||
long_coin.commitment(),
|
||||
short_coin.commitment(),
|
||||
long_dense_coin.commitment(),
|
||||
]);
|
||||
// by setting a low k we trigger the density choice rule, and the shorter chain is denser after
|
||||
// the fork
|
||||
engine.config.security_param = 10;
|
||||
|
||||
let mut parent = *engine.genesis();
|
||||
for i in 1..50 {
|
||||
parent = propose_and_evolve(i, parent, &mut long_coin, &mut engine);
|
||||
println!("{:?}", engine.tip());
|
||||
}
|
||||
println!("{:?}", engine.tip());
|
||||
assert_eq!(engine.tip_id(), parent);
|
||||
|
||||
let mut long_p = parent;
|
||||
let mut short_p = parent;
|
||||
// the node sees first the short chain
|
||||
for slot in 50..70 {
|
||||
short_p = propose_and_evolve(slot, short_p, &mut short_coin, &mut engine);
|
||||
}
|
||||
|
||||
assert_eq!(engine.tip_id(), short_p);
|
||||
|
||||
// then it receives a longer chain which is however less dense after the fork
|
||||
for slot in 50..70 {
|
||||
if slot % 2 == 0 {
|
||||
long_p = propose_and_evolve(slot, long_p, &mut long_coin, &mut engine);
|
||||
}
|
||||
assert_eq!(engine.tip_id(), short_p);
|
||||
}
|
||||
// even if the long chain is much longer, it will never be accepted as it's not dense enough
|
||||
for slot in 70..100 {
|
||||
long_p = propose_and_evolve(slot, long_p, &mut long_coin, &mut engine);
|
||||
assert_eq!(engine.tip_id(), short_p);
|
||||
}
|
||||
|
||||
let bs = engine.branches().branches();
|
||||
let long_branch = bs.iter().find(|b| b.header.id() == long_p).unwrap();
|
||||
let short_branch = bs.iter().find(|b| b.header.id() == short_p).unwrap();
|
||||
assert!(long_branch.length > short_branch.length);
|
||||
|
||||
// however, if we set k to the fork length, it will be accepted
|
||||
let k = long_branch.length;
|
||||
assert_eq!(
|
||||
Cryptarchia::maxvalid_bg(
|
||||
short_branch.clone(),
|
||||
engine.branches(),
|
||||
k,
|
||||
engine.config.s()
|
||||
)
|
||||
.header
|
||||
.id(),
|
||||
long_p
|
||||
);
|
||||
|
||||
// a longer chain which is equally dense after the fork will be selected as the main tip
|
||||
for slot in 50..71 {
|
||||
parent = propose_and_evolve(slot, parent, &mut long_dense_coin, &mut engine);
|
||||
}
|
||||
assert_eq!(engine.tip_id(), parent);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,62 @@
|
|||
use crate::config::Config;
|
||||
use std::ops::Add;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
|
||||
pub struct Slot(u64);
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash, PartialOrd, Ord)]
|
||||
pub struct Epoch(u32);
|
||||
|
||||
impl Slot {
|
||||
pub fn to_be_bytes(&self) -> [u8; 8] {
|
||||
self.0.to_be_bytes()
|
||||
}
|
||||
|
||||
pub fn genesis() -> Self {
|
||||
Self(0)
|
||||
}
|
||||
|
||||
pub fn epoch(&self, config: &Config) -> Epoch {
|
||||
Epoch((self.0 / config.epoch_length()) as u32)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u32> for Epoch {
|
||||
fn from(epoch: u32) -> Self {
|
||||
Self(epoch)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Epoch> for u32 {
|
||||
fn from(epoch: Epoch) -> Self {
|
||||
epoch.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for Slot {
|
||||
fn from(slot: u64) -> Self {
|
||||
Self(slot)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Slot> for u64 {
|
||||
fn from(slot: Slot) -> Self {
|
||||
slot.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<u64> for Slot {
|
||||
type Output = Slot;
|
||||
|
||||
fn add(self, rhs: u64) -> Self::Output {
|
||||
Slot(self.0 + rhs)
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<u32> for Epoch {
|
||||
type Output = Epoch;
|
||||
|
||||
fn add(self, rhs: u32) -> Self::Output {
|
||||
Epoch(self.0 + rhs)
|
||||
}
|
||||
}
|
|
@@ -6,9 +6,9 @@ consensus:
|
|||
private_key: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
||||
fountain_settings: null
|
||||
overlay_settings:
|
||||
nodes: [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]
|
||||
nodes: ["0000000000000000000000000000000000000000000000000000000000000000", "0000000000000000000000000000000000000000000000000000000000000001"]
|
||||
number_of_committees: 1
|
||||
current_leader: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
||||
current_leader: 0000000000000000000000000000000000000000000000000000000000000000
|
||||
leader:
|
||||
cur: 0
|
||||
committee_membership: !Sad
|
||||
|
|
|
@@ -48,6 +48,8 @@ use ratatui::{
|
|||
use tui_input::{backend::crossterm::EventHandler, Input};
|
||||
|
||||
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(120);
|
||||
// Limit the maximum number of in-flight requests
|
||||
const MAX_BUFFERED_REQUESTS: usize = 20;
|
||||
|
||||
#[derive(Clone, Debug, Args)]
|
||||
/// The almost-instant messaging protocol.
|
||||
|
@@ -61,6 +63,12 @@ pub struct NomosChat {
|
|||
/// The node to connect to to fetch blocks and blobs
|
||||
#[clap(long)]
|
||||
pub node: Url,
|
||||
/// Author for non interactive message formation
|
||||
#[clap(long, requires("message"))]
|
||||
pub author: Option<String>,
|
||||
/// Message for non interactive message formation
|
||||
#[clap(long, requires("author"))]
|
||||
pub message: Option<String>,
|
||||
}
|
||||
|
||||
pub struct App {
|
||||
|
@@ -84,12 +92,6 @@ impl NomosChat {
|
|||
<NetworkService<Libp2p> as ServiceData>::Settings,
|
||||
>(std::fs::File::open(&self.network_config)?)?;
|
||||
let da_protocol = self.da_protocol.clone();
|
||||
// setup terminal
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
let node_addr = Some(self.node.clone());
|
||||
|
||||
|
@@ -123,6 +125,21 @@ impl NomosChat {
|
|||
.wait_finished()
|
||||
});
|
||||
|
||||
if let Some(author) = self.author.as_ref() {
|
||||
let message = self
|
||||
.message
|
||||
.as_ref()
|
||||
.expect("Should be available if author is set");
|
||||
return run_once(author, message, payload_sender);
|
||||
}
|
||||
|
||||
// setup terminal
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
let app = App {
|
||||
input: Input::default(),
|
||||
username: None,
|
||||
|
@@ -152,6 +169,24 @@ impl NomosChat {
|
|||
}
|
||||
}
|
||||
|
||||
fn run_once(
|
||||
author: &str,
|
||||
message: &str,
|
||||
payload_sender: UnboundedSender<Box<[u8]>>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
payload_sender.send(
|
||||
wire::serialize(&ChatMessage {
|
||||
author: author.to_string(),
|
||||
message: message.to_string(),
|
||||
_nonce: rand::random(),
|
||||
})
|
||||
.unwrap()
|
||||
.into(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_app<B: Backend>(terminal: &mut Terminal<B>, mut app: App) {
|
||||
let (message_tx, message_rx) = std::sync::mpsc::channel();
|
||||
let node = app.node.clone();
|
||||
|
@@ -299,7 +334,7 @@ async fn fetch_new_messages(
|
|||
|
||||
process_block_blobs(node, block, da_settings)
|
||||
})
|
||||
.buffer_unordered(new_blocks.len())
|
||||
.buffered(MAX_BUFFERED_REQUESTS)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
|
|
|
@@ -9,11 +9,11 @@ use reqwest::Url;
|
|||
use std::{path::PathBuf, sync::Arc, time::Duration};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
#[derive(Args, Debug, Default)]
|
||||
pub struct Disseminate {
|
||||
// TODO: accept bytes
|
||||
#[clap(short, long)]
|
||||
pub data: String,
|
||||
#[clap(short, long, required_unless_present("file"))]
|
||||
pub data: Option<String>,
|
||||
/// Path to the network config file
|
||||
#[clap(short, long)]
|
||||
pub network_config: PathBuf,
|
||||
|
@@ -30,6 +30,9 @@ pub struct Disseminate {
|
|||
/// File to write the certificate to, if present.
|
||||
#[clap(long)]
|
||||
pub output: Option<PathBuf>,
|
||||
/// File to disseminate
|
||||
#[clap(short, long)]
|
||||
pub file: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Disseminate {
|
||||
|
@@ -41,7 +44,15 @@ impl Disseminate {
|
|||
<NetworkService<Libp2p> as ServiceData>::Settings,
|
||||
>(std::fs::File::open(&self.network_config)?)?;
|
||||
let (status_updates, rx) = std::sync::mpsc::channel();
|
||||
let bytes: Box<[u8]> = self.data.clone().as_bytes().into();
|
||||
|
||||
let bytes: Box<[u8]> = if let Some(data) = &self.data {
|
||||
data.clone().as_bytes().into()
|
||||
} else {
|
||||
let file_path = self.file.as_ref().unwrap();
|
||||
let file_bytes = std::fs::read(file_path)?;
|
||||
file_bytes.into_boxed_slice()
|
||||
};
|
||||
|
||||
let timeout = Duration::from_secs(self.timeout);
|
||||
let da_protocol = self.da_protocol.clone();
|
||||
let node_addr = self.node_addr.clone();
|
||||
|
|
|
@@ -205,7 +205,7 @@ impl ServiceCore for DisseminateService {
|
|||
// protocols, but only the one chosen will be used.
|
||||
// We can enforce that only sensible combinations of protocol/settings
|
||||
// are specified by using special clap directives
|
||||
#[derive(Clone, Debug, Args)]
|
||||
#[derive(Clone, Debug, Args, Default)]
|
||||
pub struct DaProtocolChoice {
|
||||
#[clap(long, default_value = "full-replication")]
|
||||
pub da_protocol: Protocol,
|
||||
|
@@ -227,14 +227,15 @@ impl TryFrom<DaProtocolChoice> for FullReplication<AbsoluteNumber<Attestation, C
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Args)]
|
||||
#[derive(Clone, Debug, Args, Default)]
|
||||
pub struct ProtocolSettings {
|
||||
#[clap(flatten)]
|
||||
pub full_replication: FullReplicationSettings,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, ValueEnum)]
|
||||
#[derive(Clone, Debug, ValueEnum, Default)]
|
||||
pub enum Protocol {
|
||||
#[default]
|
||||
FullReplication,
|
||||
}
|
||||
|
||||
|
|
|
@@ -20,5 +20,16 @@ pub async fn get_block_blobs(node: &Url, block: &BlockId) -> Result<Vec<Blob>, E
|
|||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
Ok(get_blobs(node, block.blobs().map(|cert| cert.blob()).collect()).await?)
|
||||
let blobs = block.blobs().map(|cert| cert.blob()).collect::<Vec<_>>();
|
||||
|
||||
if blobs.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let n_blobs = blobs.len();
|
||||
let resp = get_blobs(node, blobs).await?;
|
||||
if resp.len() != n_blobs {
|
||||
tracing::warn!("Only {}/{} blobs returned", resp.len(), n_blobs);
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
|
|
|
@@ -13,6 +13,7 @@ bytes = "1.2"
|
|||
overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" }
|
||||
serde = "1.0"
|
||||
sled = { version = "0.34", optional = true }
|
||||
rocksdb = { version = "0.22", optional = true }
|
||||
thiserror = "1.0"
|
||||
tracing = "0.1"
|
||||
|
||||
|
@@ -24,3 +25,9 @@ tempfile = "3"
|
|||
default = []
|
||||
mock = []
|
||||
sled-backend = ["sled"]
|
||||
rocksdb-backend = ["rocksdb"]
|
||||
|
||||
[[bin]]
|
||||
name = "rocks"
|
||||
path = "src/bin/rocks.rs"
|
||||
required-features = ["rocksdb-backend"]
|
||||
|
|
|
@@ -3,6 +3,9 @@ pub mod mock;
|
|||
#[cfg(feature = "sled")]
|
||||
pub mod sled;
|
||||
|
||||
#[cfg(feature = "rocksdb")]
|
||||
pub mod rocksdb;
|
||||
|
||||
// std
|
||||
use std::error::Error;
|
||||
// crates
|
||||
|
|
|
@@ -0,0 +1,252 @@
|
|||
// std
|
||||
use std::path::PathBuf;
|
||||
use std::{marker::PhantomData, sync::Arc};
|
||||
// crates
|
||||
use async_trait::async_trait;
|
||||
use bytes::Bytes;
|
||||
pub use rocksdb::Error;
|
||||
use rocksdb::{Options, DB};
|
||||
// internal
|
||||
use super::{StorageBackend, StorageSerde, StorageTransaction};
|
||||
|
||||
/// Rocks backend setting
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RocksBackendSettings {
|
||||
/// File path to the db file
|
||||
pub db_path: PathBuf,
|
||||
pub read_only: bool,
|
||||
pub column_family: Option<String>,
|
||||
}
|
||||
|
||||
/// Rocks transaction type
|
||||
// Do not use `TransactionDB` here, because rocksdb's `TransactionDB` does not support opening in read-only mode.
|
||||
// Thus, we cannot open the same db in two or more processes.
|
||||
pub struct Transaction {
|
||||
rocks: Arc<DB>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
executor: Box<dyn FnOnce(&DB) -> Result<Option<Bytes>, Error> + Send + Sync>,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
/// Execute a function over the transaction
|
||||
pub fn execute(self) -> Result<Option<Bytes>, Error> {
|
||||
(self.executor)(&self.rocks)
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageTransaction for Transaction {
|
||||
type Result = Result<Option<Bytes>, Error>;
|
||||
type Transaction = Self;
|
||||
}
|
||||
|
||||
/// Rocks storage backend
|
||||
|
||||
pub struct RocksBackend<SerdeOp> {
|
||||
rocks: Arc<DB>,
|
||||
_serde_op: PhantomData<SerdeOp>,
|
||||
}
|
||||
|
||||
impl<SerdeOp> RocksBackend<SerdeOp> {
|
||||
pub fn txn(
|
||||
&self,
|
||||
executor: impl FnOnce(&DB) -> Result<Option<Bytes>, Error> + Send + Sync + 'static,
|
||||
) -> Transaction {
|
||||
Transaction {
|
||||
rocks: self.rocks.clone(),
|
||||
executor: Box::new(executor),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<SerdeOp> core::fmt::Debug for RocksBackend<SerdeOp> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
format!("RocksBackend {{ rocks: {:?} }}", self.rocks).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<SerdeOp: StorageSerde + Send + Sync + 'static> StorageBackend for RocksBackend<SerdeOp> {
|
||||
type Settings = RocksBackendSettings;
|
||||
type Error = rocksdb::Error;
|
||||
type Transaction = Transaction;
|
||||
type SerdeOperator = SerdeOp;
|
||||
|
||||
fn new(config: Self::Settings) -> Result<Self, Self::Error> {
|
||||
let RocksBackendSettings {
|
||||
db_path,
|
||||
read_only,
|
||||
column_family: cf,
|
||||
} = config;
|
||||
|
||||
let db = match (read_only, cf) {
|
||||
(true, None) => {
|
||||
let mut opts = Options::default();
|
||||
opts.create_if_missing(false);
|
||||
DB::open_for_read_only(&opts, db_path, false)?
|
||||
}
|
||||
(true, Some(cf)) => {
|
||||
let mut opts = Options::default();
|
||||
opts.create_if_missing(false);
|
||||
DB::open_cf_for_read_only(&opts, db_path, [cf], false)?
|
||||
}
|
||||
(false, None) => {
|
||||
let mut opts = Options::default();
|
||||
opts.create_if_missing(true);
|
||||
opts.create_missing_column_families(true);
|
||||
DB::open(&opts, db_path)?
|
||||
}
|
||||
(false, Some(cf)) => {
|
||||
let mut opts = Options::default();
|
||||
opts.create_if_missing(true);
|
||||
opts.create_missing_column_families(true);
|
||||
DB::open_cf(&opts, db_path, [cf])?
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
rocks: Arc::new(db),
|
||||
_serde_op: Default::default(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn store(&mut self, key: Bytes, value: Bytes) -> Result<(), Self::Error> {
|
||||
self.rocks.put(key, value)
|
||||
}
|
||||
|
||||
async fn load(&mut self, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {
|
||||
self.rocks.get(key).map(|opt| opt.map(|ivec| ivec.into()))
|
||||
}
|
||||
|
||||
async fn remove(&mut self, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {
|
||||
self.load(key).await.and_then(|val| {
|
||||
if val.is_some() {
|
||||
self.rocks.delete(key).map(|_| val)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(
|
||||
&mut self,
|
||||
transaction: Self::Transaction,
|
||||
) -> Result<<Self::Transaction as StorageTransaction>::Result, Self::Error> {
|
||||
Ok(transaction.execute())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::super::testing::NoStorageSerde;
|
||||
use super::*;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_store_load_remove(
|
||||
) -> Result<(), <RocksBackend<NoStorageSerde> as StorageBackend>::Error> {
|
||||
let temp_path = TempDir::new().unwrap();
|
||||
let sled_settings = RocksBackendSettings {
|
||||
db_path: temp_path.path().to_path_buf(),
|
||||
read_only: false,
|
||||
column_family: None,
|
||||
};
|
||||
let key = "foo";
|
||||
let value = "bar";
|
||||
|
||||
let mut db: RocksBackend<NoStorageSerde> = RocksBackend::new(sled_settings)?;
|
||||
db.store(key.as_bytes().into(), value.as_bytes().into())
|
||||
.await?;
|
||||
let load_value = db.load(key.as_bytes()).await?;
|
||||
assert_eq!(load_value, Some(value.as_bytes().into()));
|
||||
let removed_value = db.remove(key.as_bytes()).await?;
|
||||
assert_eq!(removed_value, Some(value.as_bytes().into()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_transaction(
|
||||
) -> Result<(), <RocksBackend<NoStorageSerde> as StorageBackend>::Error> {
|
||||
let temp_path = TempDir::new().unwrap();
|
||||
|
||||
let sled_settings = RocksBackendSettings {
|
||||
db_path: temp_path.path().to_path_buf(),
|
||||
read_only: false,
|
||||
column_family: None,
|
||||
};
|
||||
|
||||
let mut db: RocksBackend<NoStorageSerde> = RocksBackend::new(sled_settings)?;
|
||||
let txn = db.txn(|db| {
|
||||
let key = "foo";
|
||||
let value = "bar";
|
||||
db.put(key, value)?;
|
||||
let result = db.get(key)?;
|
||||
db.delete(key)?;
|
||||
Ok(result.map(|ivec| ivec.to_vec().into()))
|
||||
});
|
||||
let result = db.execute(txn).await??;
|
||||
assert_eq!(result, Some("bar".as_bytes().into()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multi_readers_single_writer(
|
||||
) -> Result<(), <RocksBackend<NoStorageSerde> as StorageBackend>::Error> {
|
||||
use tokio::sync::mpsc::channel;
|
||||
|
||||
let temp_path = TempDir::new().unwrap();
|
||||
let path = temp_path.path().to_path_buf();
|
||||
let sled_settings = RocksBackendSettings {
|
||||
db_path: temp_path.path().to_path_buf(),
|
||||
read_only: false,
|
||||
column_family: None,
|
||||
};
|
||||
let key = "foo";
|
||||
let value = "bar";
|
||||
|
||||
let mut db: RocksBackend<NoStorageSerde> = RocksBackend::new(sled_settings)?;
|
||||
|
||||
let (tx, mut rx) = channel(5);
|
||||
// now let us spawn a few readers
|
||||
for _ in 0..5 {
|
||||
let p = path.clone();
|
||||
let tx = tx.clone();
|
||||
std::thread::spawn(move || {
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(async move {
|
||||
let sled_settings = RocksBackendSettings {
|
||||
db_path: p,
|
||||
read_only: true,
|
||||
column_family: None,
|
||||
};
|
||||
let key = "foo";
|
||||
|
||||
let mut db: RocksBackend<NoStorageSerde> =
|
||||
RocksBackend::new(sled_settings).unwrap();
|
||||
|
||||
while db.load(key.as_bytes()).await.unwrap().is_none() {
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
tx.send(()).await.unwrap();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
db.store(key.as_bytes().into(), value.as_bytes().into())
|
||||
.await?;
|
||||
|
||||
let mut recvs = 0;
|
||||
loop {
|
||||
if rx.recv().await.is_some() {
|
||||
recvs += 1;
|
||||
if recvs == 5 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
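The `Transaction` type above exists because, as the comment notes, rocksdb's `TransactionDB` cannot be opened read-only, so the backend instead packages a unit of work as a boxed closure over a plain `DB` handle and runs it later. A minimal standalone sketch of the same pattern, independent of this crate; the `ClosureTxn` name and the scratch path are illustrative only:

use std::sync::Arc;

use rocksdb::{Options, DB};

// Illustrative stand-in for the backend's boxed-closure transaction.
struct ClosureTxn {
    rocks: Arc<DB>,
    executor: Box<dyn FnOnce(&DB) -> Result<Option<Vec<u8>>, rocksdb::Error> + Send + Sync>,
}

impl ClosureTxn {
    fn execute(self) -> Result<Option<Vec<u8>>, rocksdb::Error> {
        (self.executor)(&self.rocks)
    }
}

fn main() -> Result<(), rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    // Hypothetical scratch path, used only for this sketch.
    let db = Arc::new(DB::open(&opts, "/tmp/closure-txn-sketch")?);

    let txn = ClosureTxn {
        rocks: db.clone(),
        executor: Box::new(|db| {
            db.put(b"key", b"value")?;
            db.get(b"key")
        }),
    };
    assert_eq!(txn.execute()?, Some(b"value".to_vec()));
    Ok(())
}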
@ -0,0 +1,58 @@
use rocksdb::{Options, DB};

const TEMP_ROCKS_PATH: &str = "rocks";

pub fn rocksdb_ro() {
    let mut opts = Options::default();
    opts.create_if_missing(true);

    // open in read only mode
    let db = DB::open_cf_for_read_only(&opts, TEMP_ROCKS_PATH, ["blocks", "da"], false).unwrap();

    let blocks_cf = db.cf_handle("blocks").unwrap();
    let r = db.get_cf(blocks_cf, b"block1").unwrap().unwrap();

    assert_eq!(r, b"block1data");

    let da_cf = db.cf_handle("da").unwrap();
    let r = db.get_cf(da_cf, b"da1").unwrap().unwrap();
    assert_eq!(r, b"da1data");

    loop {
        std::thread::sleep(std::time::Duration::from_secs(1));
    }
}

pub fn rocksdb_rw() {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);

    let db = DB::open_cf(&opts, TEMP_ROCKS_PATH, ["blocks", "da"]).unwrap();

    // open blocks column family and insert a block
    let blocks_cf = db.cf_handle("blocks").unwrap();
    db.put_cf(blocks_cf, b"block1", b"block1data").unwrap();

    // open da column family and insert a blob
    let da_cf = db.cf_handle("da").unwrap();
    db.put_cf(da_cf, b"da1", b"da1data").unwrap();

    // A loop to mock a long running program
    loop {
        std::thread::sleep(std::time::Duration::from_secs(1));
    }
}

fn main() {
    let mut args = std::env::args();
    args.next();
    let o = args.next();
    if o.is_none() {
        println!("open in read-write mode");
        rocksdb_rw()
    } else {
        println!("open in read-only mode");
        rocksdb_ro()
    }
}
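The standalone example above is meant to be run as two cooperating processes against the same `rocks` directory: an instance started with no argument opens the database read-write and populates the `blocks` and `da` column families, while a second instance started with any argument (for example `ro`) opens the same database read-only, exercising the multi-process scenario that motivated avoiding `TransactionDB` in the backend.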
@ -6,7 +6,7 @@
  overlays = [
    (import (fetchGit {
      url = "https://github.com/oxalica/rust-overlay.git";
      rev = "a0df72e106322b67e9c6e591fe870380bd0da0d5";
      rev = "0e031ddb3f5a339dc6eda93d271ae43618b14eec";
    }))
  ];
}
@ -17,9 +17,10 @@ pkgs.mkShell {
  buildInputs = with pkgs; [
    pkg-config
    rust-bin.stable."1.75.0".default
    rust-bin.stable."1.76.0".default
    clang_14
    llvmPackages_14.libclang
    openssl
  ];
  shellHook = ''
    export LIBCLANG_PATH="${pkgs.llvmPackages_14.libclang.lib}/lib";
@ -1,10 +1,6 @@
# BUILD IMAGE ---------------------------------------------------------

FROM rust:1.75.0-slim-bullseye AS builder

# Using backports for go 1.19
RUN echo 'deb http://deb.debian.org/debian bullseye-backports main' \
    >> /etc/apt/sources.list
FROM rust:1.76.0-slim-bullseye AS builder

# Dependencies for publishing documentation.
RUN apt-get update && apt-get install -yq \
@ -13,11 +9,11 @@ RUN apt-get update && apt-get install -yq \
WORKDIR /nomos
COPY . .

RUN cargo build --release --all
RUN cargo build --release --all --features metrics

# NODE IMAGE ----------------------------------------------------------

FROM bitnami/minideb:latest
FROM bitnami/minideb:bullseye

LABEL maintainer="augustinas@status.im" \
      source="https://github.com/logos-co/nomos-node" \
@ -27,9 +23,9 @@ LABEL maintainer="augustinas@status.im" \
EXPOSE 3000 8080 9000 60000

COPY --from=builder /nomos/target/release/nomos-node /usr/bin/nomos-node
COPY --from=builder /nomos/target/release/nomos-cli /usr/bin/nomos-cli
COPY --from=builder /nomos/target/release/mixnode /usr/bin/mixnode
COPY --from=builder /usr/bin/etcdctl /usr/bin/etcdctl
COPY nodes/nomos-node/config.yaml /etc/nomos/config.yaml
RUN install_packages python3 python3-etcd3

ENTRYPOINT ["/usr/bin/nomos-node"]
@ -0,0 +1,44 @@
backend:
  host: 0.0.0.0
  port: 4007
  log_level: "fatal"
  node_key: "0000000000000000000000000000000000000000000000000000000000000667"
  discV5BootstrapNodes: []
  initial_peers: ["/dns/bootstrap/tcp/3000"]
  relayTopics: []
  # Mixclient configuration to communicate with mixnodes.
  # The libp2p network backend always requires this mixclient configuration
  # (cannot be disabled for now).
  mixnet_client:
    # A mixclient mode. For details, see the documentation of the "mixnet" crate.
    # - Sender
    # - !SenderReceiver [mixnode_client_listen_address]
    mode: Sender
    # A mixnet topology, which contains the information of all mixnodes in the mixnet.
    # (The topology is static for now.)
    topology:
      # Each mixnet layer consists of a list of mixnodes.
      layers:
        - nodes:
            - address: mix-node-0:7707 # A listen address of the mixnode
              public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
        - nodes:
            - address: mix-node-1:7717 # A listen address of the mixnode
              public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"

        - nodes:
            - address: mix-node-2:7727 # A listen address of the mixnode
              public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"

    # A max number of connections that will stay connected to mixnodes in the first mixnet layer.
    connection_pool_size: 255
    max_retries: 5
    retry_delay:
      secs: 1
      nanos: 0
  # A range of total delay that will be set to each Sphinx packet
  # sent to the mixnet for timing obfuscation.
  # Panics if start > end.
  mixnet_delay:
    start: "0ms"
    end: "0ms"
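The `mixnet_delay` range above (which panics if start > end) maps naturally onto drawing a per-packet delay uniformly from the configured interval. A small sketch of how such a delay could be sampled; this is illustrative only and not the mixnet crate's actual implementation:

use std::time::Duration;

use rand::Rng;

// Draw a per-packet delay uniformly from [start, end].
// As in the config comment, an empty range (start > end) panics.
fn sample_delay(start: Duration, end: Duration) -> Duration {
    rand::thread_rng().gen_range(start..=end)
}

fn main() {
    let delay = sample_delay(Duration::from_millis(0), Duration::from_millis(0));
    assert_eq!(delay, Duration::ZERO);
}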
@ -0,0 +1,10 @@
log:
  backend: "Stdout"
  format: "Json"
  level: "info"

api:
  backend_settings:
    address: 0.0.0.0:9090
    cors_origins: []
@ -0,0 +1,11 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    org_id: 1
    url: http://prometheus:9090
    is_default: true
    version: 1
    editable: true
@ -0,0 +1,51 @@
instance_name = nomos dashboard

;[dashboards.json]
;enabled = true
;path = /home/git/grafana/grafana-dashboards/dashboards


#################################### Auth ##########################
[auth]
disable_login_form = false

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
enabled = true

# specify organization name that should be used for unauthenticated users
;org_name = Public

# specify role for unauthenticated users
; org_role = Admin
org_role = Viewer

;[security]
;admin_user = ocr
;admin_password = ocr

;[users]
# disable user signup / registration
;allow_sign_up = false

# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true

# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer

#################################### SMTP / Emailing ##########################
;[smtp]
;enabled = false
;host = localhost:25
;user =
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost

;[emails]
;welcome_email_on_sign_up = false
@ -0,0 +1 @@
GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel
@ -0,0 +1,14 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s
  external_labels:
    monitor: "Monitoring"

scrape_configs:
  - job_name: "libp2p"
    static_configs:
      - targets:
          - bootstrap:18080
          - libp2p_node_1:18080
          - libp2p_node_2:18080
          - libp2p_node_3:18080
@ -17,4 +17,4 @@ echo "CONSENSUS_PRIV_KEY: ${CONSENSUS_PRIV_KEY}"
echo "DA_VOTER: ${DA_VOTER}"
echo "OVERLAY_NODES: ${OVERLAY_NODES}"

exec /usr/bin/nomos-node /etc/nomos/bootstrap_config.yaml
exec /usr/bin/nomos-node /etc/nomos/bootstrap_config.yaml --with-metrics
@ -0,0 +1,9 @@
#!/bin/sh

echo "I am a container ${HOSTNAME} bot"

while true
do
  /usr/bin/nomos-cli chat --author nomos-ghost --message "$(date +%H:%M:%S) ~ ping" --network-config /etc/nomos/cli_config.yaml --node http://bootstrap:18080
  sleep 10
done
@ -33,4 +33,4 @@ echo "DA_VOTER: ${DA_VOTER}"
echo "OVERLAY_NODES: ${OVERLAY_NODES}"
echo "NET_INITIAL_PEERS: ${NET_INITIAL_PEERS}"

exec /usr/bin/nomos-node /etc/nomos/libp2p_config.yaml
exec /usr/bin/nomos-node /etc/nomos/libp2p_config.yaml --with-metrics
@ -4,8 +4,10 @@ use std::{
    time::Duration,
};

use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
use mixnet_node::{MixnetNodeConfig, PRIVATE_KEY_SIZE};
use mixnet_topology::{Layer, MixnetTopology, Node};
use nomos_log::{LoggerBackend, LoggerFormat};
use rand::{thread_rng, RngCore};
use tempfile::NamedTempFile;
@ -14,21 +16,37 @@ use crate::{get_available_port, MixnetConfig};
const MIXNODE_BIN: &str = "../target/debug/mixnode";

pub struct MixNode {
    _tempdir: tempfile::TempDir,
    child: Child,
}

impl Drop for MixNode {
    fn drop(&mut self) {
        self.child.kill().unwrap();
        if std::thread::panicking() {
            if let Err(e) = persist_tempdir(&mut self._tempdir, "mixnode") {
                println!("failed to persist tempdir: {e}");
            }
        }

        if let Err(e) = self.child.kill() {
            println!("failed to kill the child process: {e}");
        }
    }
}

impl MixNode {
    pub async fn spawn(config: MixnetNodeConfig) -> Self {
        let config = mixnode::Config {
        let dir = create_tempdir().unwrap();

        let mut config = mixnode::Config {
            mixnode: config,
            log: Default::default(),
        };
        config.log.backend = LoggerBackend::File {
            directory: dir.path().to_owned(),
            prefix: Some(LOGS_PREFIX.into()),
        };
        config.log.format = LoggerFormat::Json;

        let mut file = NamedTempFile::new().unwrap();
        let config_path = file.path().to_owned();
@ -43,7 +61,10 @@ impl MixNode {
        //TODO: use a sophisticated way to wait until the node is ready
        tokio::time::sleep(Duration::from_secs(1)).await;

        Self { child }
        Self {
            _tempdir: dir,
            child,
        }
    }

    pub async fn spawn_nodes(num_nodes: usize) -> (Vec<Self>, MixnetConfig) {
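The `//TODO` above notes that the fixed one-second sleep is a stopgap. One possible shape for a more robust readiness wait, polling a caller-supplied check until it succeeds or a timeout expires; this is a sketch under stated assumptions, not code from this repository, and the readiness probe itself is hypothetical:

use std::time::Duration;

// Poll `is_ready` until it returns true or `timeout` elapses.
async fn wait_until_ready<F>(mut is_ready: F, timeout: Duration) -> bool
where
    F: FnMut() -> bool,
{
    let deadline = tokio::time::Instant::now() + timeout;
    while tokio::time::Instant::now() < deadline {
        if is_ready() {
            return true;
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    false
}

#[tokio::main]
async fn main() {
    // Hypothetical check; a real one might probe the node's TCP port or HTTP API.
    let mut attempts = 0;
    let ready = wait_until_ready(
        || {
            attempts += 1;
            attempts >= 3
        },
        Duration::from_secs(2),
    )
    .await;
    assert!(ready);
}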
@ -3,3 +3,26 @@ pub mod nomos;
pub use self::mixnode::MixNode;
pub use nomos::NomosNode;
use tempfile::TempDir;

const LOGS_PREFIX: &str = "__logs";

fn create_tempdir() -> std::io::Result<TempDir> {
    // It's easier to use the current location instead of OS-default tempfile location
    // because Github Actions can easily access files in the current location using wildcard
    // to upload them as artifacts.
    tempfile::TempDir::new_in(std::env::current_dir()?)
}

fn persist_tempdir(tempdir: &mut TempDir, label: &str) -> std::io::Result<()> {
    println!(
        "{}: persisting directory at {}",
        label,
        tempdir.path().display()
    );
    // we need ownership of the dir to persist it
    let dir = std::mem::replace(tempdir, tempfile::tempdir()?);
    // a bit confusing but `into_path` persists the directory
    let _ = dir.into_path();
    Ok(())
}
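A note on the persistence trick above: a `TempDir` removes its directory when it is dropped, so keeping the logs around means the guard must never be dropped normally. `persist_tempdir` therefore swaps a fresh throwaway tempdir into the caller's slot with `std::mem::replace` and calls `into_path()` on the original guard, which consumes it and returns the path without deleting anything on disk; that is the "a bit confusing" behaviour the comment refers to, and it is why the logs survive only when a test panics.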
@ -3,6 +3,7 @@ use std::net::SocketAddr;
use std::process::{Child, Command, Stdio};
use std::time::Duration;
// internal
use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
use crate::{adjust_timeout, get_available_port, ConsensusConfig, MixnetConfig, Node, SpawnConfig};
use carnot_consensus::{CarnotInfo, CarnotSettings};
use carnot_engine::overlay::{RandomBeaconState, RoundRobin, TreeOverlay, TreeOverlaySettings};
@ -22,14 +23,13 @@ use nomos_node::{api::AxumBackendSettings, Config, Tx};
use fraction::Fraction;
use once_cell::sync::Lazy;
use rand::{thread_rng, Rng};
use reqwest::Client;
use reqwest::{Client, Url};
use tempfile::NamedTempFile;

static CLIENT: Lazy<Client> = Lazy::new(Client::new);
const NOMOS_BIN: &str = "../target/debug/nomos-node";
const CARNOT_INFO_API: &str = "carnot/info";
const STORAGE_BLOCKS_API: &str = "storage/block";
const LOGS_PREFIX: &str = "__logs";
const GET_BLOCKS_INFO: &str = "carnot/blocks";

pub struct NomosNode {
@ -42,14 +42,14 @@ pub struct NomosNode {
impl Drop for NomosNode {
    fn drop(&mut self) {
        if std::thread::panicking() {
            println!("persisting directory at {}", self._tempdir.path().display());
            // we need ownership of the dir to persist it
            let dir = std::mem::replace(&mut self._tempdir, tempfile::tempdir().unwrap());
            // a bit confusing but `into_path` persists the directory
            let _ = dir.into_path();
            if let Err(e) = persist_tempdir(&mut self._tempdir, "nomos-node") {
                println!("failed to persist tempdir: {e}");
            }
        }

        self.child.kill().unwrap();
        if let Err(e) = self.child.kill() {
            println!("failed to kill the child process: {e}");
        }
    }
}
@ -61,11 +61,7 @@ impl NomosNode {
    pub async fn spawn(mut config: Config) -> Self {
        // Waku stores the messages in a db file in the current dir, we need a different
        // directory for each node to avoid conflicts
        //
        // NOTE: It's easier to use the current location instead of OS-default tempfile location
        // because Github Actions can easily access files in the current location using wildcard
        // to upload them as artifacts.
        let dir = tempfile::TempDir::new_in(std::env::current_dir().unwrap()).unwrap();
        let dir = create_tempdir().unwrap();
        let mut file = NamedTempFile::new().unwrap();
        let config_path = file.path().to_owned();
@ -115,6 +111,10 @@ impl NomosNode {
        }
    }

    pub fn url(&self) -> Url {
        format!("http://{}", self.addr).parse().unwrap()
    }

    pub async fn get_block(&self, id: BlockId) -> Option<Block<Tx, Certificate>> {
        CLIENT
            .post(&format!("http://{}/{}", self.addr, STORAGE_BLOCKS_API))
@ -341,8 +341,6 @@ fn create_node_config(
                cors_origins: vec![],
            },
        },
        #[cfg(feature = "metrics")]
        metrics: Default::default(),
        da: nomos_da::Settings {
            da_protocol: full_replication::Settings {
                voter: id,
@ -1,17 +1,40 @@
use full_replication::{AbsoluteNumber, Attestation, Certificate, FullReplication};
use nomos_cli::{
    cmds::{disseminate::Disseminate, Command},
    api::da::get_blobs,
    cmds::disseminate::Disseminate,
    da::disseminate::{DaProtocolChoice, FullReplicationSettings, Protocol, ProtocolSettings},
};
use std::time::Duration;
use nomos_core::da::{blob::Blob as _, DaProtocol};
use std::{io::Write, time::Duration};
use tempfile::NamedTempFile;
use tests::{
    adjust_timeout, get_available_port, nodes::nomos::Pool, MixNode, Node, NomosNode, SpawnConfig,
};

const CLI_BIN: &str = "../target/debug/nomos-cli";

use std::process::Command;

const TIMEOUT_SECS: u64 = 20;

#[tokio::test]
async fn disseminate_blob() {
fn run_disseminate(disseminate: &Disseminate) {
    let mut binding = Command::new(CLI_BIN);
    let c = binding
        .args(["disseminate", "--network-config"])
        .arg(disseminate.network_config.as_os_str())
        .arg("--node-addr")
        .arg(disseminate.node_addr.as_ref().unwrap().as_str());

    match (&disseminate.data, &disseminate.file) {
        (Some(data), None) => c.args(["--data", &data]),
        (None, Some(file)) => c.args(["--file", file.as_os_str().to_str().unwrap()]),
        (_, _) => panic!("Either data or file needs to be provided, but not both"),
    };

    c.status().expect("failed to execute nomos cli");
}

async fn disseminate(config: &mut Disseminate) {
    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(2).await;
    let mut nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2, mixnet_config)).await;
@ -25,31 +48,34 @@ async fn disseminate_blob() {
    let mut file = NamedTempFile::new().unwrap();
    let config_path = file.path().to_owned();
    serde_yaml::to_writer(&mut file, &network_config).unwrap();
    let cmd = Command::Disseminate(Disseminate {
        data: "Hello World".into(),
        timeout: 20,
        network_config: config_path,
        da_protocol: DaProtocolChoice {
            da_protocol: Protocol::FullReplication,
            settings: ProtocolSettings {
                full_replication: FullReplicationSettings {
                    voter: [0; 32],
                    num_attestations: 1,
                },
    let da_protocol = DaProtocolChoice {
        da_protocol: Protocol::FullReplication,
        settings: ProtocolSettings {
            full_replication: FullReplicationSettings {
                voter: [0; 32],
                num_attestations: 1,
            },
        },
        node_addr: Some(
            format!(
                "http://{}",
                nodes[0].config().http.backend_settings.address.clone()
            )
            .parse()
            .unwrap(),
        ),
        output: None,
    });
    };

    let thread = std::thread::spawn(move || cmd.run().unwrap());
    let da =
        <FullReplication<AbsoluteNumber<Attestation, Certificate>>>::try_from(da_protocol.clone())
            .unwrap();

    config.timeout = 20;
    config.network_config = config_path;
    config.da_protocol = da_protocol;
    config.node_addr = Some(
        format!(
            "http://{}",
            nodes[0].config().http.backend_settings.address.clone()
        )
        .parse()
        .unwrap(),
    );

    run_disseminate(&config);
    // let thread = std::thread::spawn(move || cmd.run().unwrap());

    tokio::time::timeout(
        adjust_timeout(Duration::from_secs(TIMEOUT_SECS)),
@ -58,7 +84,53 @@ async fn disseminate_blob() {
    .await
    .unwrap();

    thread.join().unwrap();
    let (blob, bytes) = if let Some(data) = &config.data {
        let bytes = data.as_bytes().to_vec();
        (da.encode(bytes.clone())[0].hash(), bytes)
    } else {
        let bytes = std::fs::read(&config.file.as_ref().unwrap()).unwrap();
        (da.encode(bytes.clone())[0].hash(), bytes)
    };

    assert_eq!(
        get_blobs(&nodes[0].url(), vec![blob]).await.unwrap()[0].as_bytes(),
        bytes.clone()
    );
}

#[tokio::test]
async fn disseminate_blob() {
    let mut config = Disseminate {
        data: Some("hello world".to_string()),
        ..Default::default()
    };
    disseminate(&mut config).await;
}

#[tokio::test]
async fn disseminate_big_blob() {
    const MSG_SIZE: usize = 1024;
    let mut config = Disseminate {
        data: std::iter::repeat(String::from("X"))
            .take(MSG_SIZE)
            .collect::<Vec<_>>()
            .join("")
            .into(),
        ..Default::default()
    };
    disseminate(&mut config).await;
}

#[tokio::test]
async fn disseminate_blob_from_file() {
    let mut file = NamedTempFile::new().unwrap();
    file.write_all("hello world".as_bytes()).unwrap();

    let mut config = Disseminate {
        file: Some(file.path().to_path_buf()),
        ..Default::default()
    };
    disseminate(&mut config).await;
}

async fn wait_for_cert_in_mempool(node: &NomosNode) {