Mixnet v1 (#569)
* base
* Remove mixnet client from libp2p network backend (#572)
* Mixnet v1: Remove all mixnet legacies: mixnet crate, mixnode binary, tests, and docker (#573)
* Mixnet v1: Skeleton (#570)
* Use QUIC for libp2p (#580)
* Add Poisson interval function for Mixnet (#575)
* Mixnet network backend skeleton (#586)
* Libp2p stream read/write (#587)
* Emitting packets from mixclient using libp2p stream (#588)
* Handle outputs from mixnode using libp2p stream/gossipsub (#589)
* Refactor poisson (#590)
* Mix client Poisson emission (#591)
* Mix node packet handling (#592)
* Mix Packet / Fragment logic (#593)
* Move FisherYates to `nomos-utils` (#594)
* Mixnet topology (#595)
* Mix client/node unit tests (#596)
* change multiaddr from tcp to udp with quic-v1 (#607)

---------

Co-authored-by: Al Liu <scygliu1@gmail.com>
Parent: 98bc66a67c
Commit: e7d591b7bc
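PR #575 in the list above adds a Poisson interval function for the mixnet, which PR #591 then uses for Poisson packet emission. As a rough, hypothetical illustration of the idea (not the actual implementation landed in this commit): Poisson-timed emission samples exponentially distributed inter-send intervals, for example with the `rand_distr` crate that the new `mixnet` crate declares as a dependency below.

```rust
use rand::Rng;
use rand_distr::{Distribution, Exp};
use std::time::Duration;

/// Samples the interval until the next packet emission for a Poisson process
/// with `rate_per_sec` expected emissions per second.
/// Hypothetical helper for illustration only; the real function lives in the mixnet crate.
fn poisson_interval<R: Rng>(rng: &mut R, rate_per_sec: f64) -> Duration {
    // Inter-arrival times of a Poisson process are exponentially distributed
    // with mean 1 / rate.
    let exp = Exp::new(rate_per_sec).expect("rate must be positive and finite");
    Duration::from_secs_f64(exp.sample(rng))
}

fn main() {
    let mut rng = rand::thread_rng();
    let interval = poisson_interval(&mut rng, 10.0); // ~10 packets per second on average
    println!("next emission in {interval:?}");
}
```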
@@ -6,4 +6,4 @@ DOCKER_COMPOSE_ETCDCTL_ENDPOINTS=etcd:2379
 DOCKER_COMPOSE_ETCDCTL_API=3
 DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY=1000000000000000000000000000000000000000000000000000000000000000
 DOCKER_COMPOSE_OVERLAY_NODES=$DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY
-DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/tcp/3000
+DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/udp/3000/quic-v1
@@ -6,4 +6,4 @@ DOCKER_COMPOSE_ETCDCTL_ENDPOINTS=etcd:2379
 DOCKER_COMPOSE_ETCDCTL_API=3
 DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY=1000000000000000000000000000000000000000000000000000000000000000
 DOCKER_COMPOSE_OVERLAY_NODES=1000000000000000000000000000000000000000000000000000000000000000
-DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/tcp/3000
+DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/udp/3000/quic-v1
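Both environment files above switch the initial-peer address from a TCP multiaddr to a QUIC one; the same substitution appears in the docker-compose files and port mappings below. For reference (QUIC v1 runs over UDP, hence the `/udp/.../quic-v1` form):

```
# libp2p multiaddr over TCP (old)
/dns/bootstrap/tcp/3000
# libp2p multiaddr over QUIC v1, carried on UDP (new)
/dns/bootstrap/udp/3000/quic-v1
```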
@@ -19,15 +19,11 @@ members = [
 "nomos-cli",
 "nomos-utils",
 "nodes/nomos-node",
-"nodes/mixnode",
+"mixnet",
 "simulations",
 "consensus/carnot-engine",
 "consensus/cryptarchia-engine",
 "ledger/cryptarchia-ledger",
 "tests",
-"mixnet/node",
-"mixnet/client",
-"mixnet/protocol",
-"mixnet/topology",
 ]
 resolver = "2"
@@ -53,10 +53,6 @@ pipeline {
 /* To prevent rebuilding node for each test, tests are defined here */
 def tests = ['ten_nodes_happy', 'two_nodes_happy', 'ten_nodes_one_down']
 
-if (FEATURE == 'libp2p') {
-tests.add('mixnet')
-}
-
 def report = runBuildAndTestsForFeature(FEATURE, tests)
 writeFile(file: "${WORKSPACE}/report.txt", text: report)
 }
@@ -7,14 +7,10 @@ services:
 context: .
 dockerfile: testnet/Dockerfile
 ports:
-- "3000:3000/tcp"
+- "3000:3000/udp"
 - "18080:18080/tcp"
 volumes:
 - ./testnet:/etc/nomos
-depends_on:
-- mix-node-0
-- mix-node-1
-- mix-node-2
 environment:
 - BOOTSTRAP_NODE_KEY=${DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY:-1000000000000000000000000000000000000000000000000000000000000000}
 - LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
@@ -32,11 +28,8 @@ services:
 depends_on:
 - bootstrap
 - etcd
-- mix-node-0
-- mix-node-1
-- mix-node-2
 ports:
-- "3001:3000/tcp"
+- "3001:3000/udp"
 - "18081:18080/tcp"
 environment:
 - LIBP2P_REPLICAS=3
@@ -45,7 +38,7 @@ services:
 - LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
-- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/tcp/3000}
+- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
 entrypoint: /etc/nomos/scripts/run_nomos_node.sh
 
 libp2p-node-2:
@@ -58,11 +51,8 @@ services:
 depends_on:
 - bootstrap
 - etcd
-- mix-node-0
-- mix-node-1
-- mix-node-2
 ports:
-- "3002:3000/tcp"
+- "3002:3000/udp"
 - "18082:18080/tcp"
 environment:
 - LIBP2P_REPLICAS=3
@@ -71,7 +61,7 @@ services:
 - LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
-- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/tcp/3000}
+- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
 entrypoint: /etc/nomos/scripts/run_nomos_node.sh
 
 libp2p-node-3:
@@ -84,11 +74,8 @@ services:
 depends_on:
 - bootstrap
 - etcd
-- mix-node-0
-- mix-node-1
-- mix-node-2
 ports:
-- "3003:3000/tcp"
+- "3003:3000/udp"
 - "18083:18080/tcp"
 environment:
 - LIBP2P_REPLICAS=3
@@ -97,48 +84,9 @@ services:
 - LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
-- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/tcp/3000}
+- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
 entrypoint: /etc/nomos/scripts/run_nomos_node.sh
 
-mix-node-0:
-container_name: mix_node_0
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-ports:
-- "7707:7777/tcp"
-- "7708:7778/tcp"
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
-mix-node-1:
-container_name: mix_node_1
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-ports:
-- "7717:7777/tcp"
-- "7718:7778/tcp"
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
-mix-node-2:
-container_name: mix_node_2
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-ports:
-- "7727:7777/tcp"
-- "7728:7778/tcp"
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
 chatbot:
 container_name: chatbot
 build:
compose.yml (34 changed lines)
@@ -5,7 +5,7 @@ services:
 context: .
 dockerfile: testnet/Dockerfile
 ports:
-- "3000:3000/tcp"
+- "3000:3000/udp"
 - "18080:18080/tcp"
 volumes:
 - ./testnet:/etc/nomos
@@ -27,9 +27,6 @@ services:
 depends_on:
 - bootstrap
 - etcd
-- mix-node-0
-- mix-node-1
-- mix-node-2
 environment:
 - LIBP2P_REPLICAS=${DOCKER_COMPOSE_LIBP2P_REPLICAS:-1}
 - ETCDCTL_ENDPOINTS=${DOCKER_COMPOSE_ETCDCTL_ENDPOINTS:-etcd:2379}
@@ -37,36 +34,9 @@ services:
 - LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
 - OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
-- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/tcp/3000}
+- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
 entrypoint: /etc/nomos/scripts/run_nomos_node.sh
 
-mix-node-0:
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
-mix-node-1:
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
-mix-node-2:
-build:
-context: .
-dockerfile: testnet/Dockerfile
-volumes:
-- ./testnet:/etc/nomos
-entrypoint: /usr/bin/mixnode
-command: /etc/nomos/mixnode_config.yaml
-
 etcd:
 image: quay.io/coreos/etcd:v3.4.15
 ports:
@@ -15,7 +15,7 @@ rand = "0.8"
 rand_chacha = "0.3"
 thiserror = "1"
 fraction = { version = "0.13" }
-nomos-utils = { path = "../../nomos-utils", optional = true }
+nomos-utils = { path = "../../nomos-utils" }
 
 utoipa = { version = "4.0", optional = true }
 serde_json = { version = "1.0", optional = true }
@@ -188,7 +188,9 @@ fn build_committee_from_nodes_with_size(
 
 #[cfg(test)]
 mod tests {
-use crate::overlay::{FisherYatesShuffle, RoundRobin};
+use nomos_utils::fisheryates::FisherYatesShuffle;
 
+use crate::overlay::RoundRobin;
+
 use super::*;
 const ENTROPY: [u8; 32] = [0; 32];
@@ -1,33 +1,7 @@
-// std
-
-// crates
-use rand::{Rng, SeedableRng};
-use rand_chacha::ChaCha20Rng;
-
 // internal
 use crate::overlay::CommitteeMembership;
 use crate::NodeId;
+use nomos_utils::fisheryates::FisherYatesShuffle;
 
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub struct FisherYatesShuffle {
-entropy: [u8; 32],
-}
-
-impl FisherYatesShuffle {
-pub fn new(entropy: [u8; 32]) -> Self {
-Self { entropy }
-}
-
-pub fn shuffle<T: Clone>(elements: &mut [T], entropy: [u8; 32]) {
-let mut rng = ChaCha20Rng::from_seed(entropy);
-// Implementation of fisher yates shuffling
-// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
-for i in (1..elements.len()).rev() {
-let j = rng.gen_range(0..=i);
-elements.swap(i, j);
-}
-}
-}
-
 impl CommitteeMembership for FisherYatesShuffle {
 fn reshape_committees(&self, nodes: &mut [NodeId]) {
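The struct deleted above was moved to `nomos-utils` by PR #594 and is now imported as `nomos_utils::fisheryates::FisherYatesShuffle`. A sketch of the relocated type, mirroring the removed implementation with its indentation restored (the exact module layout inside `nomos-utils` is inferred from the new imports and is an assumption):

```rust
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FisherYatesShuffle {
    entropy: [u8; 32],
}

impl FisherYatesShuffle {
    pub fn new(entropy: [u8; 32]) -> Self {
        Self { entropy }
    }

    /// Deterministic Fisher-Yates shuffle seeded from the given entropy.
    /// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
    pub fn shuffle<T: Clone>(elements: &mut [T], entropy: [u8; 32]) {
        let mut rng = ChaCha20Rng::from_seed(entropy);
        for i in (1..elements.len()).rev() {
            let j = rng.gen_range(0..=i);
            elements.swap(i, j);
        }
    }
}
```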
@@ -53,8 +53,10 @@ pub trait CommitteeMembership: Clone {
 
 #[cfg(test)]
 mod tests {
+use nomos_utils::fisheryates::FisherYatesShuffle;
+
 use super::*;
-use crate::overlay::{FisherYatesShuffle, RoundRobin};
+use crate::overlay::RoundRobin;
 
 const ENTROPY: [u8; 32] = [0; 32];
 
@@ -1,6 +1,7 @@
-use crate::overlay::{CommitteeMembership, FisherYatesShuffle};
+use crate::overlay::CommitteeMembership;
 use crate::types::*;
 use bls_signatures::{PrivateKey, PublicKey, Serialize, Signature};
+use nomos_utils::fisheryates::FisherYatesShuffle;
 use rand::{seq::SliceRandom, SeedableRng};
 use serde::{Deserialize, Serialize as SerdeSerialize};
 use sha2::{Digest, Sha256};
@@ -231,8 +231,9 @@ where
 
 #[cfg(test)]
 mod tests {
+use nomos_utils::fisheryates::FisherYatesShuffle;
+
 use crate::overlay::leadership::RoundRobin;
-use crate::overlay::membership::FisherYatesShuffle;
 use crate::Overlay;
 
 use super::*;
@@ -0,0 +1,22 @@
+[package]
+name = "mixnet"
+version = "0.0.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rand = "0.8"
+rand_distr = "0.4"
+nomos-utils = { path = "../nomos-utils" }
+thiserror = "1.0.57"
+tokio = { version = "1.36.0", features = ["sync"] }
+serde = { version = "1.0.197", features = ["derive"] }
+sphinx-packet = "0.1.0"
+nym-sphinx-addressing = { package = "nym-sphinx-addressing", git = "https://github.com/nymtech/nym", tag = "v1.1.22" }
+tracing = "0.1.40"
+uuid = { version = "1.7.0", features = ["v4"] }
+futures = "0.3"
+
+[dev-dependencies]
+tokio = { version = "1.36.0", features = ["test-util"] }
@@ -1,50 +0,0 @@
-# Mixnet
-
-## Components
-
-- `node`: A mixnode implementation that will be assigned to one of the mixnet layers and will be responsible for receiving packets and forwarding them to the next mixnet layer.
-- `client`: A mixclient implementation
-  - which splits a message into multiple Sphinx packets, constructs mix routes for them, and sends the packets to the first mixnode in each route.
-  - which receives Sphinx packets from a mixnode and reconstructs a message.
-
-## Recommended Architecture
-
-```mermaid
-flowchart LR
-subgraph layer-1
-mixnode-1-1
-mixnode-1-2
-end
-subgraph layer-2
-mixnode-2-1
-mixnode-2-2
-end
-subgraph layer-3
-mixnode-3-1
-mixnode-3-2
-end
-mixnode-1-1 --> mixnode-2-1
-mixnode-1-1 --> mixnode-2-2
-mixnode-1-2 --> mixnode-2-1
-mixnode-1-2 --> mixnode-2-2
-mixnode-2-1 --> mixnode-3-1
-mixnode-2-1 --> mixnode-3-2
-mixnode-2-2 --> mixnode-3-1
-mixnode-2-2 --> mixnode-3-2
-mixclient-sender-1 --> mixnode-1-1
-mixclient-sender-1 --> mixnode-1-2
-mixnode-3-1 --> mixclient-senderreceiver-1
-mixnode-3-2 --> mixclient-senderreceiver-2
-```
-
-The mix `node` component can be integrated into a separate application, for example, so that it can be run independently with mixclients for better reliability or privacy.
-
-The mix `client` component is also designed to be integrated into any application that wants to send/receive packets to/from the mixnet.
-The `client` can be configured with one of modes described [below](#mixclient-modes).
-
-## Mixclient Modes
-
-- `Sender`: A mixclient only sends Sphinx packets to mixnodes, but doesn't receive any packets from mixnodes.
-- `SenderReceiver`: A mixclient not only sends Sphinx packets to mixnodes, but also receive packets from mixnodes.
-  - Due to the design of mixnet, mixclients always receive packets from mixnodes in the last mixnet layer.
-  - Currently, only 1:1 mapping is supported. In other words, multiple mixclients cannot listen from the same mixnode. It also means that it is recommended that a single node operator runs both a mixnode and a mixclient.
@@ -1,21 +0,0 @@
-[package]
-name = "mixnet-client"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-serde = { version = "1.0", features = ["derive"] }
-tracing = "0.1.37"
-tokio = { version = "1.32", features = ["net"] }
-sphinx-packet = "0.1.0"
-nym-sphinx = { package = "nym-sphinx", git = "https://github.com/nymtech/nym", tag = "v1.1.22" }
-# Using an older version, since `nym-sphinx` depends on `rand` v0.7.3.
-rand = "0.7.3"
-mixnet-protocol = { path = "../protocol" }
-mixnet-topology = { path = "../topology" }
-mixnet-util = { path = "../util" }
-futures = "0.3.28"
-thiserror = "1"
-
-[dev-dependencies]
-serde_yaml = "0.9.25"
@@ -1,93 +0,0 @@
-use std::{net::ToSocketAddrs, time::Duration};
-
-use futures::{stream, StreamExt};
-use mixnet_topology::MixnetTopology;
-use serde::{Deserialize, Serialize};
-
-use crate::{receiver::Receiver, MessageStream, MixnetClientError};
-
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
-pub struct MixnetClientConfig {
-pub mode: MixnetClientMode,
-pub topology: MixnetTopology,
-#[serde(default = "MixnetClientConfig::default_connection_pool_size")]
-pub connection_pool_size: usize,
-#[serde(default = "MixnetClientConfig::default_max_retries")]
-pub max_retries: usize,
-#[serde(default = "MixnetClientConfig::default_retry_delay")]
-pub retry_delay: std::time::Duration,
-}
-
-impl MixnetClientConfig {
-/// Creates a new `MixnetClientConfig` with default values.
-pub fn new(mode: MixnetClientMode, topology: MixnetTopology) -> Self {
-Self {
-mode,
-topology,
-connection_pool_size: Self::default_connection_pool_size(),
-max_retries: Self::default_max_retries(),
-retry_delay: Self::default_retry_delay(),
-}
-}
-
-const fn default_connection_pool_size() -> usize {
-256
-}
-
-const fn default_max_retries() -> usize {
-3
-}
-
-const fn default_retry_delay() -> Duration {
-Duration::from_secs(5)
-}
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
-pub enum MixnetClientMode {
-Sender,
-SenderReceiver(String),
-}
-
-impl MixnetClientMode {
-pub(crate) async fn run(&self) -> Result<MessageStream, MixnetClientError> {
-match self {
-Self::Sender => Ok(stream::empty().boxed()),
-Self::SenderReceiver(node_address) => {
-let mut addrs = node_address
-.to_socket_addrs()
-.map_err(|e| MixnetClientError::MixnetNodeAddressError(e.to_string()))?;
-let socket_addr = addrs
-.next()
-.ok_or(MixnetClientError::MixnetNodeAddressError(
-"No address provided".into(),
-))?;
-Ok(Receiver::new(socket_addr).run().await?.boxed())
-}
-}
-}
-}
-
-#[cfg(test)]
-mod tests {
-use mixnet_topology::MixnetTopology;
-
-use crate::{MixnetClientConfig, MixnetClientMode};
-
-#[test]
-fn default_config_serde() {
-let yaml = "
-mode: Sender
-topology:
-layers: []
-";
-let conf: MixnetClientConfig = serde_yaml::from_str(yaml).unwrap();
-assert_eq!(
-conf,
-MixnetClientConfig::new(
-MixnetClientMode::Sender,
-MixnetTopology { layers: Vec::new() }
-)
-);
-}
-}
@@ -1,26 +0,0 @@
-use mixnet_protocol::ProtocolError;
-use nym_sphinx::addressing::nodes::NymNodeRoutingAddressError;
-
-#[derive(thiserror::Error, Debug)]
-pub enum MixnetClientError {
-#[error("invalid node address: {0}")]
-MixnetNodeAddressError(String),
-#[error("mixnet node connect error")]
-MixnetNodeConnectError,
-#[error("mixnode stream has been closed")]
-MixnetNodeStreamClosed,
-#[error("unexpected stream body received")]
-UnexpectedStreamBody,
-#[error("invalid payload")]
-InvalidPayload,
-#[error("invalid fragment")]
-InvalidFragment,
-#[error("invalid routing address: {0}")]
-InvalidRoutingAddress(#[from] NymNodeRoutingAddressError),
-#[error("{0}")]
-Protocol(#[from] ProtocolError),
-#[error("{0}")]
-Message(#[from] nym_sphinx::message::NymMessageError),
-}
-
-pub type Result<T> = core::result::Result<T, MixnetClientError>;
@@ -1,46 +0,0 @@
-pub mod config;
-pub mod error;
-pub use error::*;
-mod receiver;
-mod sender;
-
-use std::time::Duration;
-
-pub use config::MixnetClientConfig;
-pub use config::MixnetClientMode;
-use futures::stream::BoxStream;
-use mixnet_util::ConnectionPool;
-use rand::Rng;
-use sender::Sender;
-
-// A client for sending packets to Mixnet and receiving packets from Mixnet.
-pub struct MixnetClient<R: Rng> {
-mode: MixnetClientMode,
-sender: Sender<R>,
-}
-
-pub type MessageStream = BoxStream<'static, Result<Vec<u8>>>;
-
-impl<R: Rng> MixnetClient<R> {
-pub fn new(config: MixnetClientConfig, rng: R) -> Self {
-let cache = ConnectionPool::new(config.connection_pool_size);
-Self {
-mode: config.mode,
-sender: Sender::new(
-config.topology,
-cache,
-rng,
-config.max_retries,
-config.retry_delay,
-),
-}
-}
-
-pub async fn run(&self) -> Result<MessageStream> {
-self.mode.run().await
-}
-
-pub fn send(&mut self, msg: Vec<u8>, total_delay: Duration) -> Result<()> {
-self.sender.send(msg, total_delay)
-}
-}
@@ -1,132 +0,0 @@
-use std::net::SocketAddr;
-
-use futures::{stream, Stream, StreamExt};
-use mixnet_protocol::Body;
-use nym_sphinx::{
-chunking::{fragment::Fragment, reconstruction::MessageReconstructor},
-message::{NymMessage, PaddedMessage},
-Payload,
-};
-use tokio::net::TcpStream;
-
-use super::error::*;
-use crate::MixnetClientError;
-
-// Receiver accepts TCP connections to receive incoming payloads from the Mixnet.
-pub struct Receiver {
-node_address: SocketAddr,
-}
-
-impl Receiver {
-pub fn new(node_address: SocketAddr) -> Self {
-Self { node_address }
-}
-
-pub async fn run(&self) -> Result<impl Stream<Item = Result<Vec<u8>>> + Send + 'static> {
-let Ok(socket) = TcpStream::connect(self.node_address).await else {
-return Err(MixnetClientError::MixnetNodeConnectError);
-};
-
-Ok(Self::message_stream(Box::pin(Self::fragment_stream(
-socket,
-))))
-}
-
-fn fragment_stream(socket: TcpStream) -> impl Stream<Item = Result<Fragment>> + Send + 'static {
-stream::unfold(socket, move |mut socket| {
-async move {
-let Ok(body) = Body::read(&mut socket).await else {
-// TODO: Maybe this is a hard error and the stream is corrupted? In that case stop the stream
-return Some((Err(MixnetClientError::MixnetNodeStreamClosed), socket));
-};
-
-match body {
-Body::SphinxPacket(_) => {
-Some((Err(MixnetClientError::UnexpectedStreamBody), socket))
-}
-Body::FinalPayload(payload) => {
-Some((Self::fragment_from_payload(payload), socket))
-}
-_ => unreachable!(),
-}
-}
-})
-}
-
-fn message_stream(
-fragment_stream: impl Stream<Item = Result<Fragment>> + Send + Unpin + 'static,
-) -> impl Stream<Item = Result<Vec<u8>>> + Send + 'static {
-// MessageReconstructor buffers all received fragments
-// and eventually returns reconstructed messages.
-let message_reconstructor: MessageReconstructor = Default::default();
-
-stream::unfold(
-(fragment_stream, message_reconstructor),
-|(mut fragment_stream, mut message_reconstructor)| async move {
-let result =
-Self::reconstruct_message(&mut fragment_stream, &mut message_reconstructor)
-.await;
-Some((result, (fragment_stream, message_reconstructor)))
-},
-)
-}
-
-fn fragment_from_payload(payload: Payload) -> Result<Fragment> {
-let Ok(payload_plaintext) = payload.recover_plaintext() else {
-return Err(MixnetClientError::InvalidPayload);
-};
-let Ok(fragment) = Fragment::try_from_bytes(&payload_plaintext) else {
-return Err(MixnetClientError::InvalidPayload);
-};
-Ok(fragment)
-}
-
-async fn reconstruct_message(
-fragment_stream: &mut (impl Stream<Item = Result<Fragment>> + Send + Unpin + 'static),
-message_reconstructor: &mut MessageReconstructor,
-) -> Result<Vec<u8>> {
-// Read fragments until at least one message is fully reconstructed.
-while let Some(next) = fragment_stream.next().await {
-match next {
-Ok(fragment) => {
-if let Some(message) =
-Self::try_reconstruct_message(fragment, message_reconstructor)?
-{
-return Ok(message);
-}
-}
-Err(e) => {
-return Err(e);
-}
-}
-}
-
-// fragment_stream closed before messages are fully reconstructed
-Err(MixnetClientError::MixnetNodeStreamClosed)
-}
-
-fn try_reconstruct_message(
-fragment: Fragment,
-message_reconstructor: &mut MessageReconstructor,
-) -> Result<Option<Vec<u8>>> {
-let reconstruction_result = message_reconstructor.insert_new_fragment(fragment);
-match reconstruction_result {
-Some((padded_message, _)) => {
-let message = Self::remove_padding(padded_message)?;
-Ok(Some(message))
-}
-None => Ok(None),
-}
-}
-
-fn remove_padding(msg: Vec<u8>) -> Result<Vec<u8>> {
-let padded_message = PaddedMessage::new_reconstructed(msg);
-// we need this because PaddedMessage.remove_padding requires it for other NymMessage types.
-let dummy_num_mix_hops = 0;
-
-match padded_message.remove_padding(dummy_num_mix_hops)? {
-NymMessage::Plain(msg) => Ok(msg),
-_ => Err(MixnetClientError::InvalidFragment),
-}
-}
-}
@@ -1,231 +0,0 @@
-use std::{net::SocketAddr, time::Duration};
-
-use mixnet_protocol::{Body, ProtocolError};
-use mixnet_topology::MixnetTopology;
-use mixnet_util::ConnectionPool;
-use nym_sphinx::{
-addressing::nodes::NymNodeRoutingAddress, chunking::fragment::Fragment, message::NymMessage,
-params::PacketSize, Delay, Destination, DestinationAddressBytes, NodeAddressBytes,
-IDENTIFIER_LENGTH, PAYLOAD_OVERHEAD_SIZE,
-};
-use rand::{distributions::Uniform, prelude::Distribution, Rng};
-use sphinx_packet::{route, SphinxPacket, SphinxPacketBuilder};
-
-use super::error::*;
-
-// Sender splits messages into Sphinx packets and sends them to the Mixnet.
-pub struct Sender<R: Rng> {
-//TODO: handle topology update
-topology: MixnetTopology,
-pool: ConnectionPool,
-max_retries: usize,
-retry_delay: Duration,
-rng: R,
-}
-
-impl<R: Rng> Sender<R> {
-pub fn new(
-topology: MixnetTopology,
-pool: ConnectionPool,
-rng: R,
-max_retries: usize,
-retry_delay: Duration,
-) -> Self {
-Self {
-topology,
-rng,
-pool,
-max_retries,
-retry_delay,
-}
-}
-
-pub fn send(&mut self, msg: Vec<u8>, total_delay: Duration) -> Result<()> {
-let destination = self.topology.random_destination(&mut self.rng)?;
-let destination = Destination::new(
-DestinationAddressBytes::from_bytes(destination.address.as_bytes()),
-[0; IDENTIFIER_LENGTH], // TODO: use a proper SURBIdentifier if we need SURB
-);
-
-self.pad_and_split_message(msg)
-.into_iter()
-.map(|fragment| self.build_sphinx_packet(fragment, &destination, total_delay))
-.collect::<Result<Vec<_>>>()?
-.into_iter()
-.for_each(|(packet, first_node)| {
-let pool = self.pool.clone();
-let max_retries = self.max_retries;
-let retry_delay = self.retry_delay;
-tokio::spawn(async move {
-if let Err(e) = Self::send_packet(
-&pool,
-max_retries,
-retry_delay,
-Box::new(packet),
-first_node.address,
-)
-.await
-{
-tracing::error!("failed to send packet to the first node: {e}");
-}
-});
-});
-
-Ok(())
-}
-
-fn pad_and_split_message(&mut self, msg: Vec<u8>) -> Vec<Fragment> {
-let nym_message = NymMessage::new_plain(msg);
-
-// TODO: add PUBLIC_KEY_SIZE for encryption for the destination,
-// if we're going to encrypt final payloads for the destination.
-// TODO: add ACK_OVERHEAD if we need SURB-ACKs.
-// https://github.com/nymtech/nym/blob/3748ab77a132143d5fd1cd75dd06334d33294815/common/nymsphinx/src/message.rs#L181-L181
-let plaintext_size_per_packet = PacketSize::RegularPacket.plaintext_size();
-
-nym_message
-.pad_to_full_packet_lengths(plaintext_size_per_packet)
-.split_into_fragments(&mut self.rng, plaintext_size_per_packet)
-}
-
-fn build_sphinx_packet(
-&mut self,
-fragment: Fragment,
-destination: &Destination,
-total_delay: Duration,
-) -> Result<(sphinx_packet::SphinxPacket, route::Node)> {
-let route = self.topology.random_route(&mut self.rng)?;
-
-let delays: Vec<Delay> =
-RandomDelayIterator::new(&mut self.rng, route.len() as u64, total_delay)
-.map(|d| Delay::new_from_millis(d.as_millis() as u64))
-.collect();
-
-// TODO: encrypt the payload for the destination, if we want
-// https://github.com/nymtech/nym/blob/3748ab77a132143d5fd1cd75dd06334d33294815/common/nymsphinx/src/preparer/payload.rs#L70
-let payload = fragment.into_bytes();
-
-let packet = SphinxPacketBuilder::new()
-.with_payload_size(payload.len() + PAYLOAD_OVERHEAD_SIZE)
-.build_packet(payload, &route, destination, &delays)
-.map_err(ProtocolError::InvalidSphinxPacket)?;
-
-let first_mixnode = route.first().cloned().expect("route is not empty");
-
-Ok((packet, first_mixnode))
-}
-
-async fn send_packet(
-pool: &ConnectionPool,
-max_retries: usize,
-retry_delay: Duration,
-packet: Box<SphinxPacket>,
-addr: NodeAddressBytes,
-) -> Result<()> {
-let addr = SocketAddr::from(NymNodeRoutingAddress::try_from(addr)?);
-tracing::debug!("Sending a Sphinx packet to the node: {addr:?}");
-
-let mu: std::sync::Arc<tokio::sync::Mutex<tokio::net::TcpStream>> =
-pool.get_or_init(&addr).await?;
-let arc_socket = mu.clone();
-
-let body = Body::SphinxPacket(packet);
-
-if let Err(e) = {
-let mut socket = mu.lock().await;
-body.write(&mut *socket).await
-} {
-tracing::error!("Failed to send packet to {addr} with error: {e}. Retrying...");
-return mixnet_protocol::retry_backoff(
-addr,
-max_retries,
-retry_delay,
-body,
-arc_socket,
-)
-.await
-.map_err(Into::into);
-}
-Ok(())
-}
-}
-
-struct RandomDelayIterator<R> {
-rng: R,
-remaining_delays: u64,
-remaining_time: u64,
-avg_delay: u64,
-}
-
-impl<R> RandomDelayIterator<R> {
-fn new(rng: R, total_delays: u64, total_time: Duration) -> Self {
-let total_time = total_time.as_millis() as u64;
-RandomDelayIterator {
-rng,
-remaining_delays: total_delays,
-remaining_time: total_time,
-avg_delay: total_time / total_delays,
-}
-}
-}
-
-impl<R> Iterator for RandomDelayIterator<R>
-where
-R: Rng,
-{
-type Item = Duration;
-
-fn next(&mut self) -> Option<Duration> {
-if self.remaining_delays == 0 {
-return None;
-}
-
-self.remaining_delays -= 1;
-
-if self.remaining_delays == 1 {
-return Some(Duration::from_millis(self.remaining_time));
-}
-
-// Calculate bounds to avoid extreme values
-let upper_bound = (self.avg_delay as f64 * 1.5)
-// guarantee that we don't exceed the remaining time and promise the delay we return is
-// at least 1ms.
-.min(self.remaining_time.saturating_sub(self.remaining_delays) as f64);
-let lower_bound = (self.avg_delay as f64 * 0.5).min(upper_bound);
-
-let delay = Uniform::new_inclusive(lower_bound, upper_bound).sample(&mut self.rng) as u64;
-self.remaining_time = self.remaining_time.saturating_sub(delay);
-
-Some(Duration::from_millis(delay))
-}
-}
-
-#[cfg(test)]
-mod tests {
-use std::time::Duration;
-
-use super::RandomDelayIterator;
-
-const TOTAL_DELAYS: u64 = 3;
-
-#[test]
-fn test_random_delay_iter_zero_total_time() {
-let mut delays = RandomDelayIterator::new(rand::thread_rng(), TOTAL_DELAYS, Duration::ZERO);
-for _ in 0..TOTAL_DELAYS {
-assert!(delays.next().is_some());
-}
-assert!(delays.next().is_none());
-}
-
-#[test]
-fn test_random_delay_iter_small_total_time() {
-let mut delays =
-RandomDelayIterator::new(rand::thread_rng(), TOTAL_DELAYS, Duration::from_millis(1));
-let mut d = Duration::ZERO;
-for _ in 0..TOTAL_DELAYS {
-d += delays.next().unwrap();
-}
-assert!(delays.next().is_none());
-assert_eq!(d, Duration::from_millis(1));
-}
-}
@@ -1,18 +0,0 @@
-[package]
-name = "mixnet-node"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-serde = { version = "1.0", features = ["derive"] }
-tracing = "0.1.37"
-tokio = { version = "1.32", features = ["net", "time", "signal"] }
-thiserror = "1"
-sphinx-packet = "0.1.0"
-nym-sphinx = { package = "nym-sphinx", git = "https://github.com/nymtech/nym", tag = "v1.1.22" }
-mixnet-protocol = { path = "../protocol" }
-mixnet-topology = { path = "../topology" }
-mixnet-util = { path = "../util" }
-
-[dev-dependencies]
-tokio = {version = "1.32", features =["full"]}
@@ -1,49 +0,0 @@
-use std::net::SocketAddr;
-
-use mixnet_protocol::Body;
-use tokio::{
-net::{TcpListener, TcpStream},
-sync::mpsc,
-};
-
-pub struct ClientNotifier {}
-
-impl ClientNotifier {
-pub async fn run(
-listen_address: SocketAddr,
-mut rx: mpsc::Receiver<Body>,
-) -> super::Result<()> {
-let listener = TcpListener::bind(listen_address)
-.await
-.map_err(super::ProtocolError::IO)?;
-tracing::info!("Listening mixnet client connections: {listen_address}");
-
-// Currently, handling only a single incoming connection
-// TODO: consider handling multiple clients
-loop {
-match listener.accept().await {
-Ok((socket, remote_addr)) => {
-tracing::debug!("Accepted incoming client connection from {remote_addr}");
-
-if let Err(e) = Self::handle_connection(socket, &mut rx).await {
-tracing::error!("failed to handle conn: {e}");
-}
-}
-Err(e) => tracing::warn!("Failed to accept incoming client connection: {e}"),
-}
-}
-}
-
-async fn handle_connection(
-mut socket: TcpStream,
-rx: &mut mpsc::Receiver<Body>,
-) -> super::Result<()> {
-while let Some(body) = rx.recv().await {
-if let Err(e) = body.write(&mut socket).await {
-return Err(super::MixnetNodeError::Client(e));
-}
-}
-tracing::debug!("body receiver closed");
-Ok(())
-}
-}
@@ -1,61 +0,0 @@
-use std::{
-net::{Ipv4Addr, SocketAddr, SocketAddrV4},
-time::Duration,
-};
-
-use nym_sphinx::{PrivateKey, PublicKey};
-use serde::{Deserialize, Serialize};
-use sphinx_packet::crypto::{PRIVATE_KEY_SIZE, PUBLIC_KEY_SIZE};
-
-#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
-pub struct MixnetNodeConfig {
-/// A listen address for receiving Sphinx packets
-pub listen_address: SocketAddr,
-/// An listen address for communicating with mixnet clients
-pub client_listen_address: SocketAddr,
-/// A key for decrypting Sphinx packets
-pub private_key: [u8; PRIVATE_KEY_SIZE],
-/// The size of the connection pool.
-#[serde(default = "MixnetNodeConfig::default_connection_pool_size")]
-pub connection_pool_size: usize,
-/// The maximum number of retries.
-#[serde(default = "MixnetNodeConfig::default_max_retries")]
-pub max_retries: usize,
-/// The retry delay between retries.
-#[serde(default = "MixnetNodeConfig::default_retry_delay")]
-pub retry_delay: Duration,
-}
-
-impl Default for MixnetNodeConfig {
-fn default() -> Self {
-Self {
-listen_address: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 7777)),
-client_listen_address: SocketAddr::V4(SocketAddrV4::new(
-Ipv4Addr::new(127, 0, 0, 1),
-7778,
-)),
-private_key: PrivateKey::new().to_bytes(),
-connection_pool_size: 255,
-max_retries: 3,
-retry_delay: Duration::from_secs(5),
-}
-}
-}
-
-impl MixnetNodeConfig {
-const fn default_connection_pool_size() -> usize {
-255
-}
-
-const fn default_max_retries() -> usize {
-3
-}
-
-const fn default_retry_delay() -> Duration {
-Duration::from_secs(5)
-}
-
-pub fn public_key(&self) -> [u8; PUBLIC_KEY_SIZE] {
-*PublicKey::from(&PrivateKey::from(self.private_key)).as_bytes()
-}
-}
@@ -1,313 +0,0 @@
-mod client_notifier;
-pub mod config;
-
-use std::{collections::HashMap, net::SocketAddr, time::Duration};
-
-use client_notifier::ClientNotifier;
-pub use config::MixnetNodeConfig;
-use mixnet_protocol::{Body, ProtocolError};
-use mixnet_topology::MixnetNodeId;
-use nym_sphinx::{
-addressing::nodes::{NymNodeRoutingAddress, NymNodeRoutingAddressError},
-Delay, DestinationAddressBytes, NodeAddressBytes, PrivateKey,
-};
-pub use sphinx_packet::crypto::PRIVATE_KEY_SIZE;
-use sphinx_packet::{crypto::PUBLIC_KEY_SIZE, ProcessedPacket, SphinxPacket};
-use tokio::{
-net::{TcpListener, TcpStream},
-sync::mpsc,
-};
-
-pub type Result<T> = core::result::Result<T, MixnetNodeError>;
-
-#[derive(Debug, thiserror::Error)]
-pub enum MixnetNodeError {
-#[error("{0}")]
-Protocol(#[from] ProtocolError),
-#[error("invalid routing address: {0}")]
-InvalidRoutingAddress(#[from] NymNodeRoutingAddressError),
-#[error("send error: {0}")]
-PacketSendError(#[from] tokio::sync::mpsc::error::SendError<Packet>),
-#[error("send error: fail to send {0} to client")]
-ClientSendError(#[from] tokio::sync::mpsc::error::TrySendError<Body>),
-#[error("client: {0}")]
-Client(ProtocolError),
-}
-
-// A mix node that routes packets in the Mixnet.
-pub struct MixnetNode {
-config: MixnetNodeConfig,
-}
-
-impl MixnetNode {
-pub fn new(config: MixnetNodeConfig) -> Self {
-Self { config }
-}
-
-pub fn id(&self) -> MixnetNodeId {
-self.public_key()
-}
-
-pub fn public_key(&self) -> [u8; PUBLIC_KEY_SIZE] {
-self.config.public_key()
-}
-
-const CLIENT_NOTI_CHANNEL_SIZE: usize = 100;
-
-pub async fn run(self) -> Result<()> {
-tracing::info!("Public key: {:?}", self.public_key());
-
-// Spawn a ClientNotifier
-let (client_tx, client_rx) = mpsc::channel(Self::CLIENT_NOTI_CHANNEL_SIZE);
-tokio::spawn(async move {
-if let Err(e) = ClientNotifier::run(self.config.client_listen_address, client_rx).await
-{
-tracing::error!("failed to run client notifier: {e}");
-}
-});
-
-//TODO: Accepting ad-hoc TCP conns for now. Improve conn handling.
-let listener = TcpListener::bind(self.config.listen_address)
-.await
-.map_err(ProtocolError::IO)?;
-tracing::info!(
-"Listening mixnet node connections: {}",
-self.config.listen_address
-);
-
-let (tx, rx) = mpsc::unbounded_channel();
-
-let packet_forwarder = PacketForwarder::new(tx.clone(), rx, self.config);
-
-tokio::spawn(async move {
-packet_forwarder.run().await;
-});
-
-let runner = MixnetNodeRunner {
-config: self.config,
-client_tx,
-packet_tx: tx,
-};
-
-loop {
-tokio::select! {
-res = listener.accept() => {
-match res {
-Ok((socket, remote_addr)) => {
-tracing::debug!("Accepted incoming connection from {remote_addr:?}");
-
-let runner = runner.clone();
-tokio::spawn(async move {
-if let Err(e) = runner.handle_connection(socket).await {
-tracing::error!("failed to handle conn: {e}");
-}
-});
-}
-Err(e) => tracing::warn!("Failed to accept incoming connection: {e}"),
-}
-}
-_ = tokio::signal::ctrl_c() => {
-tracing::info!("Shutting down...");
-return Ok(());
-}
-}
-}
-}
-}
-
-#[derive(Clone)]
-struct MixnetNodeRunner {
-config: MixnetNodeConfig,
-client_tx: mpsc::Sender<Body>,
-packet_tx: mpsc::UnboundedSender<Packet>,
-}
-
-impl MixnetNodeRunner {
-async fn handle_connection(&self, mut socket: TcpStream) -> Result<()> {
-loop {
-let body = Body::read(&mut socket).await?;
-let this = self.clone();
-tokio::spawn(async move {
-if let Err(e) = this.handle_body(body).await {
-tracing::error!("failed to handle body: {e}");
-}
-});
-}
-}
-
-async fn handle_body(&self, pkt: Body) -> Result<()> {
-match pkt {
-Body::SphinxPacket(packet) => self.handle_sphinx_packet(packet).await,
-Body::FinalPayload(payload) => {
-self.forward_body_to_client_notifier(Body::FinalPayload(payload))
-.await
-}
-_ => unreachable!(),
-}
-}
-
-async fn handle_sphinx_packet(&self, packet: Box<SphinxPacket>) -> Result<()> {
-match packet
-.process(&PrivateKey::from(self.config.private_key))
-.map_err(ProtocolError::InvalidSphinxPacket)?
-{
-ProcessedPacket::ForwardHop(packet, next_node_addr, delay) => {
-self.forward_packet_to_next_hop(Body::SphinxPacket(packet), next_node_addr, delay)
-.await
-}
-ProcessedPacket::FinalHop(destination_addr, _, payload) => {
-self.forward_payload_to_destination(Body::FinalPayload(payload), destination_addr)
-.await
-}
-}
-}
-
-async fn forward_body_to_client_notifier(&self, body: Body) -> Result<()> {
-// TODO: Decrypt the final payload using the private key, if it's encrypted
-
-// Do not wait when the channel is full or no receiver exists
-self.client_tx.try_send(body)?;
-Ok(())
-}
-
-async fn forward_packet_to_next_hop(
-&self,
-packet: Body,
-next_node_addr: NodeAddressBytes,
-delay: Delay,
-) -> Result<()> {
-tracing::debug!("Delaying the packet for {delay:?}");
-tokio::time::sleep(delay.to_duration()).await;
-
-self.forward(packet, NymNodeRoutingAddress::try_from(next_node_addr)?)
-.await
-}
-
-async fn forward_payload_to_destination(
-&self,
-payload: Body,
-destination_addr: DestinationAddressBytes,
-) -> Result<()> {
-tracing::debug!("Forwarding final payload to destination mixnode");
-
-self.forward(
-payload,
-NymNodeRoutingAddress::try_from_bytes(&destination_addr.as_bytes())?,
-)
-.await
-}
-
-async fn forward(&self, pkt: Body, to: NymNodeRoutingAddress) -> Result<()> {
-let addr = SocketAddr::from(to);
-
-self.packet_tx.send(Packet::new(addr, pkt))?;
-Ok(())
-}
-}
-
-struct PacketForwarder {
-config: MixnetNodeConfig,
-packet_rx: mpsc::UnboundedReceiver<Packet>,
-packet_tx: mpsc::UnboundedSender<Packet>,
-connections: HashMap<SocketAddr, TcpStream>,
-}
-
-impl PacketForwarder {
-pub fn new(
-packet_tx: mpsc::UnboundedSender<Packet>,
-packet_rx: mpsc::UnboundedReceiver<Packet>,
-config: MixnetNodeConfig,
-) -> Self {
-Self {
-packet_tx,
-packet_rx,
-connections: HashMap::with_capacity(config.connection_pool_size),
-config,
-}
-}
-
-pub async fn run(mut self) {
-loop {
-tokio::select! {
-pkt = self.packet_rx.recv() => {
-if let Some(pkt) = pkt {
-self.send(pkt).await;
-} else {
-unreachable!("Packet channel should not be closed, because PacketForwarder is also holding the send half");
-}
-},
-_ = tokio::signal::ctrl_c() => {
-tracing::info!("Shutting down packet forwarder task...");
-return;
-}
-}
-}
-}
-
-async fn try_send(&mut self, target: SocketAddr, body: &Body) -> Result<()> {
-if let std::collections::hash_map::Entry::Vacant(e) = self.connections.entry(target) {
-match TcpStream::connect(target).await {
-Ok(tcp) => {
-e.insert(tcp);
-}
-Err(e) => {
-tracing::error!("failed to connect to {}: {e}", target);
-return Err(MixnetNodeError::Protocol(e.into()));
-}
-}
-}
-Ok(body
-.write(self.connections.get_mut(&target).unwrap())
-.await?)
-}
-
-async fn send(&mut self, pkt: Packet) {
-if let Err(err) = self.try_send(pkt.target, &pkt.body).await {
-match err {
-MixnetNodeError::Protocol(ProtocolError::IO(e))
-if e.kind() == std::io::ErrorKind::Unsupported =>
-{
-tracing::error!("fail to send message to {}: {e}", pkt.target);
-}
-_ => self.handle_retry(pkt),
-}
-}
-}
-
-fn handle_retry(&self, mut pkt: Packet) {
-if pkt.retry_count < self.config.max_retries {
-let delay = Duration::from_millis(
-(self.config.retry_delay.as_millis() as u64).pow(pkt.retry_count as u32),
-);
-let tx = self.packet_tx.clone();
-tokio::spawn(async move {
-tokio::time::sleep(delay).await;
-pkt.retry_count += 1;
-if let Err(e) = tx.send(pkt) {
-tracing::error!("fail to enqueue retry message: {e}");
-}
-});
-} else {
-tracing::error!(
-"fail to send message to {}: reach maximum retries",
-pkt.target
-);
-}
-}
-}
-
-pub struct Packet {
-target: SocketAddr,
-body: Body,
-retry_count: usize,
-}
-
-impl Packet {
-fn new(target: SocketAddr, body: Body) -> Self {
-Self {
-target,
-body,
-retry_count: 0,
-}
-}
-}
@@ -1,13 +0,0 @@
-[package]
-name = "mixnet-protocol"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-tokio = { version = "1.32", features = ["sync", "net", "time"] }
-sphinx-packet = "0.1.0"
-futures = "0.3"
-tokio-util = { version = "0.7", features = ["io", "io-util"] }
-thiserror = "1"
@ -1,149 +0,0 @@
use sphinx_packet::{payload::Payload, SphinxPacket};

use std::{io::ErrorKind, net::SocketAddr, sync::Arc, time::Duration};

use tokio::{
    io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
    net::TcpStream,
    sync::Mutex,
};

pub type Result<T> = core::result::Result<T, ProtocolError>;

#[derive(Debug, thiserror::Error)]
pub enum ProtocolError {
    #[error("Unknown body type {0}")]
    UnknownBodyType(u8),
    #[error("{0}")]
    InvalidSphinxPacket(sphinx_packet::Error),
    #[error("{0}")]
    InvalidPayload(sphinx_packet::Error),
    #[error("{0}")]
    IO(#[from] io::Error),
    #[error("fail to send packet, reach maximum retries {0}")]
    ReachMaxRetries(usize),
}

#[non_exhaustive]
pub enum Body {
    SphinxPacket(Box<SphinxPacket>),
    FinalPayload(Payload),
}

impl Body {
    pub fn new_sphinx(packet: Box<SphinxPacket>) -> Self {
        Self::SphinxPacket(packet)
    }

    pub fn new_final_payload(payload: Payload) -> Self {
        Self::FinalPayload(payload)
    }

    fn variant_as_u8(&self) -> u8 {
        match self {
            Self::SphinxPacket(_) => 0,
            Self::FinalPayload(_) => 1,
        }
    }

    pub async fn read<R>(reader: &mut R) -> Result<Body>
    where
        R: AsyncRead + Unpin,
    {
        let id = reader.read_u8().await?;
        match id {
            0 => Self::read_sphinx_packet(reader).await,
            1 => Self::read_final_payload(reader).await,
            id => Err(ProtocolError::UnknownBodyType(id)),
        }
    }

    fn sphinx_packet_from_bytes(data: &[u8]) -> Result<Self> {
        SphinxPacket::from_bytes(data)
            .map(|packet| Self::new_sphinx(Box::new(packet)))
            .map_err(ProtocolError::InvalidPayload)
    }

    async fn read_sphinx_packet<R>(reader: &mut R) -> Result<Body>
    where
        R: AsyncRead + Unpin,
    {
        let size = reader.read_u64().await?;
        let mut buf = vec![0; size as usize];
        reader.read_exact(&mut buf).await?;
        Self::sphinx_packet_from_bytes(&buf)
    }

    pub fn final_payload_from_bytes(data: &[u8]) -> Result<Self> {
        Payload::from_bytes(data)
            .map(Self::new_final_payload)
            .map_err(ProtocolError::InvalidPayload)
    }

    async fn read_final_payload<R>(reader: &mut R) -> Result<Body>
    where
        R: AsyncRead + Unpin,
    {
        let size = reader.read_u64().await?;
        let mut buf = vec![0; size as usize];
        reader.read_exact(&mut buf).await?;

        Self::final_payload_from_bytes(&buf)
    }

    pub async fn write<W>(&self, writer: &mut W) -> Result<()>
    where
        W: AsyncWrite + Unpin + ?Sized,
    {
        let variant = self.variant_as_u8();
        writer.write_u8(variant).await?;
        match self {
            Self::SphinxPacket(packet) => {
                let data = packet.to_bytes();
                writer.write_u64(data.len() as u64).await?;
                writer.write_all(&data).await?;
            }
            Self::FinalPayload(payload) => {
                let data = payload.as_bytes();
                writer.write_u64(data.len() as u64).await?;
                writer.write_all(data).await?;
            }
        }
        Ok(())
    }
}

pub async fn retry_backoff(
    peer_addr: SocketAddr,
    max_retries: usize,
    retry_delay: Duration,
    body: Body,
    socket: Arc<Mutex<TcpStream>>,
) -> Result<()> {
    for idx in 0..max_retries {
        // backoff
        let wait = Duration::from_millis((retry_delay.as_millis() as u64).pow(idx as u32));
        tokio::time::sleep(wait).await;
        let mut socket = socket.lock().await;
        match body.write(&mut *socket).await {
            Ok(_) => return Ok(()),
            Err(e) => {
                match &e {
                    ProtocolError::IO(err) => {
                        match err.kind() {
                            ErrorKind::Unsupported => return Err(e),
                            _ => {
                                // update the connection
                                if let Ok(tcp) = TcpStream::connect(peer_addr).await {
                                    *socket = tcp;
                                }
                            }
                        }
                    }
                    _ => return Err(e),
                }
            }
        }
    }
    Err(ProtocolError::ReachMaxRetries(max_retries))
}
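The removed protocol above frames each body on the wire as a variant tag, a length, and the raw bytes. The following standalone sketch (not part of the removed crate) reproduces that layout with plain byte operations; it assumes the big-endian length that tokio's write_u64 produces.

// Sketch of the legacy frame: [variant: u8][len: u64 big-endian][bytes].
fn frame(variant: u8, data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(1 + 8 + data.len());
    out.push(variant); // 0 = SphinxPacket, 1 = FinalPayload
    out.extend_from_slice(&(data.len() as u64).to_be_bytes());
    out.extend_from_slice(data);
    out
}

fn main() {
    let frame = frame(1, b"payload bytes");
    assert_eq!(frame[0], 1);
    assert_eq!(u64::from_be_bytes(frame[1..9].try_into().unwrap()), 13);
}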
@ -0,0 +1,53 @@
use std::net::SocketAddr;

use nym_sphinx_addressing::nodes::NymNodeRoutingAddress;
use serde::{Deserialize, Serialize};

use crate::error::MixnetError;

/// Represents an address of a mix node.
///
/// This just contains a single [`SocketAddr`], but has conversion functions
/// for various address types defined in the `sphinx-packet` crate.
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub struct NodeAddress(SocketAddr);

impl From<SocketAddr> for NodeAddress {
    fn from(address: SocketAddr) -> Self {
        Self(address)
    }
}

impl From<NodeAddress> for SocketAddr {
    fn from(address: NodeAddress) -> Self {
        address.0
    }
}

impl TryInto<sphinx_packet::route::NodeAddressBytes> for NodeAddress {
    type Error = MixnetError;

    fn try_into(self) -> Result<sphinx_packet::route::NodeAddressBytes, Self::Error> {
        Ok(NymNodeRoutingAddress::from(SocketAddr::from(self)).try_into()?)
    }
}

impl TryFrom<sphinx_packet::route::NodeAddressBytes> for NodeAddress {
    type Error = MixnetError;

    fn try_from(value: sphinx_packet::route::NodeAddressBytes) -> Result<Self, Self::Error> {
        Ok(Self::from(SocketAddr::from(
            NymNodeRoutingAddress::try_from(value)?,
        )))
    }
}

impl TryFrom<sphinx_packet::route::DestinationAddressBytes> for NodeAddress {
    type Error = MixnetError;

    fn try_from(value: sphinx_packet::route::DestinationAddressBytes) -> Result<Self, Self::Error> {
        Self::try_from(sphinx_packet::route::NodeAddressBytes::from_bytes(
            value.as_bytes(),
        ))
    }
}
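A brief usage sketch for the conversions above, assuming it compiles inside the crate so that NodeAddress and MixnetError are in scope; the concrete socket address passed in is arbitrary.

use std::net::SocketAddr;

// Round-trips a socket address through the sphinx-packet representation.
fn address_roundtrip(addr: SocketAddr) -> Result<(), MixnetError> {
    let node_address = NodeAddress::from(addr);
    let bytes: sphinx_packet::route::NodeAddressBytes = node_address.try_into()?;
    let recovered = NodeAddress::try_from(bytes)?;
    assert_eq!(node_address, recovered);
    Ok(())
}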
@ -0,0 +1,183 @@
use std::{collections::VecDeque, num::NonZeroU8};

use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc;

use crate::{error::MixnetError, packet::Packet, poisson::Poisson, topology::MixnetTopology};

/// Mix client implementation that is used to schedule messages to be sent to the mixnet.
/// Messages inserted into the [`MessageQueue`] are scheduled according to the Poisson intervals
/// and are returned from [`MixClient::next()`] when they are ready to be sent to the mixnet.
/// If no messages have been inserted into the [`MessageQueue`], cover packets are generated and
/// returned from [`MixClient::next()`].
pub struct MixClient {
    packet_rx: mpsc::UnboundedReceiver<Packet>,
}

struct MixClientRunner {
    config: MixClientConfig,
    poisson: Poisson,
    message_queue: mpsc::Receiver<Vec<u8>>,
    real_packet_queue: VecDeque<Packet>,
    packet_tx: mpsc::UnboundedSender<Packet>,
}

/// Mix client configuration
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MixClientConfig {
    /// Mixnet topology
    pub topology: MixnetTopology,
    /// Poisson rate for packet emissions (per minute)
    pub emission_rate_per_min: f64,
    /// Packet redundancy for passive retransmission
    pub redundancy: NonZeroU8,
}

const MESSAGE_QUEUE_SIZE: usize = 256;

/// Queue for sending messages to [`MixClient`]
pub type MessageQueue = mpsc::Sender<Vec<u8>>;

impl MixClient {
    /// Creates a [`MixClient`] and a [`MessageQueue`].
    ///
    /// This returns [`MixnetError`] if the given `config` is invalid.
    pub fn new(config: MixClientConfig) -> Result<(Self, MessageQueue), MixnetError> {
        let poisson = Poisson::new(config.emission_rate_per_min)?;
        let (tx, rx) = mpsc::channel(MESSAGE_QUEUE_SIZE);
        let (packet_tx, packet_rx) = mpsc::unbounded_channel();

        MixClientRunner {
            config,
            poisson,
            message_queue: rx,
            real_packet_queue: VecDeque::new(),
            packet_tx,
        }
        .run();

        Ok((Self { packet_rx }, tx))
    }

    /// Returns a next [`Packet`] to be emitted, if it exists and the Poisson timer is done.
    pub async fn next(&mut self) -> Option<Packet> {
        self.packet_rx.recv().await
    }
}

impl MixClientRunner {
    fn run(mut self) {
        tokio::spawn(async move {
            let mut delay = tokio::time::sleep(self.poisson.interval(&mut OsRng));
            loop {
                let next_deadline = delay.deadline() + self.poisson.interval(&mut OsRng);
                delay.await;
                delay = tokio::time::sleep_until(next_deadline);

                match self.next_packet().await {
                    Ok(packet) => {
                        // packet_tx is always expected to be not closed/dropped.
                        self.packet_tx.send(packet).unwrap();
                    }
                    Err(e) => {
                        tracing::error!(
                            "failed to find a next packet to emit. skipping to the next turn: {e}"
                        );
                    }
                }
            }
        });
    }

    const DROP_COVER_MSG: &'static [u8] = b"drop cover";

    async fn next_packet(&mut self) -> Result<Packet, MixnetError> {
        if let Some(packet) = self.real_packet_queue.pop_front() {
            return Ok(packet);
        }

        match self.message_queue.try_recv() {
            Ok(msg) => {
                for packet in Packet::build_real(msg, &self.config.topology)? {
                    for _ in 0..self.config.redundancy.get() {
                        self.real_packet_queue.push_back(packet.clone());
                    }
                }
                Ok(self
                    .real_packet_queue
                    .pop_front()
                    .expect("real packet queue should not be empty"))
            }
            Err(_) => {
                let mut packets = Packet::build_drop_cover(
                    Vec::from(Self::DROP_COVER_MSG),
                    &self.config.topology,
                )?;
                Ok(packets.pop().expect("drop cover should not be empty"))
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use std::{num::NonZeroU8, time::Instant};

    use crate::{
        client::MixClientConfig,
        topology::{
            tests::{gen_entropy, gen_mixnodes},
            MixnetTopology,
        },
    };

    use super::MixClient;

    #[tokio::test]
    async fn poisson_emission() {
        let emission_rate_per_min = 60.0;
        let (mut client, _) = MixClient::new(MixClientConfig {
            topology: MixnetTopology::new(gen_mixnodes(10), 3, 2, gen_entropy()).unwrap(),
            emission_rate_per_min,
            redundancy: NonZeroU8::new(3).unwrap(),
        })
        .unwrap();

        let mut ts = Instant::now();
        let mut intervals = Vec::new();
        for _ in 0..30 {
            assert!(client.next().await.is_some());
            let now = Instant::now();
            intervals.push(now - ts);
            ts = now;
        }

        let avg_sec = intervals.iter().map(|d| d.as_secs()).sum::<u64>() / intervals.len() as u64;
        let expected_avg_sec = (60.0 / emission_rate_per_min) as u64;
        assert!(
            avg_sec.abs_diff(expected_avg_sec) <= 1,
            "{avg_sec} - {expected_avg_sec}"
        );
    }

    #[tokio::test]
    async fn real_packet_emission() {
        let (mut client, msg_queue) = MixClient::new(MixClientConfig {
            topology: MixnetTopology::new(gen_mixnodes(10), 3, 2, gen_entropy()).unwrap(),
            emission_rate_per_min: 360.0,
            redundancy: NonZeroU8::new(3).unwrap(),
        })
        .unwrap();

        msg_queue.send("hello".as_bytes().into()).await.unwrap();

        // Check if the next 3 packets are the same, according to the redundancy
        let packet = client.next().await.unwrap();
        assert_eq!(packet, client.next().await.unwrap());
        assert_eq!(packet, client.next().await.unwrap());

        // Check if the next packet is different (drop cover)
        assert_ne!(packet, client.next().await.unwrap());
    }
}
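A minimal usage sketch for the client above, assuming a tokio runtime and that MixClient, MixClientConfig, MixnetTopology and MixnetError are in scope; the rate and redundancy values are illustrative only.

// Queue one message and drain packets as the Poisson timer releases them.
async fn run_client(topology: MixnetTopology) -> Result<(), MixnetError> {
    let (mut client, message_queue) = MixClient::new(MixClientConfig {
        topology,
        emission_rate_per_min: 60.0,
        redundancy: std::num::NonZeroU8::new(3).unwrap(),
    })?;

    // Real messages go in here; while the queue is empty, `next()` yields drop-cover packets.
    let _ = message_queue.send(b"hello mixnet".to_vec()).await;

    while let Some(packet) = client.next().await {
        // Each packet names the first-hop mix node and carries the body to deliver to it
        // (in the network backend, over a libp2p stream).
        let first_hop = packet.address();
        let body = packet.body();
        let _ = (first_hop, body);
    }
    Ok(())
}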
@ -0,0 +1,34 @@
/// Mixnet Errors
#[derive(thiserror::Error, Debug)]
pub enum MixnetError {
    /// Invalid topology size
    #[error("invalid mixnet topology size")]
    InvalidTopologySize,
    /// Invalid packet flag
    #[error("invalid packet flag")]
    InvalidPacketFlag,
    /// Invalid fragment header
    #[error("invalid fragment header")]
    InvalidFragmentHeader,
    /// Invalid fragment set ID
    #[error("invalid fragment set ID: {0}")]
    InvalidFragmentSetId(#[from] uuid::Error),
    /// Invalid fragment ID
    #[error("invalid fragment ID")]
    InvalidFragmentId,
    /// Message too long
    #[error("message too long: {0} bytes")]
    MessageTooLong(usize),
    /// Invalid message
    #[error("invalid message")]
    InvalidMessage,
    /// Node address error
    #[error("node address error: {0}")]
    NodeAddressError(#[from] nym_sphinx_addressing::nodes::NymNodeRoutingAddressError),
    /// Sphinx packet error
    #[error("sphinx packet error: {0}")]
    SphinxPacketError(#[from] sphinx_packet::Error),
    /// Exponential distribution error
    #[error("exponential distribution error: {0}")]
    ExponentialError(#[from] rand_distr::ExpError),
}
@ -0,0 +1,280 @@
use std::collections::HashMap;

use sphinx_packet::{constants::PAYLOAD_SIZE, payload::PAYLOAD_OVERHEAD_SIZE};
use uuid::Uuid;

use crate::error::MixnetError;

pub(crate) struct FragmentSet(Vec<Fragment>);

impl FragmentSet {
    const MAX_PLAIN_PAYLOAD_SIZE: usize = PAYLOAD_SIZE - PAYLOAD_OVERHEAD_SIZE;
    const CHUNK_SIZE: usize = Self::MAX_PLAIN_PAYLOAD_SIZE - FragmentHeader::SIZE;

    pub(crate) fn new(msg: &[u8]) -> Result<Self, MixnetError> {
        // For now, we don't support more than `u8::MAX + 1` fragments.
        // If needed, we can devise the FragmentSet chaining to support larger messages, like Nym.
        let last_fragment_id = FragmentId::try_from(Self::num_chunks(msg) - 1)
            .map_err(|_| MixnetError::MessageTooLong(msg.len()))?;
        let set_id = FragmentSetId::new();

        Ok(FragmentSet(
            msg.chunks(Self::CHUNK_SIZE)
                .enumerate()
                .map(|(i, chunk)| Fragment {
                    header: FragmentHeader {
                        set_id,
                        last_fragment_id,
                        fragment_id: FragmentId::try_from(i)
                            .expect("i is always in the right range"),
                    },
                    body: Vec::from(chunk),
                })
                .collect(),
        ))
    }

    fn num_chunks(msg: &[u8]) -> usize {
        msg.len().div_ceil(Self::CHUNK_SIZE)
    }
}

impl AsRef<Vec<Fragment>> for FragmentSet {
    fn as_ref(&self) -> &Vec<Fragment> {
        &self.0
    }
}

#[derive(PartialEq, Eq, Debug, Clone)]
pub(crate) struct Fragment {
    header: FragmentHeader,
    body: Vec<u8>,
}

impl Fragment {
    pub(crate) fn bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(FragmentHeader::SIZE + self.body.len());
        out.extend(self.header.bytes());
        out.extend(&self.body);
        out
    }

    pub(crate) fn from_bytes(value: &[u8]) -> Result<Self, MixnetError> {
        Ok(Self {
            header: FragmentHeader::from_bytes(&value[0..FragmentHeader::SIZE])?,
            body: value[FragmentHeader::SIZE..].to_vec(),
        })
    }
}

#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy)]
struct FragmentSetId(Uuid);

impl FragmentSetId {
    const SIZE: usize = 16;

    fn new() -> Self {
        Self(Uuid::new_v4())
    }
}

#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy)]
struct FragmentId(u8);

impl FragmentId {
    const SIZE: usize = std::mem::size_of::<u8>();
}

impl TryFrom<usize> for FragmentId {
    type Error = MixnetError;

    fn try_from(id: usize) -> Result<Self, Self::Error> {
        if id > u8::MAX as usize {
            return Err(MixnetError::InvalidFragmentId);
        }
        Ok(Self(id as u8))
    }
}

impl From<FragmentId> for usize {
    fn from(id: FragmentId) -> Self {
        id.0 as usize
    }
}

#[derive(PartialEq, Eq, Debug, Clone)]
struct FragmentHeader {
    set_id: FragmentSetId,
    last_fragment_id: FragmentId,
    fragment_id: FragmentId,
}

impl FragmentHeader {
    const SIZE: usize = FragmentSetId::SIZE + 2 * FragmentId::SIZE;

    fn bytes(&self) -> [u8; Self::SIZE] {
        let mut out = [0u8; Self::SIZE];
        out[0..FragmentSetId::SIZE].copy_from_slice(self.set_id.0.as_bytes());
        out[FragmentSetId::SIZE] = self.last_fragment_id.0;
        out[FragmentSetId::SIZE + FragmentId::SIZE] = self.fragment_id.0;
        out
    }

    fn from_bytes(value: &[u8]) -> Result<Self, MixnetError> {
        if value.len() != Self::SIZE {
            return Err(MixnetError::InvalidFragmentHeader);
        }

        Ok(Self {
            set_id: FragmentSetId(Uuid::from_slice(&value[0..FragmentSetId::SIZE])?),
            last_fragment_id: FragmentId(value[FragmentSetId::SIZE]),
            fragment_id: FragmentId(value[FragmentSetId::SIZE + FragmentId::SIZE]),
        })
    }
}

pub struct MessageReconstructor {
    fragment_sets: HashMap<FragmentSetId, FragmentSetReconstructor>,
}

impl MessageReconstructor {
    pub fn new() -> Self {
        Self {
            fragment_sets: HashMap::new(),
        }
    }

    /// Adds a fragment to the reconstructor and tries to reconstruct a message from the fragment set.
    /// This returns `None` if the message has not been reconstructed yet.
    pub fn add_and_reconstruct(&mut self, fragment: Fragment) -> Option<Vec<u8>> {
        let set_id = fragment.header.set_id;
        let reconstructed_msg = self
            .fragment_sets
            .entry(set_id)
            .or_insert(FragmentSetReconstructor::new(
                fragment.header.last_fragment_id,
            ))
            .add(fragment)
            .try_reconstruct_message()?;
        // A message has been reconstructed completely from the fragment set.
        // Delete the fragment set from the reconstructor.
        self.fragment_sets.remove(&set_id);
        Some(reconstructed_msg)
    }
}

struct FragmentSetReconstructor {
    last_fragment_id: FragmentId,
    fragments: HashMap<FragmentId, Fragment>,
    // For mem optimization, accumulates the expected message size
    // whenever a new fragment is added to the `fragments`.
    message_size: usize,
}

impl FragmentSetReconstructor {
    fn new(last_fragment_id: FragmentId) -> Self {
        Self {
            last_fragment_id,
            fragments: HashMap::new(),
            message_size: 0,
        }
    }

    fn add(&mut self, fragment: Fragment) -> &mut Self {
        self.message_size += fragment.body.len();
        if let Some(old_fragment) = self.fragments.insert(fragment.header.fragment_id, fragment) {
            // In the case when a new fragment replaces the old one, adjust the `message_size`.
            // e.g. The same fragment has been received multiple times.
            self.message_size -= old_fragment.body.len();
        }
        self
    }

    /// Merges all fragments gathered if possible
    fn try_reconstruct_message(&self) -> Option<Vec<u8>> {
        (self.fragments.len() - 1 == self.last_fragment_id.into()).then(|| {
            let mut msg = Vec::with_capacity(self.message_size);
            for id in 0..=self.last_fragment_id.0 {
                msg.extend(&self.fragments.get(&FragmentId(id)).unwrap().body);
            }
            msg
        })
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use rand::RngCore;

    use super::*;

    #[test]
    fn fragment_header() {
        let header = FragmentHeader {
            set_id: FragmentSetId::new(),
            last_fragment_id: FragmentId(19),
            fragment_id: FragmentId(0),
        };
        let bz = header.bytes();
        assert_eq!(FragmentHeader::SIZE, bz.len());
        assert_eq!(header, FragmentHeader::from_bytes(bz.as_slice()).unwrap());
    }

    #[test]
    fn fragment() {
        let fragment = Fragment {
            header: FragmentHeader {
                set_id: FragmentSetId::new(),
                last_fragment_id: FragmentId(19),
                fragment_id: FragmentId(0),
            },
            body: vec![1, 2, 3, 4],
        };
        let bz = fragment.bytes();
        assert_eq!(FragmentHeader::SIZE + fragment.body.len(), bz.len());
        assert_eq!(fragment, Fragment::from_bytes(bz.as_slice()).unwrap());
    }

    #[test]
    fn fragment_set() {
        let mut msg = vec![0u8; FragmentSet::CHUNK_SIZE * 3 + FragmentSet::CHUNK_SIZE / 2];
        rand::thread_rng().fill_bytes(&mut msg);

        assert_eq!(4, FragmentSet::num_chunks(&msg));

        let set = FragmentSet::new(&msg).unwrap();
        assert_eq!(4, set.as_ref().iter().len());
        assert_eq!(
            1,
            HashSet::<FragmentSetId>::from_iter(
                set.as_ref().iter().map(|fragment| fragment.header.set_id)
            )
            .len()
        );
        set.as_ref()
            .iter()
            .enumerate()
            .for_each(|(i, fragment)| assert_eq!(i, fragment.header.fragment_id.0 as usize));
    }

    #[test]
    fn message_reconstructor() {
        let mut msg = vec![0u8; FragmentSet::CHUNK_SIZE * 2];
        rand::thread_rng().fill_bytes(&mut msg);

        let set = FragmentSet::new(&msg).unwrap();

        let mut reconstructor = MessageReconstructor::new();
        let mut fragments = set.as_ref().iter();
        assert_eq!(
            None,
            reconstructor.add_and_reconstruct(fragments.next().unwrap().clone())
        );
        assert_eq!(
            Some(msg),
            reconstructor.add_and_reconstruct(fragments.next().unwrap().clone())
        );
    }
}
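A small worked example of the fragmentation arithmetic above, assuming the sphinx-packet crate used in the hunk is available: the header is 16 (set id) + 1 (last fragment id) + 1 (fragment id) = 18 bytes, and each chunk carries whatever remains of the plain Sphinx payload after that header. The concrete numbers come from the sphinx-packet constants, so they are computed rather than hard-coded.

use sphinx_packet::{constants::PAYLOAD_SIZE, payload::PAYLOAD_OVERHEAD_SIZE};

fn main() {
    let header_size = 16 + 1 + 1; // FragmentSetId + last FragmentId + FragmentId
    let chunk_size = PAYLOAD_SIZE - PAYLOAD_OVERHEAD_SIZE - header_size;
    // Same message length as the `fragment_set` test: 3.5 chunks rounds up to 4 fragments.
    let msg_len = 3 * chunk_size + chunk_size / 2;
    assert_eq!(msg_len.div_ceil(chunk_size), 4);
    println!("header: {header_size} bytes, chunk payload: {chunk_size} bytes");
}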
@ -0,0 +1,17 @@
//! Mixnet
// #![deny(missing_docs, warnings)]
// #![forbid(unsafe_code)]

/// Mix node address
pub mod address;
/// Mix client
pub mod client;
/// Mixnet errors
pub mod error;
mod fragment;
/// Mix node
pub mod node;
pub mod packet;
mod poisson;
/// Mixnet topology
pub mod topology;
@ -0,0 +1,191 @@
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use sphinx_packet::crypto::{PrivateKey, PRIVATE_KEY_SIZE};
use tokio::sync::mpsc;

use crate::{
    error::MixnetError,
    fragment::{Fragment, MessageReconstructor},
    packet::{Message, Packet, PacketBody},
    poisson::Poisson,
};

/// Mix node implementation that returns Sphinx packets which need to be forwarded to the next mix nodes,
/// or messages reconstructed from Sphinx packets delivered through all mix layers.
pub struct MixNode {
    output_rx: mpsc::UnboundedReceiver<Output>,
}

struct MixNodeRunner {
    _config: MixNodeConfig,
    encryption_private_key: PrivateKey,
    poisson: Poisson,
    packet_queue: mpsc::Receiver<PacketBody>,
    message_reconstructor: MessageReconstructor,
    output_tx: mpsc::UnboundedSender<Output>,
}

/// Mix node configuration
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MixNodeConfig {
    /// Private key for decrypting Sphinx packets
    pub encryption_private_key: [u8; PRIVATE_KEY_SIZE],
    /// Poisson delay rate per minute
    pub delay_rate_per_min: f64,
}

const PACKET_QUEUE_SIZE: usize = 256;

/// Queue for sending packets to [`MixNode`]
pub type PacketQueue = mpsc::Sender<PacketBody>;

impl MixNode {
    /// Creates a [`MixNode`] and a [`PacketQueue`].
    ///
    /// This returns [`MixnetError`] if the given `config` is invalid.
    pub fn new(config: MixNodeConfig) -> Result<(Self, PacketQueue), MixnetError> {
        let encryption_private_key = PrivateKey::from(config.encryption_private_key);
        let poisson = Poisson::new(config.delay_rate_per_min)?;
        let (packet_tx, packet_rx) = mpsc::channel(PACKET_QUEUE_SIZE);
        let (output_tx, output_rx) = mpsc::unbounded_channel();

        let mixnode_runner = MixNodeRunner {
            _config: config,
            encryption_private_key,
            poisson,
            packet_queue: packet_rx,
            message_reconstructor: MessageReconstructor::new(),
            output_tx,
        };
        tokio::spawn(mixnode_runner.run());

        Ok((Self { output_rx }, packet_tx))
    }

    /// Returns a next [`Output`] to be emitted, if it exists and the Poisson delay is done (if necessary).
    pub async fn next(&mut self) -> Option<Output> {
        self.output_rx.recv().await
    }
}

impl MixNodeRunner {
    async fn run(mut self) {
        loop {
            if let Some(packet) = self.packet_queue.recv().await {
                if let Err(e) = self.process_packet(packet) {
                    tracing::error!("failed to process packet. skipping it: {e}");
                }
            }
        }
    }

    fn process_packet(&mut self, packet: PacketBody) -> Result<(), MixnetError> {
        match packet {
            PacketBody::SphinxPacket(packet) => self.process_sphinx_packet(packet.as_ref()),
            PacketBody::Fragment(fragment) => self.process_fragment(fragment.as_ref()),
        }
    }

    fn process_sphinx_packet(&self, packet: &[u8]) -> Result<(), MixnetError> {
        let output = Output::Forward(PacketBody::process_sphinx_packet(
            packet,
            &self.encryption_private_key,
        )?);
        let delay = self.poisson.interval(&mut OsRng);
        let output_tx = self.output_tx.clone();
        tokio::spawn(async move {
            tokio::time::sleep(delay).await;
            // output_tx is always expected to be not closed/dropped.
            output_tx.send(output).unwrap();
        });
        Ok(())
    }

    fn process_fragment(&mut self, fragment: &[u8]) -> Result<(), MixnetError> {
        if let Some(msg) = self
            .message_reconstructor
            .add_and_reconstruct(Fragment::from_bytes(fragment)?)
        {
            match Message::from_bytes(&msg)? {
                Message::Real(msg) => {
                    let output = Output::ReconstructedMessage(msg.into_boxed_slice());
                    self.output_tx
                        .send(output)
                        .expect("output channel shouldn't be closed");
                }
                Message::DropCover(_) => {
                    tracing::debug!("Drop cover message has been reconstructed. Dropping it...");
                }
            }
        }
        Ok(())
    }
}

/// Output that [`MixNode::next`] returns.
#[derive(Debug, PartialEq, Eq)]
pub enum Output {
    /// Packet to be forwarded to the next mix node
    Forward(Packet),
    /// Message reconstructed from [`Packet`]s
    ReconstructedMessage(Box<[u8]>),
}

#[cfg(test)]
mod tests {
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    use sphinx_packet::crypto::PublicKey;

    use crate::{
        packet::Packet,
        topology::{tests::gen_entropy, MixNodeInfo, MixnetTopology},
    };

    use super::*;

    #[tokio::test]
    async fn mixnode() {
        let encryption_private_key = PrivateKey::new();
        let node_info = MixNodeInfo::new(
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1000u16).into(),
            *PublicKey::from(&encryption_private_key).as_bytes(),
        )
        .unwrap();

        let topology = MixnetTopology::new(
            (0..2).map(|_| node_info.clone()).collect(),
            2,
            1,
            gen_entropy(),
        )
        .unwrap();
        let (mut mixnode, packet_queue) = MixNode::new(MixNodeConfig {
            encryption_private_key: encryption_private_key.to_bytes(),
            delay_rate_per_min: 60.0,
        })
        .unwrap();

        let msg = "hello".as_bytes().to_vec();
        let packets = Packet::build_real(msg.clone(), &topology).unwrap();
        let num_packets = packets.len();

        for packet in packets.into_iter() {
            packet_queue.send(packet.body()).await.unwrap();
        }

        for _ in 0..num_packets {
            match mixnode.next().await.unwrap() {
                Output::Forward(packet_to) => {
                    packet_queue.send(packet_to.body()).await.unwrap();
                }
                Output::ReconstructedMessage(_) => unreachable!(),
            }
        }

        assert_eq!(
            Output::ReconstructedMessage(msg.into_boxed_slice()),
            mixnode.next().await.unwrap()
        );
    }
}
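A rough wiring sketch showing how the client and node above are intended to fit together, mirroring the loop in the mixnode test: packets emitted by MixClient are delivered to the PacketQueue of the node addressed by packet.address(). This assumes a tokio context and that the crate's MixClient and PacketQueue types are in scope; for simplicity everything is pushed into a single local queue, where a real backend would instead write to a network stream keyed by the address.

async fn pump(mut client: MixClient, local_queue: PacketQueue) {
    while let Some(packet) = client.next().await {
        // In the network backend, `address` selects which peer's stream to write to.
        let address = packet.address();
        let body = packet.body();
        let _ = address;
        let _ = local_queue.send(body).await;
    }
}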
@ -0,0 +1,226 @@
use std::io;

use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use sphinx_packet::{crypto::PrivateKey, header::delays::Delay};

use crate::{
    address::NodeAddress,
    error::MixnetError,
    fragment::{Fragment, FragmentSet},
    topology::MixnetTopology,
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Packet {
    address: NodeAddress,
    body: PacketBody,
}

impl Packet {
    fn new(processed_packet: sphinx_packet::ProcessedPacket) -> Result<Self, MixnetError> {
        match processed_packet {
            sphinx_packet::ProcessedPacket::ForwardHop(packet, addr, _) => Ok(Packet {
                address: addr.try_into()?,
                body: PacketBody::from(packet.as_ref()),
            }),
            sphinx_packet::ProcessedPacket::FinalHop(addr, _, payload) => Ok(Packet {
                address: addr.try_into()?,
                body: PacketBody::try_from(payload)?,
            }),
        }
    }

    pub(crate) fn build_real(
        msg: Vec<u8>,
        topology: &MixnetTopology,
    ) -> Result<Vec<Packet>, MixnetError> {
        Self::build(Message::Real(msg), topology)
    }

    pub(crate) fn build_drop_cover(
        msg: Vec<u8>,
        topology: &MixnetTopology,
    ) -> Result<Vec<Packet>, MixnetError> {
        Self::build(Message::DropCover(msg), topology)
    }

    fn build(msg: Message, topology: &MixnetTopology) -> Result<Vec<Packet>, MixnetError> {
        let destination = topology.choose_destination();

        let fragment_set = FragmentSet::new(&msg.bytes())?;
        let mut packets = Vec::with_capacity(fragment_set.as_ref().len());
        for fragment in fragment_set.as_ref().iter() {
            let route = topology.gen_route();
            if route.is_empty() {
                // Create a packet that will be directly sent to the mix destination
                packets.push(Packet {
                    address: NodeAddress::try_from(destination.address)?,
                    body: PacketBody::from(fragment),
                });
            } else {
                // Use dummy delays because mixnodes will ignore this value and generate delay randomly by themselves.
                let delays = vec![Delay::new_from_nanos(0); route.len()];
                packets.push(Packet {
                    address: NodeAddress::try_from(route[0].address)?,
                    body: PacketBody::from(&sphinx_packet::SphinxPacket::new(
                        fragment.bytes(),
                        &route,
                        &destination,
                        &delays,
                    )?),
                });
            }
        }
        Ok(packets)
    }

    pub fn address(&self) -> NodeAddress {
        self.address
    }

    pub fn body(self) -> PacketBody {
        self.body
    }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum PacketBody {
    SphinxPacket(Vec<u8>),
    Fragment(Vec<u8>),
}

impl From<&sphinx_packet::SphinxPacket> for PacketBody {
    fn from(packet: &sphinx_packet::SphinxPacket) -> Self {
        Self::SphinxPacket(packet.to_bytes())
    }
}

impl From<&Fragment> for PacketBody {
    fn from(fragment: &Fragment) -> Self {
        Self::Fragment(fragment.bytes())
    }
}

impl TryFrom<sphinx_packet::payload::Payload> for PacketBody {
    type Error = MixnetError;

    fn try_from(payload: sphinx_packet::payload::Payload) -> Result<Self, Self::Error> {
        Ok(Self::Fragment(payload.recover_plaintext()?))
    }
}

impl PacketBody {
    pub async fn write_to<W: AsyncWrite + Unpin + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        match self {
            Self::SphinxPacket(data) => {
                Self::write(writer, PacketBodyFlag::SphinxPacket, data).await
            }
            Self::Fragment(data) => Self::write(writer, PacketBodyFlag::Fragment, data).await,
        }
    }

    async fn write<W: AsyncWrite + Unpin + ?Sized>(
        writer: &mut W,
        flag: PacketBodyFlag,
        data: &[u8],
    ) -> io::Result<()> {
        writer.write_all(&[flag as u8]).await?;
        writer.write_all(&data.len().to_le_bytes()).await?;
        writer.write_all(data).await?;
        Ok(())
    }

    pub async fn read_from<R: AsyncRead + Unpin>(
        reader: &mut R,
    ) -> io::Result<Result<Self, MixnetError>> {
        let mut flag = [0u8; 1];
        reader.read_exact(&mut flag).await?;

        let mut size = [0u8; std::mem::size_of::<usize>()];
        reader.read_exact(&mut size).await?;

        let mut data = vec![0u8; usize::from_le_bytes(size)];
        reader.read_exact(&mut data).await?;

        match PacketBodyFlag::try_from(flag[0]) {
            Ok(PacketBodyFlag::SphinxPacket) => Ok(Ok(PacketBody::SphinxPacket(data))),
            Ok(PacketBodyFlag::Fragment) => Ok(Ok(PacketBody::Fragment(data))),
            Err(e) => Ok(Err(e)),
        }
    }

    pub(crate) fn process_sphinx_packet(
        packet: &[u8],
        private_key: &PrivateKey,
    ) -> Result<Packet, MixnetError> {
        Packet::new(sphinx_packet::SphinxPacket::from_bytes(packet)?.process(private_key)?)
    }
}

#[repr(u8)]
enum PacketBodyFlag {
    SphinxPacket,
    Fragment,
}

impl TryFrom<u8> for PacketBodyFlag {
    type Error = MixnetError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0u8 => Ok(PacketBodyFlag::SphinxPacket),
            1u8 => Ok(PacketBodyFlag::Fragment),
            _ => Err(MixnetError::InvalidPacketFlag),
        }
    }
}

pub(crate) enum Message {
    Real(Vec<u8>),
    DropCover(Vec<u8>),
}

impl Message {
    fn bytes(self) -> Box<[u8]> {
        match self {
            Self::Real(msg) => Self::bytes_with_flag(MessageFlag::Real, msg),
            Self::DropCover(msg) => Self::bytes_with_flag(MessageFlag::DropCover, msg),
        }
    }

    fn bytes_with_flag(flag: MessageFlag, mut msg: Vec<u8>) -> Box<[u8]> {
        let mut out = Vec::with_capacity(1 + msg.len());
        out.push(flag as u8);
        out.append(&mut msg);
        out.into_boxed_slice()
    }

    pub(crate) fn from_bytes(value: &[u8]) -> Result<Self, MixnetError> {
        if value.is_empty() {
            return Err(MixnetError::InvalidMessage);
        }

        match MessageFlag::try_from(value[0])? {
            MessageFlag::Real => Ok(Self::Real(value[1..].into())),
            MessageFlag::DropCover => Ok(Self::DropCover(value[1..].into())),
        }
    }
}

#[repr(u8)]
enum MessageFlag {
    Real,
    DropCover,
}

impl TryFrom<u8> for MessageFlag {
    type Error = MixnetError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0u8 => Ok(MessageFlag::Real),
            1u8 => Ok(MessageFlag::DropCover),
            _ => Err(MixnetError::InvalidPacketFlag),
        }
    }
}
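A standalone sketch of the wire layout produced by PacketBody::write above: a 1-byte flag (0 for SphinxPacket, 1 for Fragment), the payload length encoded as a little-endian usize, then the payload itself. Worth noting as a design observation: encoding the length as usize ties the frame to the platform's pointer width (8 bytes on 64-bit targets).

// Sketch only; reproduces the framing with plain byte operations.
fn encode(flag: u8, data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(1 + std::mem::size_of::<usize>() + data.len());
    out.push(flag);
    out.extend_from_slice(&data.len().to_le_bytes());
    out.extend_from_slice(data);
    out
}

fn main() {
    let encoded = encode(1, b"fragment bytes");
    assert_eq!(encoded[0], 1);
    let mut size = [0u8; std::mem::size_of::<usize>()];
    size.copy_from_slice(&encoded[1..1 + size.len()]);
    assert_eq!(usize::from_le_bytes(size), b"fragment bytes".len());
}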
@ -0,0 +1,94 @@
use std::time::Duration;

use rand::Rng;
use rand_distr::{Distribution, Exp};

use crate::error::MixnetError;

#[allow(dead_code)]
pub struct Poisson(Exp<f64>);

impl Poisson {
    #[allow(dead_code)]
    pub fn new(rate_per_min: f64) -> Result<Self, MixnetError> {
        Ok(Self(Exp::new(rate_per_min)?))
    }

    /// Get a random interval between events that follow a Poisson distribution.
    ///
    /// If events occur according to a Poisson process with rate_per_min,
    /// the interval between events follows the exponential distribution with rate_per_min.
    #[allow(dead_code)]
    pub fn interval<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
        // generate a random value from the distribution
        let interval_min = self.0.sample(rng);
        // convert minutes to seconds
        Duration::from_secs_f64(interval_min * 60.0)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::rngs::OsRng;
    use std::{collections::BTreeMap, time::Duration};

    // Test the interval generation for a specific rate
    #[test]
    fn test_interval_generation() {
        let interval = Poisson::new(1.0).unwrap().interval(&mut OsRng);
        // Check if the interval is within a plausible range
        // This is a basic check; in practice, you may want to perform a statistical test
        assert!(interval > Duration::from_secs(0)); // Must be positive
    }

    // Compute the empirical CDF
    fn empirical_cdf(samples: &[Duration]) -> BTreeMap<Duration, f64> {
        let mut map = BTreeMap::new();
        let n = samples.len() as f64;

        for &sample in samples {
            *map.entry(sample).or_insert(0.0) += 1.0 / n;
        }

        let mut acc = 0.0;
        for value in map.values_mut() {
            acc += *value;
            *value = acc;
        }

        map
    }

    // Compare the empirical CDF to the theoretical CDF
    #[test]
    fn test_distribution_fit() {
        let rate_per_min = 1.0;
        let mut intervals = Vec::new();

        // Generate 10,000 samples
        let poisson = Poisson::new(rate_per_min).unwrap();
        for _ in 0..10_000 {
            intervals.push(poisson.interval(&mut OsRng));
        }

        let empirical = empirical_cdf(&intervals);

        // theoretical CDF for exponential distribution
        let rate_per_sec = rate_per_min / 60.0;
        let theoretical_cdf = |x: f64| 1.0 - (-rate_per_sec * x).exp();

        // Kolmogorov-Smirnov test
        let ks_statistic: f64 = empirical
            .iter()
            .map(|(&k, &v)| {
                let x = k.as_secs_f64();
                (theoretical_cdf(x) - v).abs()
            })
            .fold(0.0, f64::max);

        println!("KS Statistic: {}", ks_statistic);

        assert!(ks_statistic < 0.05, "Distributions differ significantly.");
    }
}
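A quick empirical check of the interval maths above, written as a sketch that assumes the Poisson type shown in this hunk and the rand crate are in scope: with a rate of λ per minute the expected interval is 60/λ seconds, so λ = 30 should average around two seconds, consistent with the poisson_emission test earlier in this diff.

use rand::rngs::OsRng;

// Returns the empirical mean interval in seconds over `samples` draws.
fn mean_interval_secs(rate_per_min: f64, samples: u32) -> f64 {
    let poisson = Poisson::new(rate_per_min).unwrap();
    let total: f64 = (0..samples)
        .map(|_| poisson.interval(&mut OsRng).as_secs_f64())
        .sum();
    total / samples as f64
}

// mean_interval_secs(30.0, 10_000) should come out close to 60.0 / 30.0 = 2.0 seconds.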
@ -0,0 +1,199 @@
use nomos_utils::fisheryates::FisherYatesShuffle;
use rand::Rng;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sphinx_packet::{
    constants::IDENTIFIER_LENGTH,
    crypto::{PublicKey, PUBLIC_KEY_SIZE},
    route::{DestinationAddressBytes, SURBIdentifier},
};

use crate::{address::NodeAddress, error::MixnetError};

/// Defines Mixnet topology construction and route selection
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MixnetTopology {
    mixnode_candidates: Vec<MixNodeInfo>,
    num_layers: usize,
    num_mixnodes_per_layer: usize,
}

impl MixnetTopology {
    /// Generates [MixnetTopology] with random shuffling/sampling using a given entropy.
    ///
    /// # Errors
    ///
    /// This function will return an error if parameters are invalid.
    pub fn new(
        mut mixnode_candidates: Vec<MixNodeInfo>,
        num_layers: usize,
        num_mixnodes_per_layer: usize,
        entropy: [u8; 32],
    ) -> Result<Self, MixnetError> {
        if mixnode_candidates.len() < num_layers * num_mixnodes_per_layer {
            return Err(MixnetError::InvalidTopologySize);
        }

        FisherYatesShuffle::shuffle(&mut mixnode_candidates, entropy);
        Ok(Self {
            mixnode_candidates,
            num_layers,
            num_mixnodes_per_layer,
        })
    }

    /// Selects a mix destination randomly from the last mix layer
    pub(crate) fn choose_destination(&self) -> sphinx_packet::route::Destination {
        let idx_in_layer = rand::thread_rng().gen_range(0..self.num_mixnodes_per_layer);
        let idx = self.num_mixnodes_per_layer * (self.num_layers - 1) + idx_in_layer;
        self.mixnode_candidates[idx].clone().into()
    }

    /// Selects a mix route randomly from all mix layers except the last layer,
    /// so that the caller can append a mix destination to the end of the mix route.
    ///
    /// That is, the caller can generate multiple routes with one mix destination.
    pub(crate) fn gen_route(&self) -> Vec<sphinx_packet::route::Node> {
        let mut route = Vec::with_capacity(self.num_layers);
        for layer in 0..self.num_layers - 1 {
            let idx_in_layer = rand::thread_rng().gen_range(0..self.num_mixnodes_per_layer);
            let idx = self.num_mixnodes_per_layer * layer + idx_in_layer;
            route.push(self.mixnode_candidates[idx].clone().into());
        }
        route
    }
}

/// Mix node information that is used for forwarding packets to the mix node
#[derive(Clone, Debug)]
pub struct MixNodeInfo(sphinx_packet::route::Node);

impl MixNodeInfo {
    /// Creates a [`MixNodeInfo`].
    pub fn new(
        address: NodeAddress,
        public_key: [u8; PUBLIC_KEY_SIZE],
    ) -> Result<Self, MixnetError> {
        Ok(Self(sphinx_packet::route::Node::new(
            address.try_into()?,
            PublicKey::from(public_key),
        )))
    }
}

impl From<MixNodeInfo> for sphinx_packet::route::Node {
    fn from(info: MixNodeInfo) -> Self {
        info.0
    }
}

const DUMMY_SURB_IDENTIFIER: SURBIdentifier = [0u8; IDENTIFIER_LENGTH];

impl From<MixNodeInfo> for sphinx_packet::route::Destination {
    fn from(info: MixNodeInfo) -> Self {
        sphinx_packet::route::Destination::new(
            DestinationAddressBytes::from_bytes(info.0.address.as_bytes()),
            DUMMY_SURB_IDENTIFIER,
        )
    }
}

impl Serialize for MixNodeInfo {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        SerializableMixNodeInfo::try_from(self)
            .map_err(serde::ser::Error::custom)?
            .serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for MixNodeInfo {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Self::try_from(SerializableMixNodeInfo::deserialize(deserializer)?)
            .map_err(serde::de::Error::custom)
    }
}

// Only for serializing/deserializing [`MixNodeInfo`] since [`sphinx_packet::route::Node`] is not serializable.
#[derive(Serialize, Deserialize, Clone, Debug)]
struct SerializableMixNodeInfo {
    address: NodeAddress,
    public_key: [u8; PUBLIC_KEY_SIZE],
}

impl TryFrom<&MixNodeInfo> for SerializableMixNodeInfo {
    type Error = MixnetError;

    fn try_from(info: &MixNodeInfo) -> Result<Self, Self::Error> {
        Ok(Self {
            address: NodeAddress::try_from(info.0.address)?,
            public_key: *info.0.pub_key.as_bytes(),
        })
    }
}

impl TryFrom<SerializableMixNodeInfo> for MixNodeInfo {
    type Error = MixnetError;

    fn try_from(info: SerializableMixNodeInfo) -> Result<Self, Self::Error> {
        Self::new(info.address, info.public_key)
    }
}

#[cfg(test)]
pub mod tests {
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    use rand::RngCore;
    use sphinx_packet::crypto::{PrivateKey, PublicKey};

    use crate::error::MixnetError;

    use super::{MixNodeInfo, MixnetTopology};

    #[test]
    fn shuffle() {
        let candidates = gen_mixnodes(10);
        let topology = MixnetTopology::new(candidates.clone(), 3, 2, gen_entropy()).unwrap();

        assert_eq!(candidates.len(), topology.mixnode_candidates.len());
    }

    #[test]
    fn route_and_destination() {
        let topology = MixnetTopology::new(gen_mixnodes(10), 3, 2, gen_entropy()).unwrap();
        let _ = topology.choose_destination();
        assert_eq!(2, topology.gen_route().len()); // except a destination
    }

    #[test]
    fn invalid_topology_size() {
        // if # of candidates is smaller than the topology size
        assert!(matches!(
            MixnetTopology::new(gen_mixnodes(5), 3, 2, gen_entropy()).err(),
            Some(MixnetError::InvalidTopologySize),
        ));
    }

    pub fn gen_mixnodes(n: usize) -> Vec<MixNodeInfo> {
        (0..n)
            .map(|i| {
                MixNodeInfo::new(
                    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), i as u16).into(),
                    *PublicKey::from(&PrivateKey::new()).as_bytes(),
                )
                .unwrap()
            })
            .collect()
    }

    pub fn gen_entropy() -> [u8; 32] {
        let mut entropy = [0u8; 32];
        rand::thread_rng().fill_bytes(&mut entropy);
        entropy
    }
}
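A small illustration, with hypothetical parameters, of how the shuffled candidate list above maps onto layers: the index num_mixnodes_per_layer * layer + idx_in_layer means consecutive slices of the shuffled vector form the layers, and choose_destination samples from the last slice.

fn main() {
    // Hypothetical example values; any topology satisfying the size check works the same way.
    let (num_layers, num_mixnodes_per_layer) = (3usize, 2usize);
    for layer in 0..num_layers {
        let start = num_mixnodes_per_layer * layer;
        let end = start + num_mixnodes_per_layer;
        println!("layer {layer}: candidates[{start}..{end}]");
    }
    // gen_route picks one node from each of layers 0..num_layers-1;
    // choose_destination picks from the final slice.
}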
@ -1,13 +0,0 @@
[package]
name = "mixnet-topology"
version = "0.1.0"
edition = "2021"

[dependencies]
hex = "0.4"
# Using an older version, since `nym-sphinx` depends on `rand` v0.7.3.
rand = "0.7.3"
serde = { version = "1.0", features = ["derive"] }
sphinx-packet = "0.1.0"
nym-sphinx = { package = "nym-sphinx", git = "https://github.com/nymtech/nym", tag = "v1.1.22" }
thiserror = "1"
@ -1,127 +0,0 @@
use std::net::SocketAddr;

use nym_sphinx::addressing::nodes::{NymNodeRoutingAddress, NymNodeRoutingAddressError};
use rand::{seq::IteratorRandom, Rng};
use serde::{Deserialize, Serialize};
use sphinx_packet::{crypto::PUBLIC_KEY_SIZE, route};

pub type MixnetNodeId = [u8; PUBLIC_KEY_SIZE];

pub type Result<T> = core::result::Result<T, NymNodeRoutingAddressError>;

#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct MixnetTopology {
    pub layers: Vec<Layer>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Layer {
    pub nodes: Vec<Node>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Node {
    #[serde(with = "addr_serde")]
    pub address: SocketAddr,
    #[serde(with = "hex_serde")]
    pub public_key: [u8; PUBLIC_KEY_SIZE],
}

mod addr_serde {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use std::net::{SocketAddr, ToSocketAddrs};

    pub fn serialize<S: Serializer>(addr: &SocketAddr, serializer: S) -> Result<S::Ok, S::Error> {
        addr.to_string().serialize(serializer)
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<SocketAddr, D::Error> {
        let s = String::deserialize(deserializer)?;

        // Try to convert the string (which might be a domain name) to a SocketAddr.
        let mut addrs = s.to_socket_addrs().map_err(serde::de::Error::custom)?;

        addrs
            .next()
            .ok_or_else(|| serde::de::Error::custom("Failed to resolve to a valid address"))
    }
}

mod hex_serde {
    use super::PUBLIC_KEY_SIZE;
    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    pub fn serialize<S: Serializer>(
        pk: &[u8; PUBLIC_KEY_SIZE],
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        if serializer.is_human_readable() {
            hex::encode(pk).serialize(serializer)
        } else {
            serializer.serialize_bytes(pk)
        }
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<[u8; PUBLIC_KEY_SIZE], D::Error> {
        if deserializer.is_human_readable() {
            let hex_str = String::deserialize(deserializer)?;
            hex::decode(hex_str)
                .map_err(serde::de::Error::custom)
                .and_then(|v| v.as_slice().try_into().map_err(serde::de::Error::custom))
        } else {
            <[u8; PUBLIC_KEY_SIZE]>::deserialize(deserializer)
        }
    }
}

impl MixnetTopology {
    pub fn random_route<R: Rng>(&self, rng: &mut R) -> Result<Vec<route::Node>> {
        let num_hops = self.layers.len();

        let route: Vec<route::Node> = self
            .layers
            .iter()
            .take(num_hops)
            .map(|layer| {
                layer
                    .random_node(rng)
                    .expect("layer is not empty")
                    .clone()
                    .try_into()
                    .unwrap()
            })
            .collect();

        Ok(route)
    }

    // Choose a destination mixnet node randomly from the last layer.
    pub fn random_destination<R: Rng>(&self, rng: &mut R) -> Result<route::Node> {
        self.layers
            .last()
            .expect("topology is not empty")
            .random_node(rng)
            .expect("layer is not empty")
            .clone()
            .try_into()
    }
}

impl Layer {
    pub fn random_node<R: Rng>(&self, rng: &mut R) -> Option<&Node> {
        self.nodes.iter().choose(rng)
    }
}

impl TryInto<route::Node> for Node {
    type Error = NymNodeRoutingAddressError;

    fn try_into(self) -> Result<route::Node> {
        Ok(route::Node {
            address: NymNodeRoutingAddress::from(self.address).try_into()?,
            pub_key: self.public_key.into(),
        })
    }
}
@ -1,8 +0,0 @@
|
||||||
[package]
|
|
||||||
name = "mixnet-util"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
tokio = { version = "1.32", default-features = false, features = ["sync", "net"] }
|
|
||||||
mixnet-protocol = { path = "../protocol" }
|
|
|
@ -1,35 +0,0 @@
|
||||||
use std::{collections::HashMap, net::SocketAddr, sync::Arc};
|
|
||||||
|
|
||||||
use tokio::{net::TcpStream, sync::Mutex};
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct ConnectionPool {
|
|
||||||
pool: Arc<Mutex<HashMap<SocketAddr, Arc<Mutex<TcpStream>>>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConnectionPool {
|
|
||||||
pub fn new(size: usize) -> Self {
|
|
||||||
Self {
|
|
||||||
pool: Arc::new(Mutex::new(HashMap::with_capacity(size))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_or_init(
|
|
||||||
&self,
|
|
||||||
addr: &SocketAddr,
|
|
||||||
) -> mixnet_protocol::Result<Arc<Mutex<TcpStream>>> {
|
|
||||||
let mut pool = self.pool.lock().await;
|
|
||||||
match pool.get(addr).cloned() {
|
|
||||||
Some(tcp) => Ok(tcp),
|
|
||||||
None => {
|
|
||||||
let tcp = Arc::new(Mutex::new(
|
|
||||||
TcpStream::connect(addr)
|
|
||||||
.await
|
|
||||||
.map_err(mixnet_protocol::ProtocolError::IO)?,
|
|
||||||
));
|
|
||||||
pool.insert(*addr, tcp.clone());
|
|
||||||
Ok(tcp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,18 +0,0 @@
|
||||||
[package]
|
|
||||||
name = "mixnode"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
async-trait = "0.1"
|
|
||||||
mixnet-node = { path = "../../mixnet/node" }
|
|
||||||
nomos-log = { path = "../../nomos-services/log" }
|
|
||||||
clap = { version = "4", features = ["derive"] }
|
|
||||||
color-eyre = "0.6.0"
|
|
||||||
overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" }
|
|
||||||
overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" }
|
|
||||||
serde = "1"
|
|
||||||
serde_yaml = "0.9"
|
|
||||||
tracing = "0.1"
|
|
||||||
tracing-subscriber = "0.3"
|
|
||||||
tokio = { version = "1.33", features = ["macros"] }
|
|
|
@ -1,9 +0,0 @@
|
||||||
# Mixnode
|
|
||||||
|
|
||||||
A mixnode application that runs the mixnet [`node`](../../mixnet/node/) component and is deployed as part of the mixnet.
|
|
||||||
|
|
||||||
For the recommended mixnet architecture, please see the [mixnet](../../mixnet/README.md) documentation.
|
|
||||||
|
|
||||||
## Configurations
|
|
||||||
|
|
||||||
A mixnode is configured with a YAML file; see [`config.yaml`](./config.yaml) for an example.
|
|
|
@ -1,16 +0,0 @@
|
||||||
mixnode:
|
|
||||||
# A listen address for other mixnodes in the mixnet and mixclients who want to send packets.
|
|
||||||
listen_address: 127.0.0.1:7777
|
|
||||||
# A (internal) listen address only for a "single" mixclient who wants to receive packets
|
|
||||||
# from the last mixnet layer.
|
|
||||||
# For more details, see the documentation in the "mixnet" crate.
|
|
||||||
client_listen_address: 127.0.0.1:7778
|
|
||||||
# An ed25519 private key for decrypting inbound Sphinx packets
|
|
||||||
# received from mixclients or mixnodes in the previous mixnet layer.
|
|
||||||
private_key: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
|
||||||
# A max number of connections that will stay connected to mixnodes in the next layer.
|
|
||||||
connection_pool_size: 255
|
|
||||||
log:
|
|
||||||
backend: "Stdout"
|
|
||||||
format: "Json"
|
|
||||||
level: "debug"
|
|
|
@ -1,20 +0,0 @@
|
||||||
mod services;
|
|
||||||
|
|
||||||
use nomos_log::Logger;
|
|
||||||
use overwatch_derive::Services;
|
|
||||||
use overwatch_rs::services::handle::ServiceHandle;
|
|
||||||
use overwatch_rs::services::ServiceData;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use services::mixnet::MixnetNodeService;
|
|
||||||
|
|
||||||
#[derive(Deserialize, Debug, Clone, Serialize)]
|
|
||||||
pub struct Config {
|
|
||||||
pub mixnode: <MixnetNodeService as ServiceData>::Settings,
|
|
||||||
pub log: <Logger as ServiceData>::Settings,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Services)]
|
|
||||||
pub struct MixNode {
|
|
||||||
node: ServiceHandle<MixnetNodeService>,
|
|
||||||
logging: ServiceHandle<Logger>,
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
mod services;
|
|
||||||
|
|
||||||
use clap::Parser;
|
|
||||||
use color_eyre::eyre::Result;
|
|
||||||
use mixnode::{Config, MixNode, MixNodeServiceSettings};
|
|
||||||
use overwatch_rs::overwatch::OverwatchRunner;
|
|
||||||
use overwatch_rs::DynError;
|
|
||||||
|
|
||||||
#[derive(Parser, Debug)]
|
|
||||||
#[command(author, version, about, long_about = None)]
|
|
||||||
struct Args {
|
|
||||||
/// Path for a yaml-encoded mixnet-node config file
|
|
||||||
config: std::path::PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() -> Result<(), DynError> {
|
|
||||||
let Args { config } = Args::parse();
|
|
||||||
let config = serde_yaml::from_reader::<_, Config>(std::fs::File::open(config)?)?;
|
|
||||||
|
|
||||||
let app = OverwatchRunner::<MixNode>::run(
|
|
||||||
MixNodeServiceSettings {
|
|
||||||
node: config.mixnode,
|
|
||||||
logging: config.log,
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
)?;
|
|
||||||
app.wait_finished();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
|
@ -1,31 +0,0 @@
|
||||||
use mixnet_node::{MixnetNode, MixnetNodeConfig};
|
|
||||||
use overwatch_rs::services::handle::ServiceStateHandle;
|
|
||||||
use overwatch_rs::services::relay::NoMessage;
|
|
||||||
use overwatch_rs::services::state::{NoOperator, NoState};
|
|
||||||
use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId};
|
|
||||||
use overwatch_rs::DynError;
|
|
||||||
|
|
||||||
pub struct MixnetNodeService(MixnetNode);
|
|
||||||
|
|
||||||
impl ServiceData for MixnetNodeService {
|
|
||||||
const SERVICE_ID: ServiceId = "mixnet-node";
|
|
||||||
type Settings = MixnetNodeConfig;
|
|
||||||
type State = NoState<Self::Settings>;
|
|
||||||
type StateOperator = NoOperator<Self::State>;
|
|
||||||
type Message = NoMessage;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl ServiceCore for MixnetNodeService {
|
|
||||||
fn init(service_state: ServiceStateHandle<Self>) -> Result<Self, DynError> {
|
|
||||||
let settings: Self::Settings = service_state.settings_reader.get_updated_settings();
|
|
||||||
Ok(Self(MixnetNode::new(settings)))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run(self) -> Result<(), DynError> {
|
|
||||||
if let Err(_e) = self.0.run().await {
|
|
||||||
todo!("Errors should match");
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1 +0,0 @@
|
||||||
pub mod mixnet;
|
|
|
@ -1,91 +0,0 @@
|
||||||
# Nomos Node
|
|
||||||
|
|
||||||
Nomos blockchain node
|
|
||||||
|
|
||||||
|
|
||||||
## Network service
|
|
||||||
|
|
||||||
A Nomos node can be configured with one of the following network backends:
|
|
||||||
- [libp2p](../../nomos-services/backends/libp2p.rs)
|
|
||||||
|
|
||||||
### Mixclient integration
|
|
||||||
|
|
||||||
The [mixclient](../../mixnet/client/) is currently integrated as a part of the libp2p network backend.
|
|
||||||
To run a Nomos node with the libp2p network backend, the `mixnet_client` and `mixnet_delay` fields in the [`config.yaml`](./config.yaml) must be specified so that the Nomos node can send packets to, and receive packets from, mixnodes.
|
|
||||||
|
|
||||||
For more details about the mixnode/mixclient architecture, see the [mixnet documentation](../../mixnet/README.md).
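As a rough orientation, the sketch below mirrors, in simplified form, what these two fields deserialize into on the Rust side of this change; the `*Sketch` types and their fields are illustrative stand-ins, not the actual definitions from the mixnet or libp2p backend crates.

```rust
use std::{net::SocketAddr, ops::Range, time::Duration};

// Illustrative stand-ins only; the real types live in the mixnet client and
// libp2p backend crates touched by this change.
pub struct Libp2pConfigSketch {
    /// Gossipsub peers to dial on startup, e.g. "/dns/testnet.nomos.tech/tcp/3000".
    pub initial_peers: Vec<String>,
    /// Maps to the `mixnet_client:` section of config.yaml.
    pub mixnet_client: MixnetClientConfigSketch,
    /// Maps to `mixnet_delay:`; a "0ms".."0ms" range adds no extra delay.
    pub mixnet_delay: Range<Duration>,
}

pub enum MixnetClientModeSketch {
    /// Only send messages into the mixnet.
    Sender,
    /// Additionally receive packets from the mixnode listening on this address.
    SenderReceiver(SocketAddr),
}

pub struct MixnetClientConfigSketch {
    pub mode: MixnetClientModeSketch,
    /// Mixnet layers, each a list of (listen address, hex-encoded public key).
    pub topology: Vec<Vec<(SocketAddr, String)>>,
    /// Number of connections kept open to mixnodes in the first layer.
    pub connection_pool_size: usize,
}
```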
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
flowchart LR
|
|
||||||
|
|
||||||
subgraph mixnet
|
|
||||||
direction LR
|
|
||||||
|
|
||||||
subgraph layer-1
|
|
||||||
direction TB
|
|
||||||
mixnode-1-1
|
|
||||||
mixnode-1-2
|
|
||||||
end
|
|
||||||
subgraph layer-2
|
|
||||||
direction TB
|
|
||||||
mixnode-2-1
|
|
||||||
mixnode-2-2
|
|
||||||
end
|
|
||||||
subgraph layer-3
|
|
||||||
direction TB
|
|
||||||
mixnode-3-1
|
|
||||||
mixnode-3-2
|
|
||||||
end
|
|
||||||
|
|
||||||
mixnode-1-1 --> mixnode-2-1
|
|
||||||
mixnode-1-1 --> mixnode-2-2
|
|
||||||
mixnode-1-2 --> mixnode-2-1
|
|
||||||
mixnode-1-2 --> mixnode-2-2
|
|
||||||
mixnode-2-1 --> mixnode-3-1
|
|
||||||
mixnode-2-1 --> mixnode-3-2
|
|
||||||
mixnode-2-2 --> mixnode-3-1
|
|
||||||
mixnode-2-2 --> mixnode-3-2
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph nomos-network
|
|
||||||
direction TB
|
|
||||||
|
|
||||||
subgraph nomos-node-1
|
|
||||||
libp2p-1[libp2p] --> mixclient-sender-1[mixclient-sender]
|
|
||||||
end
|
|
||||||
subgraph nomos-node-2
|
|
||||||
libp2p-2[libp2p] --> mixclient-sender-2[mixclient-sender]
|
|
||||||
end
|
|
||||||
subgraph nomos-node-3
|
|
||||||
libp2p-3[libp2p] <--> mixclient-senderreceiver
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
mixclient-sender-1 --> mixnode-1-1
|
|
||||||
mixclient-sender-1 --> mixnode-1-2
|
|
||||||
mixclient-sender-2 --> mixnode-1-1
|
|
||||||
mixclient-sender-2 --> mixnode-1-2
|
|
||||||
mixclient-senderreceiver --> mixnode-1-1
|
|
||||||
mixclient-senderreceiver --> mixnode-1-2
|
|
||||||
mixnode-3-2 --> mixclient-senderreceiver
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Sender mode
|
|
||||||
|
|
||||||
If you are a node operator who wants to run only a Nomos node (not a mixnode),
|
|
||||||
you can configure the mixclient to run in the `Sender` mode (like `nomos-node-1` or `nomos-node-2` above).
|
|
||||||
Then, the Nomos node sends messages to the mixnet instead of broadcasting them directly through libp2p gossipsub.
|
|
||||||
|
|
||||||
The mixclient in the `Sender` mode splits a message into multiple Sphinx packets by constructing mix routes based on the configured mixnet topology, and sends the packets to mixnodes in the first layer.
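A minimal sketch of that flow, assuming made-up `Topology` and `Node` types (the real fragmenting, Sphinx encryption, and per-packet delays live in the mixnet client crate and are omitted here):

```rust
use std::net::SocketAddr;
use rand::{seq::IteratorRandom, Rng};

// Illustrative stand-ins for the real topology types.
struct Node { address: SocketAddr }
struct Topology { layers: Vec<Vec<Node>> }

/// Conceptual Sender path: pick one random node per layer to form a route,
/// cut the message into fragments, and hand each fragment to the first hop
/// (Sphinx wrapping and timing obfuscation are not shown).
fn send_through_mixnet<R: Rng>(
    topology: &Topology,
    message: &[u8],
    rng: &mut R,
) -> Vec<(SocketAddr, Vec<u8>)> {
    const FRAGMENT_SIZE: usize = 1024; // assumed size, for illustration only

    let route: Vec<&Node> = topology
        .layers
        .iter()
        .map(|layer| layer.iter().choose(rng).expect("layer is not empty"))
        .collect();
    let first_hop = route.first().expect("route is not empty").address;

    message
        .chunks(FRAGMENT_SIZE)
        .map(|fragment| (first_hop, fragment.to_vec())) // one packet per fragment
        .collect()
}
```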
|
|
||||||
|
|
||||||
#### SenderReceiver mode
|
|
||||||
|
|
||||||
If you are a node operator who runs both a Nomos node and a mixnode,
|
|
||||||
you can configure the mixclient to run in the `SenderReceiver` mode by specifying the client listen address of your mixnode (like `nomos-node-3` and `mixnode-3-2` above).
|
|
||||||
|
|
||||||
The Nomos node with the mixclient in the `SenderReceiver` mode will behave essentially the same as the one in the `Sender` mode.
|
|
||||||
In addition, if the connected mixnode is part of the last mixnet layer, the node will receive packets from it, reconstruct the original messages, and broadcast them through libp2p gossipsub.
|
|
||||||
In other words, at least one Nomos node in the entire network must run a mixclient in the `SenderReceiver` mode, so that reconstructed messages can be broadcast to all other Nomos nodes.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -24,31 +24,6 @@ network:
|
||||||
discV5BootstrapNodes: []
|
discV5BootstrapNodes: []
|
||||||
initial_peers: []
|
initial_peers: []
|
||||||
relayTopics: []
|
relayTopics: []
|
||||||
# Mixclient configuration to communicate with mixnodes.
|
|
||||||
# The libp2p network backend always requires this mixclient configuration
|
|
||||||
# (cannot be disabled for now).
|
|
||||||
mixnet_client:
|
|
||||||
# A mixclient mode. For details, see the documentation of the "mixnet" crate.
|
|
||||||
# - Sender
|
|
||||||
# - !SenderReceiver [mixnode_client_listen_address]
|
|
||||||
mode: Sender
|
|
||||||
# A mixnet topology, which contains the information of all mixnodes in the mixnet.
|
|
||||||
# (The topology is static for now.)
|
|
||||||
topology:
|
|
||||||
# Each mixnet layer consists of a list of mixnodes.
|
|
||||||
layers:
|
|
||||||
- nodes:
|
|
||||||
- address: 127.0.0.1:7777 # A listen address of the mixnode
|
|
||||||
# An ed25519 public key for encrypting Sphinx packets for the mixnode
|
|
||||||
public_key: "0000000000000000000000000000000000000000000000000000000000000000"
|
|
||||||
# A max number of connections that will stay connected to mixnodes in the first mixnet layer.
|
|
||||||
connection_pool_size: 255
|
|
||||||
# A range of total delay that will be applied to each Sphinx packet
|
|
||||||
# sent to the mixnet for timing obfuscation.
|
|
||||||
# Panics if start > end.
|
|
||||||
mixnet_delay:
|
|
||||||
start: "0ms"
|
|
||||||
end: "0ms"
|
|
||||||
|
|
||||||
http:
|
http:
|
||||||
backend_settings:
|
backend_settings:
|
||||||
|
|
|
@ -7,39 +7,3 @@ backend:
|
||||||
discV5BootstrapNodes: []
|
discV5BootstrapNodes: []
|
||||||
initial_peers: ["/dns/testnet.nomos.tech/tcp/3000"]
|
initial_peers: ["/dns/testnet.nomos.tech/tcp/3000"]
|
||||||
relayTopics: []
|
relayTopics: []
|
||||||
# Mixclient configuration to communicate with mixnodes.
|
|
||||||
# The libp2p network backend always requires this mixclient configuration
|
|
||||||
# (cannot be disabled for now).
|
|
||||||
mixnet_client:
|
|
||||||
# A mixclient mode. For details, see the documentation of the "mixnet" crate.
|
|
||||||
# - Sender
|
|
||||||
# - !SenderReceiver [mixnode_client_listen_address]
|
|
||||||
mode: Sender
|
|
||||||
# A mixnet topology, which contains the information of all mixnodes in the mixnet.
|
|
||||||
# (The topology is static for now.)
|
|
||||||
topology:
|
|
||||||
# Each mixnet layer consists of a list of mixnodes.
|
|
||||||
layers:
|
|
||||||
- nodes:
|
|
||||||
- address: testnet.nomos.tech:7707 # A listen address of the mixnode
|
|
||||||
public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
|
|
||||||
- nodes:
|
|
||||||
- address: testnet.nomos.tech:7717 # A listen address of the mixnode
|
|
||||||
public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
|
|
||||||
|
|
||||||
- nodes:
|
|
||||||
- address: testnet.nomos.tech:7727 # A listen address of the mixnode
|
|
||||||
public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
|
|
||||||
|
|
||||||
# A max number of connections that will stay connected to mixnodes in the first mixnet layer.
|
|
||||||
connection_pool_size: 255
|
|
||||||
max_retries: 5
|
|
||||||
retry_delay:
|
|
||||||
secs: 1
|
|
||||||
nanos: 0
|
|
||||||
# A range of total delay that will be applied to each Sphinx packet
|
|
||||||
# sent to the mixnet for timing obfuscation.
|
|
||||||
# Panics if start > end.
|
|
||||||
mixnet_delay:
|
|
||||||
start: "0ms"
|
|
||||||
end: "0ms"
|
|
||||||
|
|
|
@ -7,17 +7,15 @@ edition = "2021"
|
||||||
multiaddr = "0.18"
|
multiaddr = "0.18"
|
||||||
tokio = { version = "1", features = ["sync", "macros"] }
|
tokio = { version = "1", features = ["sync", "macros"] }
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
libp2p = { version = "0.52.4", features = [
|
libp2p = { version = "0.53.2", features = [
|
||||||
"dns",
|
"dns",
|
||||||
"yamux",
|
|
||||||
"plaintext",
|
|
||||||
"macros",
|
"macros",
|
||||||
"gossipsub",
|
"gossipsub",
|
||||||
"identify",
|
|
||||||
"tcp",
|
|
||||||
"tokio",
|
"tokio",
|
||||||
|
"quic",
|
||||||
"secp256k1",
|
"secp256k1",
|
||||||
] }
|
] }
|
||||||
|
libp2p-stream = "0.1.0-alpha"
|
||||||
blake2 = { version = "0.10" }
|
blake2 = { version = "0.10" }
|
||||||
serde = { version = "1.0.166", features = ["derive"] }
|
serde = { version = "1.0.166", features = ["derive"] }
|
||||||
hex = "0.4.3"
|
hex = "0.4.3"
|
||||||
|
@ -26,5 +24,6 @@ thiserror = "1.0.40"
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
|
rand = "0.8.5"
|
||||||
serde_json = "1.0.99"
|
serde_json = "1.0.99"
|
||||||
tokio = { version = "1", features = ["time"] }
|
tokio = { version = "1", features = ["time"] }
|
||||||
|
|
|
@ -12,17 +12,16 @@ pub use libp2p;
|
||||||
use blake2::digest::{consts::U32, Digest};
|
use blake2::digest::{consts::U32, Digest};
|
||||||
use blake2::Blake2b;
|
use blake2::Blake2b;
|
||||||
use libp2p::gossipsub::{Message, MessageId, TopicHash};
|
use libp2p::gossipsub::{Message, MessageId, TopicHash};
|
||||||
#[allow(deprecated)]
|
use libp2p::swarm::ConnectionId;
|
||||||
pub use libp2p::{
|
pub use libp2p::{
|
||||||
core::upgrade,
|
core::upgrade,
|
||||||
dns,
|
|
||||||
gossipsub::{self, PublishError, SubscriptionError},
|
gossipsub::{self, PublishError, SubscriptionError},
|
||||||
identity::{self, secp256k1},
|
identity::{self, secp256k1},
|
||||||
plaintext::Config as PlainText2Config,
|
swarm::{dial_opts::DialOpts, DialError, NetworkBehaviour, SwarmEvent},
|
||||||
swarm::{dial_opts::DialOpts, DialError, NetworkBehaviour, SwarmEvent, THandlerErr},
|
PeerId, SwarmBuilder, Transport,
|
||||||
tcp, yamux, PeerId, SwarmBuilder, Transport,
|
|
||||||
};
|
};
|
||||||
use libp2p::{swarm::ConnectionId, tcp::tokio::Tcp};
|
pub use libp2p_stream;
|
||||||
|
use libp2p_stream::Control;
|
||||||
pub use multiaddr::{multiaddr, Multiaddr, Protocol};
|
pub use multiaddr::{multiaddr, Multiaddr, Protocol};
|
||||||
|
|
||||||
/// Wraps [`libp2p::Swarm`] and configures it for use within Nomos.
|
/// Wraps [`libp2p::Swarm`] and configures it for use within Nomos.
|
||||||
|
@ -33,57 +32,54 @@ pub struct Swarm {
|
||||||
|
|
||||||
#[derive(NetworkBehaviour)]
|
#[derive(NetworkBehaviour)]
|
||||||
pub struct Behaviour {
|
pub struct Behaviour {
|
||||||
|
stream: libp2p_stream::Behaviour,
|
||||||
gossipsub: gossipsub::Behaviour,
|
gossipsub: gossipsub::Behaviour,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Behaviour {
|
||||||
|
fn new(peer_id: PeerId, gossipsub_config: gossipsub::Config) -> Result<Self, Box<dyn Error>> {
|
||||||
|
let gossipsub = gossipsub::Behaviour::new(
|
||||||
|
gossipsub::MessageAuthenticity::Author(peer_id),
|
||||||
|
gossipsub::ConfigBuilder::from(gossipsub_config)
|
||||||
|
.validation_mode(gossipsub::ValidationMode::None)
|
||||||
|
.message_id_fn(compute_message_id)
|
||||||
|
.build()?,
|
||||||
|
)?;
|
||||||
|
Ok(Self {
|
||||||
|
stream: libp2p_stream::Behaviour::new(),
|
||||||
|
gossipsub,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(thiserror::Error, Debug)]
|
#[derive(thiserror::Error, Debug)]
|
||||||
pub enum SwarmError {
|
pub enum SwarmError {
|
||||||
#[error("duplicate dialing")]
|
#[error("duplicate dialing")]
|
||||||
DuplicateDialing,
|
DuplicateDialing,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A timeout for the setup and protocol upgrade process for all in/outbound connections
|
/// How long to keep a connection alive once it is idling.
|
||||||
const TRANSPORT_TIMEOUT: Duration = Duration::from_secs(20);
|
const IDLE_CONN_TIMEOUT: Duration = Duration::from_secs(300);
|
||||||
|
|
||||||
impl Swarm {
|
impl Swarm {
|
||||||
/// Builds a [`Swarm`] configured for use with Nomos on top of a tokio executor.
|
/// Builds a [`Swarm`] configured for use with Nomos on top of a tokio executor.
|
||||||
//
|
//
|
||||||
// TODO: define error types
|
// TODO: define error types
|
||||||
pub fn build(config: &SwarmConfig) -> Result<Self, Box<dyn Error>> {
|
pub fn build(config: &SwarmConfig) -> Result<Self, Box<dyn Error>> {
|
||||||
let id_keys = identity::Keypair::from(secp256k1::Keypair::from(config.node_key.clone()));
|
let keypair =
|
||||||
let local_peer_id = PeerId::from(id_keys.public());
|
libp2p::identity::Keypair::from(secp256k1::Keypair::from(config.node_key.clone()));
|
||||||
log::info!("libp2p peer_id:{}", local_peer_id);
|
let peer_id = PeerId::from(keypair.public());
|
||||||
|
tracing::info!("libp2p peer_id:{}", peer_id);
|
||||||
|
|
||||||
// TODO: consider using noise authentication
|
let mut swarm = libp2p::SwarmBuilder::with_existing_identity(keypair)
|
||||||
let tcp_transport = tcp::Transport::<Tcp>::new(tcp::Config::default().nodelay(true))
|
.with_tokio()
|
||||||
.upgrade(upgrade::Version::V1Lazy)
|
.with_quic()
|
||||||
.authenticate(PlainText2Config::new(&id_keys))
|
.with_dns()?
|
||||||
.multiplex(yamux::Config::default())
|
.with_behaviour(|_| Behaviour::new(peer_id, config.gossipsub_config.clone()).unwrap())?
|
||||||
.timeout(TRANSPORT_TIMEOUT)
|
.with_swarm_config(|c| c.with_idle_connection_timeout(IDLE_CONN_TIMEOUT))
|
||||||
.boxed();
|
.build();
|
||||||
|
|
||||||
// Wrapping TCP transport into DNS transport to resolve hostnames.
|
swarm.listen_on(Self::multiaddr(config.host, config.port))?;
|
||||||
let tcp_transport = dns::tokio::Transport::system(tcp_transport)?.boxed();
|
|
||||||
|
|
||||||
// TODO: consider using Signed or Anonymous.
|
|
||||||
// For Anonymous, a custom `message_id` function need to be set
|
|
||||||
// to prevent all messages from a peer being filtered as duplicates.
|
|
||||||
let gossipsub = gossipsub::Behaviour::new(
|
|
||||||
gossipsub::MessageAuthenticity::Author(local_peer_id),
|
|
||||||
gossipsub::ConfigBuilder::from(config.gossipsub_config.clone())
|
|
||||||
.validation_mode(gossipsub::ValidationMode::None)
|
|
||||||
.message_id_fn(compute_message_id)
|
|
||||||
.build()?,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut swarm = libp2p::Swarm::new(
|
|
||||||
tcp_transport,
|
|
||||||
Behaviour { gossipsub },
|
|
||||||
local_peer_id,
|
|
||||||
libp2p::swarm::Config::with_tokio_executor(),
|
|
||||||
);
|
|
||||||
|
|
||||||
swarm.listen_on(multiaddr!(Ip4(config.host), Tcp(config.port)))?;
|
|
||||||
|
|
||||||
Ok(Swarm { swarm })
|
Ok(Swarm { swarm })
|
||||||
}
|
}
|
||||||
|
@ -148,11 +144,21 @@ impl Swarm {
|
||||||
pub fn topic_hash(topic: &str) -> TopicHash {
|
pub fn topic_hash(topic: &str) -> TopicHash {
|
||||||
gossipsub::IdentTopic::new(topic).hash()
|
gossipsub::IdentTopic::new(topic).hash()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns a stream control that can be used to accept streams and establish streams to
|
||||||
|
/// other peers.
|
||||||
|
/// Stream controls can be cloned.
|
||||||
|
pub fn stream_control(&self) -> Control {
|
||||||
|
self.swarm.behaviour().stream.new_control()
|
||||||
|
}
|
||||||
|
|
||||||
|
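// Builds the node's QUIC address, e.g. 127.0.0.1:3000 -> /ip4/127.0.0.1/udp/3000/quic-v1.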
pub fn multiaddr(ip: std::net::Ipv4Addr, port: u16) -> Multiaddr {
|
||||||
|
multiaddr!(Ip4(ip), Udp(port), QuicV1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl futures::Stream for Swarm {
|
impl futures::Stream for Swarm {
|
||||||
#[allow(deprecated)]
|
type Item = SwarmEvent<BehaviourEvent>;
|
||||||
type Item = SwarmEvent<BehaviourEvent, THandlerErr<Behaviour>>;
|
|
||||||
|
|
||||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||||
Pin::new(&mut self.swarm).poll_next(cx)
|
Pin::new(&mut self.swarm).poll_next(cx)
|
||||||
|
@ -164,3 +170,68 @@ fn compute_message_id(message: &Message) -> MessageId {
|
||||||
hasher.update(&message.data);
|
hasher.update(&message.data);
|
||||||
MessageId::from(hasher.finalize().to_vec())
|
MessageId::from(hasher.finalize().to_vec())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use futures::{AsyncReadExt, AsyncWriteExt, StreamExt};
|
||||||
|
use libp2p::StreamProtocol;
|
||||||
|
use rand::Rng;
|
||||||
|
|
||||||
|
use crate::{Swarm, SwarmConfig};
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn stream() {
|
||||||
|
// Init two swarms
|
||||||
|
let (config1, mut swarm1) = init_swarm();
|
||||||
|
let (_, mut swarm2) = init_swarm();
|
||||||
|
let swarm1_peer_id = *swarm1.swarm().local_peer_id();
|
||||||
|
|
||||||
|
// Dial to swarm1
|
||||||
|
swarm2
|
||||||
|
.connect(Swarm::multiaddr(config1.host, config1.port))
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Prepare stream controls
|
||||||
|
let mut stream_control1 = swarm1.stream_control();
|
||||||
|
let mut stream_control2 = swarm2.stream_control();
|
||||||
|
|
||||||
|
// Poll swarms to make progress
|
||||||
|
tokio::spawn(async move { while (swarm1.next().await).is_some() {} });
|
||||||
|
tokio::spawn(async move { while (swarm2.next().await).is_some() {} });
|
||||||
|
|
||||||
|
// Make swarm1 accept incoming streams
|
||||||
|
let protocol = StreamProtocol::new("/test");
|
||||||
|
let mut incoming_streams = stream_control1.accept(protocol).unwrap();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
// If a new stream is established, write bytes and close the stream.
|
||||||
|
while let Some((_, mut stream)) = incoming_streams.next().await {
|
||||||
|
stream.write_all(&[1, 2, 3, 4]).await.unwrap();
|
||||||
|
stream.close().await.unwrap();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait until the connection is established
|
||||||
|
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||||
|
|
||||||
|
// Establish a stream with swarm1 and read bytes
|
||||||
|
let mut stream = stream_control2
|
||||||
|
.open_stream(swarm1_peer_id, StreamProtocol::new("/test"))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let mut buf = [0u8; 4];
|
||||||
|
stream.read_exact(&mut buf).await.unwrap();
|
||||||
|
assert_eq!(buf, [1, 2, 3, 4]);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init_swarm() -> (SwarmConfig, Swarm) {
|
||||||
|
let config = SwarmConfig {
|
||||||
|
host: std::net::Ipv4Addr::new(127, 0, 0, 1),
|
||||||
|
port: rand::thread_rng().gen_range(10000..30000),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let swarm = Swarm::build(&config).unwrap();
|
||||||
|
(config, swarm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -22,7 +22,7 @@ futures = "0.3"
|
||||||
parking_lot = "0.12"
|
parking_lot = "0.12"
|
||||||
nomos-core = { path = "../../nomos-core" }
|
nomos-core = { path = "../../nomos-core" }
|
||||||
nomos-libp2p = { path = "../../nomos-libp2p", optional = true }
|
nomos-libp2p = { path = "../../nomos-libp2p", optional = true }
|
||||||
mixnet-client = { path = "../../mixnet/client" }
|
mixnet = { path = "../../mixnet" }
|
||||||
|
|
||||||
utoipa = { version = "4.0", optional = true }
|
utoipa = { version = "4.0", optional = true }
|
||||||
serde_json = { version = "1", optional = true }
|
serde_json = { version = "1", optional = true }
|
||||||
|
@ -34,4 +34,4 @@ tokio = { version = "1", features = ["full"] }
|
||||||
default = []
|
default = []
|
||||||
libp2p = ["nomos-libp2p", "rand", "humantime-serde"]
|
libp2p = ["nomos-libp2p", "rand", "humantime-serde"]
|
||||||
mock = ["rand", "chrono"]
|
mock = ["rand", "chrono"]
|
||||||
openapi = ["dep:utoipa", "serde_json",]
|
openapi = ["dep:utoipa", "serde_json"]
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
use nomos_libp2p::Multiaddr;
|
use mixnet::packet::PacketBody;
|
||||||
|
use nomos_libp2p::{libp2p::StreamProtocol, Multiaddr, PeerId};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::sync::oneshot;
|
use tokio::sync::oneshot;
|
||||||
|
|
||||||
|
@ -16,18 +17,23 @@ pub enum Command {
|
||||||
reply: oneshot::Sender<Libp2pInfo>,
|
reply: oneshot::Sender<Libp2pInfo>,
|
||||||
},
|
},
|
||||||
#[doc(hidden)]
|
#[doc(hidden)]
|
||||||
// broadcast a message directly through gossipsub without mixnet
|
RetryBroadcast {
|
||||||
DirectBroadcastAndRetry {
|
|
||||||
topic: Topic,
|
topic: Topic,
|
||||||
message: Box<[u8]>,
|
message: Box<[u8]>,
|
||||||
retry_count: usize,
|
retry_count: usize,
|
||||||
},
|
},
|
||||||
|
StreamSend {
|
||||||
|
peer_id: PeerId,
|
||||||
|
protocol: StreamProtocol,
|
||||||
|
packet_body: PacketBody,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug)]
|
||||||
pub struct Dial {
|
pub struct Dial {
|
||||||
pub addr: Multiaddr,
|
pub addr: Multiaddr,
|
||||||
pub retry_count: usize,
|
pub retry_count: usize,
|
||||||
|
pub result_sender: oneshot::Sender<Result<PeerId, nomos_libp2p::DialError>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Topic = String;
|
pub type Topic = String;
|
||||||
|
|
|
@ -1,6 +1,3 @@
|
||||||
use std::{ops::Range, time::Duration};
|
|
||||||
|
|
||||||
use mixnet_client::MixnetClientConfig;
|
|
||||||
use nomos_libp2p::{Multiaddr, SwarmConfig};
|
use nomos_libp2p::{Multiaddr, SwarmConfig};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
@ -11,47 +8,4 @@ pub struct Libp2pConfig {
|
||||||
// Initial peers to connect to
|
// Initial peers to connect to
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub initial_peers: Vec<Multiaddr>,
|
pub initial_peers: Vec<Multiaddr>,
|
||||||
pub mixnet_client: MixnetClientConfig,
|
|
||||||
#[serde(with = "humantime")]
|
|
||||||
pub mixnet_delay: Range<Duration>,
|
|
||||||
}
|
|
||||||
|
|
||||||
mod humantime {
|
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
|
||||||
use std::{ops::Range, time::Duration};
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
struct DurationRangeHelper {
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
start: Duration,
|
|
||||||
#[serde(with = "humantime_serde")]
|
|
||||||
end: Duration,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn serialize<S: Serializer>(
|
|
||||||
val: &Range<Duration>,
|
|
||||||
serializer: S,
|
|
||||||
) -> Result<S::Ok, S::Error> {
|
|
||||||
if serializer.is_human_readable() {
|
|
||||||
DurationRangeHelper {
|
|
||||||
start: val.start,
|
|
||||||
end: val.end,
|
|
||||||
}
|
|
||||||
.serialize(serializer)
|
|
||||||
} else {
|
|
||||||
val.serialize(serializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn deserialize<'de, D: Deserializer<'de>>(
|
|
||||||
deserializer: D,
|
|
||||||
) -> Result<Range<Duration>, D::Error> {
|
|
||||||
if deserializer.is_human_readable() {
|
|
||||||
let DurationRangeHelper { start, end } =
|
|
||||||
DurationRangeHelper::deserialize(deserializer)?;
|
|
||||||
Ok(start..end)
|
|
||||||
} else {
|
|
||||||
Range::<Duration>::deserialize(deserializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,115 +0,0 @@
|
||||||
use std::{ops::Range, time::Duration};
|
|
||||||
|
|
||||||
use mixnet_client::{MessageStream, MixnetClient};
|
|
||||||
use nomos_core::wire;
|
|
||||||
use rand::{rngs::OsRng, thread_rng, Rng};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tokio::sync::mpsc;
|
|
||||||
use tokio_stream::StreamExt;
|
|
||||||
|
|
||||||
use super::{command::Topic, Command, Libp2pConfig};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct MixnetMessage {
|
|
||||||
pub topic: Topic,
|
|
||||||
pub message: Box<[u8]>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MixnetMessage {
|
|
||||||
pub fn as_bytes(&self) -> Vec<u8> {
|
|
||||||
wire::serialize(self).expect("Couldn't serialize MixnetMessage")
|
|
||||||
}
|
|
||||||
pub fn from_bytes(data: &[u8]) -> Result<Self, wire::Error> {
|
|
||||||
wire::deserialize(data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn random_delay(range: &Range<Duration>) -> Duration {
|
|
||||||
if range.start == range.end {
|
|
||||||
return range.start;
|
|
||||||
}
|
|
||||||
thread_rng().gen_range(range.start, range.end)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct MixnetHandler {
|
|
||||||
client: MixnetClient<OsRng>,
|
|
||||||
commands_tx: mpsc::Sender<Command>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MixnetHandler {
|
|
||||||
pub fn new(config: &Libp2pConfig, commands_tx: mpsc::Sender<Command>) -> Self {
|
|
||||||
let client = MixnetClient::new(config.mixnet_client.clone(), OsRng);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
client,
|
|
||||||
commands_tx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run(&mut self) {
|
|
||||||
const BASE_DELAY: Duration = Duration::from_secs(5);
|
|
||||||
// we need this loop to help us reestablish the connection in case
|
|
||||||
// the mixnet client fails for whatever reason
|
|
||||||
let mut backoff = 0;
|
|
||||||
loop {
|
|
||||||
match self.client.run().await {
|
|
||||||
Ok(stream) => {
|
|
||||||
backoff = 0;
|
|
||||||
self.handle_stream(stream).await;
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("mixnet client error: {e}");
|
|
||||||
backoff += 1;
|
|
||||||
tokio::time::sleep(BASE_DELAY * backoff).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_stream(&mut self, mut stream: MessageStream) {
|
|
||||||
while let Some(result) = stream.next().await {
|
|
||||||
match result {
|
|
||||||
Ok(msg) => {
|
|
||||||
tracing::debug!("receiving message from mixnet client");
|
|
||||||
let Ok(MixnetMessage { topic, message }) = MixnetMessage::from_bytes(&msg)
|
|
||||||
else {
|
|
||||||
tracing::error!("failed to deserialize msg received from mixnet client");
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
self.commands_tx
|
|
||||||
.send(Command::DirectBroadcastAndRetry {
|
|
||||||
topic,
|
|
||||||
message,
|
|
||||||
retry_count: 0,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.unwrap_or_else(|_| tracing::error!("could not schedule broadcast"));
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("mixnet client stream error: {e}");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use super::random_delay;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_random_delay() {
|
|
||||||
assert_eq!(
|
|
||||||
random_delay(&(Duration::ZERO..Duration::ZERO)),
|
|
||||||
Duration::ZERO
|
|
||||||
);
|
|
||||||
|
|
||||||
let range = Duration::from_millis(10)..Duration::from_millis(100);
|
|
||||||
let delay = random_delay(&range);
|
|
||||||
assert!(range.start <= delay && delay < range.end);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,12 +1,10 @@
|
||||||
mod command;
|
mod command;
|
||||||
mod config;
|
mod config;
|
||||||
mod mixnet;
|
pub(crate) mod swarm;
|
||||||
mod swarm;
|
|
||||||
|
|
||||||
// std
|
// std
|
||||||
pub use self::command::{Command, Libp2pInfo};
|
pub use self::command::{Command, Dial, Libp2pInfo, Topic};
|
||||||
pub use self::config::Libp2pConfig;
|
pub use self::config::Libp2pConfig;
|
||||||
use self::mixnet::MixnetHandler;
|
|
||||||
use self::swarm::SwarmHandler;
|
use self::swarm::SwarmHandler;
|
||||||
|
|
||||||
// internal
|
// internal
|
||||||
|
@ -46,11 +44,6 @@ impl NetworkBackend for Libp2p {
|
||||||
let (commands_tx, commands_rx) = tokio::sync::mpsc::channel(BUFFER_SIZE);
|
let (commands_tx, commands_rx) = tokio::sync::mpsc::channel(BUFFER_SIZE);
|
||||||
let (events_tx, _) = tokio::sync::broadcast::channel(BUFFER_SIZE);
|
let (events_tx, _) = tokio::sync::broadcast::channel(BUFFER_SIZE);
|
||||||
|
|
||||||
let mut mixnet_handler = MixnetHandler::new(&config, commands_tx.clone());
|
|
||||||
overwatch_handle.runtime().spawn(async move {
|
|
||||||
mixnet_handler.run().await;
|
|
||||||
});
|
|
||||||
|
|
||||||
let mut swarm_handler =
|
let mut swarm_handler =
|
||||||
SwarmHandler::new(&config, commands_tx.clone(), commands_rx, events_tx.clone());
|
SwarmHandler::new(&config, commands_tx.clone(), commands_rx, events_tx.clone());
|
||||||
overwatch_handle.runtime().spawn(async move {
|
overwatch_handle.runtime().spawn(async move {
|
||||||
|
|
|
@ -1,20 +1,19 @@
|
||||||
use std::{collections::HashMap, ops::Range, time::Duration};
|
use std::{
|
||||||
|
collections::{hash_map::Entry, HashMap},
|
||||||
use mixnet_client::MixnetClient;
|
time::Duration,
|
||||||
#[allow(deprecated)]
|
|
||||||
use nomos_libp2p::{
|
|
||||||
gossipsub::{self, Message},
|
|
||||||
libp2p::swarm::ConnectionId,
|
|
||||||
Behaviour, BehaviourEvent, Multiaddr, Swarm, SwarmEvent, THandlerErr,
|
|
||||||
};
|
};
|
||||||
use rand::rngs::OsRng;
|
|
||||||
use tokio::sync::{broadcast, mpsc};
|
use futures::AsyncWriteExt;
|
||||||
|
use nomos_libp2p::{
|
||||||
|
gossipsub,
|
||||||
|
libp2p::{swarm::ConnectionId, Stream, StreamProtocol},
|
||||||
|
libp2p_stream::{Control, IncomingStreams, OpenStreamError},
|
||||||
|
BehaviourEvent, Multiaddr, PeerId, Swarm, SwarmEvent,
|
||||||
|
};
|
||||||
|
use tokio::sync::{broadcast, mpsc, oneshot};
|
||||||
use tokio_stream::StreamExt;
|
use tokio_stream::StreamExt;
|
||||||
|
|
||||||
use crate::backends::libp2p::{
|
use crate::backends::libp2p::Libp2pInfo;
|
||||||
mixnet::{random_delay, MixnetMessage},
|
|
||||||
Libp2pInfo,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
command::{Command, Dial, Topic},
|
command::{Command, Dial, Topic},
|
||||||
|
@ -23,12 +22,12 @@ use super::{
|
||||||
|
|
||||||
pub struct SwarmHandler {
|
pub struct SwarmHandler {
|
||||||
pub swarm: Swarm,
|
pub swarm: Swarm,
|
||||||
|
stream_control: Control,
|
||||||
|
streams: HashMap<PeerId, Stream>,
|
||||||
pub pending_dials: HashMap<ConnectionId, Dial>,
|
pub pending_dials: HashMap<ConnectionId, Dial>,
|
||||||
pub commands_tx: mpsc::Sender<Command>,
|
pub commands_tx: mpsc::Sender<Command>,
|
||||||
pub commands_rx: mpsc::Receiver<Command>,
|
pub commands_rx: mpsc::Receiver<Command>,
|
||||||
pub events_tx: broadcast::Sender<Event>,
|
pub events_tx: broadcast::Sender<Event>,
|
||||||
pub mixnet_client: MixnetClient<OsRng>,
|
|
||||||
pub mixnet_delay: Range<Duration>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
macro_rules! log_error {
|
macro_rules! log_error {
|
||||||
|
@ -52,27 +51,29 @@ impl SwarmHandler {
|
||||||
events_tx: broadcast::Sender<Event>,
|
events_tx: broadcast::Sender<Event>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let swarm = Swarm::build(&config.inner).unwrap();
|
let swarm = Swarm::build(&config.inner).unwrap();
|
||||||
let mixnet_client = MixnetClient::new(config.mixnet_client.clone(), OsRng);
|
let stream_control = swarm.stream_control();
|
||||||
|
|
||||||
// Keep the dialing history since swarm.connect doesn't return the result synchronously
|
// Keep the dialing history since swarm.connect doesn't return the result synchronously
|
||||||
let pending_dials = HashMap::<ConnectionId, Dial>::new();
|
let pending_dials = HashMap::<ConnectionId, Dial>::new();
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
swarm,
|
swarm,
|
||||||
|
stream_control,
|
||||||
|
streams: HashMap::new(),
|
||||||
pending_dials,
|
pending_dials,
|
||||||
commands_tx,
|
commands_tx,
|
||||||
commands_rx,
|
commands_rx,
|
||||||
events_tx,
|
events_tx,
|
||||||
mixnet_client,
|
|
||||||
mixnet_delay: config.mixnet_delay.clone(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn run(&mut self, initial_peers: Vec<Multiaddr>) {
|
pub async fn run(&mut self, initial_peers: Vec<Multiaddr>) {
|
||||||
for initial_peer in initial_peers {
|
for initial_peer in initial_peers {
|
||||||
|
let (tx, _) = oneshot::channel();
|
||||||
let dial = Dial {
|
let dial = Dial {
|
||||||
addr: initial_peer,
|
addr: initial_peer,
|
||||||
retry_count: 0,
|
retry_count: 0,
|
||||||
|
result_sender: tx,
|
||||||
};
|
};
|
||||||
Self::schedule_connect(dial, self.commands_tx.clone()).await;
|
Self::schedule_connect(dial, self.commands_tx.clone()).await;
|
||||||
}
|
}
|
||||||
|
@ -89,8 +90,7 @@ impl SwarmHandler {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(deprecated)]
|
fn handle_event(&mut self, event: SwarmEvent<BehaviourEvent>) {
|
||||||
fn handle_event(&mut self, event: SwarmEvent<BehaviourEvent, THandlerErr<Behaviour>>) {
|
|
||||||
match event {
|
match event {
|
||||||
SwarmEvent::Behaviour(BehaviourEvent::Gossipsub(gossipsub::Event::Message {
|
SwarmEvent::Behaviour(BehaviourEvent::Gossipsub(gossipsub::Event::Message {
|
||||||
propagation_source: peer_id,
|
propagation_source: peer_id,
|
||||||
|
@ -108,7 +108,7 @@ impl SwarmHandler {
|
||||||
} => {
|
} => {
|
||||||
tracing::debug!("connected to peer:{peer_id}, connection_id:{connection_id:?}");
|
tracing::debug!("connected to peer:{peer_id}, connection_id:{connection_id:?}");
|
||||||
if endpoint.is_dialer() {
|
if endpoint.is_dialer() {
|
||||||
self.complete_connect(connection_id);
|
self.complete_connect(connection_id, peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
SwarmEvent::ConnectionClosed {
|
SwarmEvent::ConnectionClosed {
|
||||||
|
@ -142,10 +142,7 @@ impl SwarmHandler {
|
||||||
self.connect(dial);
|
self.connect(dial);
|
||||||
}
|
}
|
||||||
Command::Broadcast { topic, message } => {
|
Command::Broadcast { topic, message } => {
|
||||||
tracing::debug!("sending message to mixnet client");
|
self.broadcast_and_retry(topic, message, 0).await;
|
||||||
let msg = MixnetMessage { topic, message };
|
|
||||||
let delay = random_delay(&self.mixnet_delay);
|
|
||||||
log_error!(self.mixnet_client.send(msg.as_bytes(), delay));
|
|
||||||
}
|
}
|
||||||
Command::Subscribe(topic) => {
|
Command::Subscribe(topic) => {
|
||||||
tracing::debug!("subscribing to topic: {topic}");
|
tracing::debug!("subscribing to topic: {topic}");
|
||||||
|
@ -167,13 +164,31 @@ impl SwarmHandler {
|
||||||
};
|
};
|
||||||
log_error!(reply.send(info));
|
log_error!(reply.send(info));
|
||||||
}
|
}
|
||||||
Command::DirectBroadcastAndRetry {
|
Command::RetryBroadcast {
|
||||||
topic,
|
topic,
|
||||||
message,
|
message,
|
||||||
retry_count,
|
retry_count,
|
||||||
} => {
|
} => {
|
||||||
self.broadcast_and_retry(topic, message, retry_count).await;
|
self.broadcast_and_retry(topic, message, retry_count).await;
|
||||||
}
|
}
|
||||||
|
Command::StreamSend {
|
||||||
|
peer_id,
|
||||||
|
protocol,
|
||||||
|
packet_body: packet,
|
||||||
|
} => {
|
||||||
|
tracing::debug!("StreamSend to {peer_id}");
|
||||||
|
match self.open_stream(peer_id, protocol).await {
|
||||||
|
Ok(stream) => {
|
||||||
|
if let Err(e) = packet.write_to(stream).await {
|
||||||
|
tracing::error!("failed to write to the stream with ${peer_id}: {e}");
|
||||||
|
self.close_stream(&peer_id).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("failed to open stream with {peer_id}: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -197,12 +212,19 @@ impl SwarmHandler {
|
||||||
"Failed to connect to {} with unretriable error: {e}",
|
"Failed to connect to {} with unretriable error: {e}",
|
||||||
dial.addr
|
dial.addr
|
||||||
);
|
);
|
||||||
|
if let Err(err) = dial.result_sender.send(Err(e)) {
|
||||||
|
tracing::warn!("failed to send the Err result of dialing: {err:?}");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn complete_connect(&mut self, connection_id: ConnectionId) {
|
fn complete_connect(&mut self, connection_id: ConnectionId, peer_id: PeerId) {
|
||||||
self.pending_dials.remove(&connection_id);
|
if let Some(dial) = self.pending_dials.remove(&connection_id) {
|
||||||
|
if let Err(e) = dial.result_sender.send(Ok(peer_id)) {
|
||||||
|
tracing::warn!("failed to send the Ok result of dialing: {e:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Consider a common retry module for all use cases
|
// TODO: Consider a common retry module for all use cases
|
||||||
|
@ -233,7 +255,7 @@ impl SwarmHandler {
|
||||||
tracing::debug!("broadcasted message with id: {id} tp topic: {topic}");
|
tracing::debug!("broadcasted message with id: {id} tp topic: {topic}");
|
||||||
// self-notification because libp2p doesn't do it
|
// self-notification because libp2p doesn't do it
|
||||||
if self.swarm.is_subscribed(&topic) {
|
if self.swarm.is_subscribed(&topic) {
|
||||||
log_error!(self.events_tx.send(Event::Message(Message {
|
log_error!(self.events_tx.send(Event::Message(gossipsub::Message {
|
||||||
source: None,
|
source: None,
|
||||||
data: message.into(),
|
data: message.into(),
|
||||||
sequence_number: None,
|
sequence_number: None,
|
||||||
|
@ -249,7 +271,7 @@ impl SwarmHandler {
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
tokio::time::sleep(wait).await;
|
tokio::time::sleep(wait).await;
|
||||||
commands_tx
|
commands_tx
|
||||||
.send(Command::DirectBroadcastAndRetry {
|
.send(Command::RetryBroadcast {
|
||||||
topic,
|
topic,
|
||||||
message,
|
message,
|
||||||
retry_count: retry_count + 1,
|
retry_count: retry_count + 1,
|
||||||
|
@ -267,4 +289,26 @@ impl SwarmHandler {
|
||||||
fn exp_backoff(retry: usize) -> Duration {
|
fn exp_backoff(retry: usize) -> Duration {
|
||||||
std::time::Duration::from_secs(BACKOFF.pow(retry as u32))
|
std::time::Duration::from_secs(BACKOFF.pow(retry as u32))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn incoming_streams(&mut self, protocol: StreamProtocol) -> IncomingStreams {
|
||||||
|
self.stream_control.accept(protocol).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
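// Returns the cached outbound stream for this peer, opening and caching a new one if none exists yet.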
async fn open_stream(
|
||||||
|
&mut self,
|
||||||
|
peer_id: PeerId,
|
||||||
|
protocol: StreamProtocol,
|
||||||
|
) -> Result<&mut Stream, OpenStreamError> {
|
||||||
|
if let Entry::Vacant(entry) = self.streams.entry(peer_id) {
|
||||||
|
let stream = self.stream_control.open_stream(peer_id, protocol).await?;
|
||||||
|
entry.insert(stream);
|
||||||
|
}
|
||||||
|
Ok(self.streams.get_mut(&peer_id).unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn close_stream(&mut self, peer_id: &PeerId) {
|
||||||
|
if let Some(mut stream) = self.streams.remove(peer_id) {
|
||||||
|
let _ = stream.close().await;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,291 @@
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
// internal
|
||||||
|
use super::{
|
||||||
|
libp2p::{self, swarm::SwarmHandler, Libp2pConfig, Topic},
|
||||||
|
NetworkBackend,
|
||||||
|
};
|
||||||
|
use futures::StreamExt;
|
||||||
|
use mixnet::{
|
||||||
|
address::NodeAddress,
|
||||||
|
client::{MessageQueue, MixClient, MixClientConfig},
|
||||||
|
node::{MixNode, MixNodeConfig, Output, PacketQueue},
|
||||||
|
packet::PacketBody,
|
||||||
|
};
|
||||||
|
use nomos_core::wire;
|
||||||
|
use nomos_libp2p::{
|
||||||
|
libp2p::{Stream, StreamProtocol},
|
||||||
|
libp2p_stream::IncomingStreams,
|
||||||
|
Multiaddr, Protocol,
|
||||||
|
};
|
||||||
|
// crates
|
||||||
|
use overwatch_rs::{overwatch::handle::OverwatchHandle, services::state::NoState};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::{
|
||||||
|
runtime::Handle,
|
||||||
|
sync::{broadcast, mpsc, oneshot},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// A mixnet network backend that broadcasts messages to the network by mixing packets through the mixnet,
|
||||||
|
/// and receives messages broadcast from the network.
|
||||||
|
pub struct MixnetNetworkBackend {
|
||||||
|
libp2p_events_tx: broadcast::Sender<libp2p::Event>,
|
||||||
|
libp2p_commands_tx: mpsc::Sender<libp2p::Command>,
|
||||||
|
|
||||||
|
mixclient_message_queue: MessageQueue,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct MixnetConfig {
|
||||||
|
libp2p_config: Libp2pConfig,
|
||||||
|
mixclient_config: MixClientConfig,
|
||||||
|
mixnode_config: MixNodeConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
const BUFFER_SIZE: usize = 64;
|
||||||
|
const STREAM_PROTOCOL: StreamProtocol = StreamProtocol::new("/mixnet");
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl NetworkBackend for MixnetNetworkBackend {
|
||||||
|
type Settings = MixnetConfig;
|
||||||
|
type State = NoState<MixnetConfig>;
|
||||||
|
type Message = libp2p::Command;
|
||||||
|
type EventKind = libp2p::EventKind;
|
||||||
|
type NetworkEvent = libp2p::Event;
|
||||||
|
|
||||||
|
fn new(config: Self::Settings, overwatch_handle: OverwatchHandle) -> Self {
|
||||||
|
// TODO: One important task that should be spawned is
|
||||||
|
// subscribing to NewEntropy events that will be emitted from the consensus service soon,
|
||||||
|
// so that new topology can be built internally.
|
||||||
|
// In the mixnet spec, the robustness layer is responsible for this task.
|
||||||
|
// We can implement the robustness layer in the mixnet-specific crate,
|
||||||
|
// that we're going to define at the root of the project.
|
||||||
|
|
||||||
|
let (libp2p_commands_tx, libp2p_commands_rx) = tokio::sync::mpsc::channel(BUFFER_SIZE);
|
||||||
|
let (libp2p_events_tx, _) = tokio::sync::broadcast::channel(BUFFER_SIZE);
|
||||||
|
|
||||||
|
let mut swarm_handler = SwarmHandler::new(
|
||||||
|
&config.libp2p_config,
|
||||||
|
libp2p_commands_tx.clone(),
|
||||||
|
libp2p_commands_rx,
|
||||||
|
libp2p_events_tx.clone(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Run mixnode
|
||||||
|
let (mixnode, packet_queue) = MixNode::new(config.mixnode_config).unwrap();
|
||||||
|
let libp2p_cmd_tx = libp2p_commands_tx.clone();
|
||||||
|
let queue = packet_queue.clone();
|
||||||
|
overwatch_handle.runtime().spawn(async move {
|
||||||
|
Self::run_mixnode(mixnode, queue, libp2p_cmd_tx).await;
|
||||||
|
});
|
||||||
|
let incoming_streams = swarm_handler.incoming_streams(STREAM_PROTOCOL);
|
||||||
|
let runtime_handle = overwatch_handle.runtime().clone();
|
||||||
|
let queue = packet_queue.clone();
|
||||||
|
overwatch_handle.runtime().spawn(async move {
|
||||||
|
Self::handle_incoming_streams(incoming_streams, queue, runtime_handle).await;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Run mixclient
|
||||||
|
let (mixclient, message_queue) = MixClient::new(config.mixclient_config).unwrap();
|
||||||
|
let libp2p_cmd_tx = libp2p_commands_tx.clone();
|
||||||
|
overwatch_handle.runtime().spawn(async move {
|
||||||
|
Self::run_mixclient(mixclient, packet_queue, libp2p_cmd_tx).await;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Run libp2p swarm to make progress
|
||||||
|
overwatch_handle.runtime().spawn(async move {
|
||||||
|
swarm_handler.run(config.libp2p_config.initial_peers).await;
|
||||||
|
});
|
||||||
|
|
||||||
|
Self {
|
||||||
|
libp2p_events_tx,
|
||||||
|
libp2p_commands_tx,
|
||||||
|
|
||||||
|
mixclient_message_queue: message_queue,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn process(&self, msg: Self::Message) {
|
||||||
|
match msg {
|
||||||
|
libp2p::Command::Broadcast { topic, message } => {
|
||||||
|
let msg = MixnetMessage { topic, message };
|
||||||
|
if let Err(e) = self.mixclient_message_queue.send(msg.as_bytes()).await {
|
||||||
|
tracing::error!("failed to send messasge to mixclient: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmd => {
|
||||||
|
if let Err(e) = self.libp2p_commands_tx.send(cmd).await {
|
||||||
|
tracing::error!("failed to send command to libp2p swarm: {e:?}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn subscribe(
|
||||||
|
&mut self,
|
||||||
|
kind: Self::EventKind,
|
||||||
|
) -> broadcast::Receiver<Self::NetworkEvent> {
|
||||||
|
match kind {
|
||||||
|
libp2p::EventKind::Message => self.libp2p_events_tx.subscribe(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MixnetNetworkBackend {
|
||||||
|
async fn run_mixnode(
|
||||||
|
mut mixnode: MixNode,
|
||||||
|
packet_queue: PacketQueue,
|
||||||
|
swarm_commands_tx: mpsc::Sender<libp2p::Command>,
|
||||||
|
) {
|
||||||
|
while let Some(output) = mixnode.next().await {
|
||||||
|
match output {
|
||||||
|
Output::Forward(packet) => {
|
||||||
|
Self::stream_send(
|
||||||
|
packet.address(),
|
||||||
|
packet.body(),
|
||||||
|
&swarm_commands_tx,
|
||||||
|
&packet_queue,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
Output::ReconstructedMessage(message) => {
|
||||||
|
match MixnetMessage::from_bytes(&message) {
|
||||||
|
Ok(msg) => {
|
||||||
|
swarm_commands_tx
|
||||||
|
.send(libp2p::Command::Broadcast {
|
||||||
|
topic: msg.topic,
|
||||||
|
message: msg.message,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("failed to parse message received from mixnet: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_mixclient(
|
||||||
|
mut mixclient: MixClient,
|
||||||
|
packet_queue: PacketQueue,
|
||||||
|
swarm_commands_tx: mpsc::Sender<libp2p::Command>,
|
||||||
|
) {
|
||||||
|
while let Some(packet) = mixclient.next().await {
|
||||||
|
Self::stream_send(
|
||||||
|
packet.address(),
|
||||||
|
packet.body(),
|
||||||
|
&swarm_commands_tx,
|
||||||
|
&packet_queue,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_incoming_streams(
|
||||||
|
mut incoming_streams: IncomingStreams,
|
||||||
|
packet_queue: PacketQueue,
|
||||||
|
runtime_handle: Handle,
|
||||||
|
) {
|
||||||
|
while let Some((_, stream)) = incoming_streams.next().await {
|
||||||
|
let queue = packet_queue.clone();
|
||||||
|
runtime_handle.spawn(async move {
|
||||||
|
if let Err(e) = Self::handle_stream(stream, queue).await {
|
||||||
|
tracing::warn!("stream closed: {e}");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_stream(mut stream: Stream, packet_queue: PacketQueue) -> std::io::Result<()> {
|
||||||
|
loop {
|
||||||
|
match PacketBody::read_from(&mut stream).await? {
|
||||||
|
Ok(packet_body) => {
|
||||||
|
packet_queue
|
||||||
|
.send(packet_body)
|
||||||
|
.await
|
||||||
|
.expect("The receiving half of packet queue should be always open");
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!(
|
||||||
|
"failed to parse packet body. continuing reading the next packet: {e}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
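// Dials the packet's destination (with retries) and forwards the packet body over the /mixnet stream
// protocol; if dialing fails because the destination is the local node, the packet is pushed straight
// into the local packet queue instead.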
async fn stream_send(
|
||||||
|
addr: NodeAddress,
|
||||||
|
packet_body: PacketBody,
|
||||||
|
swarm_commands_tx: &mpsc::Sender<libp2p::Command>,
|
||||||
|
packet_queue: &PacketQueue,
|
||||||
|
) {
|
||||||
|
let (tx, rx) = oneshot::channel();
|
||||||
|
swarm_commands_tx
|
||||||
|
.send(libp2p::Command::Connect(libp2p::Dial {
|
||||||
|
addr: Self::multiaddr_from(addr),
|
||||||
|
retry_count: 3,
|
||||||
|
result_sender: tx,
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.expect("Command receiver should be always open");
|
||||||
|
|
||||||
|
match rx.await {
|
||||||
|
Ok(Ok(peer_id)) => {
|
||||||
|
swarm_commands_tx
|
||||||
|
.send(libp2p::Command::StreamSend {
|
||||||
|
peer_id,
|
||||||
|
protocol: STREAM_PROTOCOL,
|
||||||
|
packet_body,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.expect("Command receiver should be always open");
|
||||||
|
}
|
||||||
|
Ok(Err(e)) => match e {
|
||||||
|
nomos_libp2p::DialError::NoAddresses => {
|
||||||
|
tracing::debug!("Dialing failed because the peer is the local node. Sending msg directly to the queue");
|
||||||
|
packet_queue
|
||||||
|
.send(packet_body)
|
||||||
|
.await
|
||||||
|
.expect("The receiving half of packet queue should be always open");
|
||||||
|
}
|
||||||
|
_ => tracing::error!("failed to dial with unrecoverable error: {e}"),
|
||||||
|
},
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("channel closed before receiving: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn multiaddr_from(addr: NodeAddress) -> Multiaddr {
|
||||||
|
match SocketAddr::from(addr) {
|
||||||
|
SocketAddr::V4(addr) => Multiaddr::empty()
|
||||||
|
.with(Protocol::Ip4(*addr.ip()))
|
||||||
|
.with(Protocol::Udp(addr.port()))
|
||||||
|
.with(Protocol::QuicV1),
|
||||||
|
SocketAddr::V6(addr) => Multiaddr::empty()
|
||||||
|
.with(Protocol::Ip6(*addr.ip()))
|
||||||
|
.with(Protocol::Udp(addr.port()))
|
||||||
|
.with(Protocol::QuicV1),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||||
|
pub struct MixnetMessage {
|
||||||
|
pub topic: Topic,
|
||||||
|
pub message: Box<[u8]>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MixnetMessage {
|
||||||
|
pub fn as_bytes(&self) -> Vec<u8> {
|
||||||
|
wire::serialize(self).expect("Couldn't serialize MixnetMessage")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_bytes(data: &[u8]) -> Result<Self, wire::Error> {
|
||||||
|
wire::deserialize(data)
|
||||||
|
}
|
||||||
|
}
|
|
@ -5,6 +5,8 @@ use tokio::sync::broadcast::Receiver;
|
||||||
#[cfg(feature = "libp2p")]
|
#[cfg(feature = "libp2p")]
|
||||||
pub mod libp2p;
|
pub mod libp2p;
|
||||||
|
|
||||||
|
pub mod mixnet;
|
||||||
|
|
||||||
#[cfg(feature = "mock")]
|
#[cfg(feature = "mock")]
|
||||||
pub mod mock;
|
pub mod mock;
|
||||||
|
|
||||||
|
|
|
@ -8,4 +8,7 @@ serde = ["dep:serde"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
const-hex = "1"
|
const-hex = "1"
|
||||||
serde = { version = "1.0", optional = true }
|
serde = { version = "1.0", optional = true }
|
||||||
|
rand = "0.8"
|
||||||
|
rand_chacha = "0.3"
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,23 @@
+use rand::{Rng, SeedableRng};
+use rand_chacha::ChaCha20Rng;
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct FisherYatesShuffle {
+    pub entropy: [u8; 32],
+}
+
+impl FisherYatesShuffle {
+    pub fn new(entropy: [u8; 32]) -> Self {
+        Self { entropy }
+    }
+
+    pub fn shuffle<T: Clone>(elements: &mut [T], entropy: [u8; 32]) {
+        let mut rng = ChaCha20Rng::from_seed(entropy);
+        // Implementation of fisher yates shuffling
+        // https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+        for i in (1..elements.len()).rev() {
+            let j = rng.gen_range(0..=i);
+            elements.swap(i, j);
+        }
+    }
+}
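A small usage sketch for the shuffle added above (assuming `nomos-utils` is pulled in as a dependency, as the Cargo.toml changes in this commit do): seeding the ChaCha20 RNG with the same 32-byte entropy gives the same permutation, which is what lets different nodes derive an identical ordering from shared entropy. The seed values here are made up for illustration.

```rust
// Sketch: exercises the FisherYatesShuffle from nomos-utils added in this commit.
use nomos_utils::fisheryates::FisherYatesShuffle;

fn main() {
    let entropy = [7u8; 32]; // arbitrary seed, illustration only

    let mut a: Vec<u32> = (0..10).collect();
    let mut b: Vec<u32> = (0..10).collect();

    FisherYatesShuffle::shuffle(&mut a, entropy);
    FisherYatesShuffle::shuffle(&mut b, entropy);

    // Same entropy => same permutation, regardless of which node runs it.
    assert_eq!(a, b);

    // Different entropy almost certainly gives a different order.
    let mut c: Vec<u32> = (0..10).collect();
    FisherYatesShuffle::shuffle(&mut c, [8u8; 32]);
    assert_ne!(a, c); // overwhelmingly likely, not strictly guaranteed
    println!("{a:?}");
}
```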
@@ -1,3 +1,5 @@
+pub mod fisheryates;
+
 #[cfg(feature = "serde")]
 pub mod serde {
     fn serialize_human_readable_bytes_array<const N: usize, S: serde::Serializer>(
@@ -39,6 +39,7 @@ serde_json = "1.0"
 thiserror = "1"
 tracing = { version = "0.1", default-features = false, features = ["log", "attributes"] }
 tracing-subscriber = { version = "0.3", features = ["json", "env-filter", "tracing-log"]}
+nomos-utils = { path = "../nomos-utils" }
 
 [target.'cfg(target_arch = "wasm32")'.dependencies]
 getrandom = { version = "0.2", features = ["js"] }
@@ -1,9 +1,10 @@
-use carnot_engine::overlay::{BranchOverlay, FisherYatesShuffle, RandomBeaconState};
+use carnot_engine::overlay::{BranchOverlay, RandomBeaconState};
 use carnot_engine::Overlay;
 use carnot_engine::{
     overlay::{FlatOverlay, FreezeMembership, RoundRobin, TreeOverlay},
     NodeId,
 };
+use nomos_utils::fisheryates::FisherYatesShuffle;
 use rand::Rng;
 use simulations::overlay::overlay_info::{OverlayInfo, OverlayInfoExt};
 use simulations::settings::OverlaySettings;
@@ -60,11 +60,11 @@ impl<T: Overlay> OverlayInfoExt for T {
 mod tests {
     use carnot_engine::{
         overlay::{
-            BranchOverlay, BranchOverlaySettings, FisherYatesShuffle, RoundRobin, TreeOverlay,
-            TreeOverlaySettings,
+            BranchOverlay, BranchOverlaySettings, RoundRobin, TreeOverlay, TreeOverlaySettings,
         },
         NodeId, Overlay,
     };
+    use nomos_utils::fisheryates::FisherYatesShuffle;
 
     use super::*;
     const ENTROPY: [u8; 32] = [0; 32];
@@ -24,7 +24,6 @@ EXPOSE 3000 8080 9000 60000
 
 COPY --from=builder /nomos/target/release/nomos-node /usr/bin/nomos-node
 COPY --from=builder /nomos/target/release/nomos-cli /usr/bin/nomos-cli
-COPY --from=builder /nomos/target/release/mixnode /usr/bin/mixnode
 COPY --from=builder /usr/bin/etcdctl /usr/bin/etcdctl
 COPY nodes/nomos-node/config.yaml /etc/nomos/config.yaml
@@ -5,7 +5,6 @@ The Nomos Docker Compose Testnet contains four distinct service types:
 - **Bootstrap Node Service**: A singular Nomos node with its own service and a deterministic DNS address. Other nodes utilize this as their initial peer.
 - **Libp2p Node Services**: Multiple dynamically spawned Nomos nodes that announce their existence through etcd.
 - **Etcd Service**: A container running an etcd instance.
-- **Mix-Node-{0,1,2}**: These are statically configured mixnet nodes. Every Libp2p node includes these in its topology configuration.
 
 ## Building
 
@@ -42,7 +41,7 @@ docker compose up -d
 Followed by:
 
 ```bash
-docker compose logs -f {bootstrap,libp2p-node,mixnode,etcd}
+docker compose logs -f {bootstrap,libp2p-node,etcd}
 ```
 
 ## Using testnet
@@ -25,43 +25,6 @@ network:
     discV5BootstrapNodes: []
     initial_peers: []
     relayTopics: []
-    # Mixclient configuration to communicate with mixnodes.
-    # The libp2p network backend always requires this mixclient configuration
-    # (cannot be disabled for now).
-    mixnet_client:
-      # A mixclient mode. For details, see the documentation of the "mixnet" crate.
-      # - Sender
-      # - !SenderReceiver [mixnode_client_listen_address]
-      mode: !SenderReceiver mix-node-2:7778
-      # A mixnet topology, which contains the information of all mixnodes in the mixnet.
-      # (The topology is static for now.)
-      topology:
-        # Each mixnet layer consists of a list of mixnodes.
-        layers:
-          - nodes:
-              - address: mix-node-0:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-          - nodes:
-              - address: mix-node-1:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-          - nodes:
-              - address: mix-node-2:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-
-      # A max number of connections that will stay connected to mixnodes in the first mixnet layer.
-      connection_pool_size: 255
-      max_retries: 5
-      retry_delay:
-        secs: 1
-        nanos: 0
-    # A range of total delay that will be set to each Sphinx packets
-    # sent to the mixnet for timing obfuscation.
-    # Panics if start > end.
-    mixnet_delay:
-      start: "0ms"
-      end: "0ms"
 
 http:
   backend_settings:
@@ -4,41 +4,5 @@
     log_level: "fatal"
     node_key: "0000000000000000000000000000000000000000000000000000000000000667"
     discV5BootstrapNodes: []
-    initial_peers: ["/dns/bootstrap/tcp/3000"]
+    initial_peers: ["/dns/bootstrap/udp/3000/quic-v1"]
     relayTopics: []
-    # Mixclient configuration to communicate with mixnodes.
-    # The libp2p network backend always requires this mixclient configuration
-    # (cannot be disabled for now).
-    mixnet_client:
-      # A mixclient mode. For details, see the documentation of the "mixnet" crate.
-      # - Sender
-      # - !SenderReceiver [mixnode_client_listen_address]
-      mode: Sender
-      # A mixnet topology, which contains the information of all mixnodes in the mixnet.
-      # (The topology is static for now.)
-      topology:
-        # Each mixnet layer consists of a list of mixnodes.
-        layers:
-          - nodes:
-              - address: mix-node-0:7707 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-          - nodes:
-              - address: mix-node-1:7717 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-          - nodes:
-              - address: mix-node-2:7727 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-      # A max number of connections that will stay connected to mixnodes in the first mixnet layer.
-      connection_pool_size: 255
-      max_retries: 5
-      retry_delay:
-        secs: 1
-        nanos: 0
-    # A range of total delay that will be set to each Sphinx packets
-    # sent to the mixnet for timing obfuscation.
-    # Panics if start > end.
-    mixnet_delay:
-      start: "0ms"
-      end: "0ms"
@@ -23,45 +23,8 @@ network:
     log_level: "fatal"
     node_key: "0000000000000000000000000000000000000000000000000000000000000001"
     discV5BootstrapNodes: []
-    initial_peers: ["/dns/bootstrap/tcp/3000"]
+    initial_peers: ["/dns/bootstrap/udp/3000/quic-v1"]
     relayTopics: []
-    # Mixclient configuration to communicate with mixnodes.
-    # The libp2p network backend always requires this mixclient configuration
-    # (cannot be disabled for now).
-    mixnet_client:
-      # A mixclient mode. For details, see the documentation of the "mixnet" crate.
-      # - Sender
-      # - !SenderReceiver [mixnode_client_listen_address]
-      mode: Sender
-      # A mixnet topology, which contains the information of all mixnodes in the mixnet.
-      # (The topology is static for now.)
-      topology:
-        # Each mixnet layer consists of a list of mixnodes.
-        layers:
-          - nodes:
-              - address: mix-node-0:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-          - nodes:
-              - address: mix-node-1:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-          - nodes:
-              - address: mix-node-2:7777 # A listen address of the mixnode
-                public_key: "fd3384e132ad02a56c78f45547ee40038dc79002b90d29ed90e08eee762ae715"
-
-
-      # A max number of connections that will stay connected to mixnodes in the first mixnet layer.
-      connection_pool_size: 255
-      max_retries: 5
-      retry_delay:
-        secs: 1
-        nanos: 0
-    # A range of total delay that will be set to each Sphinx packets
-    # sent to the mixnet for timing obfuscation.
-    # Panics if start > end.
-    mixnet_delay:
-      start: "0ms"
-      end: "0ms"
 
 http:
   backend_settings:
@@ -1,16 +0,0 @@
-mixnode:
-  # A listen address for other mixnodes in the mixnet and mixclients who want to send packets.
-  listen_address: 0.0.0.0:7777
-  # A (internal) listen address only for a "single" mixclient who wants to receive packets
-  # from the last mixnet layer.
-  # For more details, see the documentation in the "mixnet" crate.
-  client_listen_address: 0.0.0.0:7778
-  # A ed25519 private key for decrypting inbound Sphinx packets
-  # received from mixclients or mixnodes in the previous mixnet layer.
-  private_key: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
-  # A max number of connections that will stay connected to mixnodes in the next layer.
-  connection_pool_size: 255
-log:
-  backend: "Stdout"
-  format: "Json"
-  level: "info"
@@ -12,7 +12,7 @@ node_ids=$(etcdctl get "/node/" --prefix --keys-only)
 for node_id in $node_ids; do
   node_key=$(etcdctl get "/config${node_id}/key" --print-value-only)
   node_ip=$(etcdctl get "/config${node_id}/ip" --print-value-only)
-  node_multiaddr="/ip4/${node_ip}/tcp/3000"
+  node_multiaddr="/ip4/${node_ip}/udp/3000/quic-v1"
 
   if [ -z "$NET_INITIAL_PEERS" ]; then
     NET_INITIAL_PEERS=$node_multiaddr
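A hedged sanity check of the two address shapes this commit standardizes on, the DNS form from the YAML configs and the IP form built by the script above. It assumes the standalone `multiaddr` crate; the IP is a placeholder.

```rust
// Sketch: both the DNS-based and IP-based QUIC multiaddrs parse cleanly.
use multiaddr::{Multiaddr, Protocol};

fn main() {
    // DNS-based form used as initial_peers in the testnet configs.
    let dns: Multiaddr = "/dns/bootstrap/udp/3000/quic-v1".parse().unwrap();
    // IP-based form assembled by run.sh for peers discovered via etcd (10.0.0.7 is a placeholder).
    let ip: Multiaddr = "/ip4/10.0.0.7/udp/3000/quic-v1".parse().unwrap();

    // Both end in udp + quic-v1 rather than tcp, matching the switch to the QUIC transport.
    for ma in [&dns, &ip] {
        assert!(matches!(ma.iter().last(), Some(Protocol::QuicV1)));
    }
    println!("{dns}\n{ip}");
}
```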
@@ -8,6 +8,7 @@ publish = false
 nomos-node = { path = "../nodes/nomos-node", default-features = false }
 carnot-consensus = { path = "../nomos-services/carnot-consensus" }
 nomos-network = { path = "../nomos-services/network", features = ["libp2p"]}
+mixnet = { path = "../mixnet" }
 nomos-log = { path = "../nomos-services/log" }
 nomos-api = { path = "../nomos-services/api" }
 overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" }
@@ -16,12 +17,7 @@ carnot-engine = { path = "../consensus/carnot-engine", features = ["serde"] }
 nomos-mempool = { path = "../nomos-services/mempool", features = ["mock", "libp2p"] }
 nomos-da = { path = "../nomos-services/data-availability" }
 full-replication = { path = "../nomos-da/full-replication" }
-mixnode = { path = "../nodes/mixnode" }
-mixnet-node = { path = "../mixnet/node" }
-mixnet-client = { path = "../mixnet/client" }
-mixnet-topology = { path = "../mixnet/topology" }
-# Using older versions, since `mixnet-*` crates depend on `rand` v0.7.3.
-rand = "0.7.3"
+rand = "0.8"
 once_cell = "1"
 secp256k1 = { version = "0.26", features = ["rand"] }
 reqwest = { version = "0.11", features = ["json"] }
@@ -46,19 +42,10 @@ path = "src/tests/happy.rs"
 name = "test_consensus_unhappy_path"
 path = "src/tests/unhappy.rs"
 
-[[test]]
-name = "test_mixnet"
-path = "src/tests/mixnet.rs"
-
 [[test]]
 name = "test_cli"
 path = "src/tests/cli.rs"
 
-[[bench]]
-name = "mixnet"
-path = "src/benches/mixnet.rs"
-harness = false
-
 [features]
 metrics = ["nomos-node/metrics"]
@@ -1,82 +0,0 @@
-use std::time::Duration;
-
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use futures::StreamExt;
-use mixnet_client::{MessageStream, MixnetClient, MixnetClientConfig, MixnetClientMode};
-use rand::{rngs::OsRng, Rng, RngCore};
-use tests::MixNode;
-use tokio::time::Instant;
-
-pub fn mixnet(c: &mut Criterion) {
-    c.bench_function("mixnet", |b| {
-        b.to_async(tokio::runtime::Runtime::new().unwrap())
-            .iter_custom(|iters| async move {
-                let (_mixnodes, mut sender_client, mut destination_stream, msg) =
-                    setup(100 * 1024).await;
-
-                let start = Instant::now();
-                for _ in 0..iters {
-                    black_box(
-                        send_receive_message(&msg, &mut sender_client, &mut destination_stream)
-                            .await,
-                    );
-                }
-                start.elapsed()
-            })
-    });
-}
-
-async fn setup(msg_size: usize) -> (Vec<MixNode>, MixnetClient<OsRng>, MessageStream, Vec<u8>) {
-    let (mixnodes, node_configs, topology) = MixNode::spawn_nodes(3).await;
-
-    let sender_client = MixnetClient::new(
-        MixnetClientConfig {
-            mode: MixnetClientMode::Sender,
-            topology: topology.clone(),
-            connection_pool_size: 255,
-            max_retries: 3,
-            retry_delay: Duration::from_secs(5),
-        },
-        OsRng,
-    );
-    let destination_client = MixnetClient::new(
-        MixnetClientConfig {
-            mode: MixnetClientMode::SenderReceiver(
-                // Connect with the MixnetNode in the exit layer
-                // According to the current implementation,
-                // one of mixnodes the exit layer always will be selected as a destination.
-                node_configs.last().unwrap().client_listen_address,
-            ),
-            topology,
-            connection_pool_size: 255,
-            max_retries: 3,
-            retry_delay: Duration::from_secs(5),
-        },
-        OsRng,
-    );
-    let destination_stream = destination_client.run().await.unwrap();
-
-    let mut msg = vec![0u8; msg_size];
-    rand::thread_rng().fill_bytes(&mut msg);
-
-    (mixnodes, sender_client, destination_stream, msg)
-}
-
-async fn send_receive_message<R: Rng>(
-    msg: &[u8],
-    sender_client: &mut MixnetClient<R>,
-    destination_stream: &mut MessageStream,
-) {
-    let res = sender_client.send(msg.to_vec(), Duration::ZERO);
-    assert!(res.is_ok());
-
-    let received = destination_stream.next().await.unwrap().unwrap();
-    assert_eq!(msg, received.as_slice());
-}
-
-criterion_group!(
-    name = benches;
-    config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs(180));
-    targets = mixnet
-);
-criterion_main!(benches);
@@ -1,7 +1,4 @@
 pub mod nodes;
-use mixnet_node::MixnetNodeConfig;
-use mixnet_topology::MixnetTopology;
-pub use nodes::MixNode;
 pub use nodes::NomosNode;
 use once_cell::sync::Lazy;
 
@@ -16,7 +13,7 @@ use std::{fmt::Debug, sync::Mutex};
 use fraction::{Fraction, One};
 use rand::{thread_rng, Rng};
 
-static NET_PORT: Lazy<Mutex<u16>> = Lazy::new(|| Mutex::new(thread_rng().gen_range(8000, 10000)));
+static NET_PORT: Lazy<Mutex<u16>> = Lazy::new(|| Mutex::new(thread_rng().gen_range(8000..10000)));
 static IS_SLOW_TEST_ENV: Lazy<bool> =
     Lazy::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true"));
 
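The `gen_range` change above tracks the move from rand 0.7 to rand 0.8 in the test crate's dependencies: the method now takes a single range expression instead of two bounds. A small sketch of the 0.8 API, for reference:

```rust
// Sketch of the rand 0.8 gen_range API that the tests now use.
use rand::{thread_rng, Rng};

fn main() {
    let mut rng = thread_rng();

    // rand 0.7 style was rng.gen_range(8000, 10000); rand 0.8 takes a half-open range.
    let port: u16 = rng.gen_range(8000..10000);
    assert!((8000..10000).contains(&port));

    // Inclusive ranges are also accepted, as in the Fisher-Yates shuffle above.
    let j = rng.gen_range(0..=9);
    assert!(j <= 9);
    println!("port={port}, j={j}");
}
```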
@@ -42,6 +39,7 @@ pub fn adjust_timeout(d: Duration) -> Duration {
 pub trait Node: Sized {
     type ConsensusInfo: Debug + Clone + PartialEq;
     async fn spawn_nodes(config: SpawnConfig) -> Vec<Self>;
+    fn node_configs(config: SpawnConfig) -> Vec<nomos_node::Config>;
     async fn consensus_info(&self) -> Self::ConsensusInfo;
     fn stop(&mut self);
 }
@@ -49,20 +47,14 @@ pub trait Node: Sized {
 #[derive(Clone)]
 pub enum SpawnConfig {
     // Star topology: Every node is initially connected to a single node.
-    Star {
-        consensus: ConsensusConfig,
-        mixnet: MixnetConfig,
-    },
+    Star { consensus: ConsensusConfig },
     // Chain topology: Every node is chained to the node next to it.
-    Chain {
-        consensus: ConsensusConfig,
-        mixnet: MixnetConfig,
-    },
+    Chain { consensus: ConsensusConfig },
 }
 
 impl SpawnConfig {
     // Returns a SpawnConfig::Chain with proper configurations for happy-path tests
-    pub fn chain_happy(n_participants: usize, mixnet_config: MixnetConfig) -> Self {
+    pub fn chain_happy(n_participants: usize) -> Self {
         Self::Chain {
             consensus: ConsensusConfig {
                 n_participants,
@@ -73,7 +65,6 @@ impl SpawnConfig {
                 // and it takes 1+ secs for each nomos-node to be started.
                 timeout: adjust_timeout(Duration::from_millis(n_participants as u64 * 2500)),
             },
-            mixnet: mixnet_config,
         }
     }
 }
@@ -84,9 +75,3 @@ pub struct ConsensusConfig {
     pub threshold: Fraction,
     pub timeout: Duration,
 }
-
-#[derive(Clone)]
-pub struct MixnetConfig {
-    pub node_configs: Vec<MixnetNodeConfig>,
-    pub topology: MixnetTopology,
-}
@@ -1,131 +0,0 @@
-use std::{
-    net::{Ipv4Addr, SocketAddr, SocketAddrV4},
-    process::{Child, Command, Stdio},
-    time::Duration,
-};
-
-use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
-use mixnet_node::{MixnetNodeConfig, PRIVATE_KEY_SIZE};
-use mixnet_topology::{Layer, MixnetTopology, Node};
-use nomos_log::{LoggerBackend, LoggerFormat};
-use rand::{thread_rng, RngCore};
-use tempfile::NamedTempFile;
-
-use crate::{get_available_port, MixnetConfig};
-
-const MIXNODE_BIN: &str = "../target/debug/mixnode";
-
-pub struct MixNode {
-    _tempdir: tempfile::TempDir,
-    child: Child,
-}
-
-impl Drop for MixNode {
-    fn drop(&mut self) {
-        if std::thread::panicking() {
-            if let Err(e) = persist_tempdir(&mut self._tempdir, "mixnode") {
-                println!("failed to persist tempdir: {e}");
-            }
-        }
-
-        if let Err(e) = self.child.kill() {
-            println!("failed to kill the child process: {e}");
-        }
-    }
-}
-
-impl MixNode {
-    pub async fn spawn(config: MixnetNodeConfig) -> Self {
-        let dir = create_tempdir().unwrap();
-
-        let mut config = mixnode::Config {
-            mixnode: config,
-            log: Default::default(),
-        };
-        config.log.backend = LoggerBackend::File {
-            directory: dir.path().to_owned(),
-            prefix: Some(LOGS_PREFIX.into()),
-        };
-        config.log.format = LoggerFormat::Json;
-
-        let mut file = NamedTempFile::new().unwrap();
-        let config_path = file.path().to_owned();
-        serde_yaml::to_writer(&mut file, &config).unwrap();
-
-        let child = Command::new(std::env::current_dir().unwrap().join(MIXNODE_BIN))
-            .arg(&config_path)
-            .stdout(Stdio::null())
-            .spawn()
-            .unwrap();
-
-        //TODO: use a sophisticated way to wait until the node is ready
-        tokio::time::sleep(Duration::from_secs(1)).await;
-
-        Self {
-            _tempdir: dir,
-            child,
-        }
-    }
-
-    pub async fn spawn_nodes(num_nodes: usize) -> (Vec<Self>, MixnetConfig) {
-        let mut configs = Vec::<MixnetNodeConfig>::new();
-        for _ in 0..num_nodes {
-            let mut private_key = [0u8; PRIVATE_KEY_SIZE];
-            thread_rng().fill_bytes(&mut private_key);
-
-            let config = MixnetNodeConfig {
-                listen_address: SocketAddr::V4(SocketAddrV4::new(
-                    Ipv4Addr::new(127, 0, 0, 1),
-                    get_available_port(),
-                )),
-                client_listen_address: SocketAddr::V4(SocketAddrV4::new(
-                    Ipv4Addr::new(127, 0, 0, 1),
-                    get_available_port(),
-                )),
-                private_key,
-                connection_pool_size: 255,
-                ..Default::default()
-            };
-            configs.push(config);
-        }
-
-        let mut nodes = Vec::<MixNode>::new();
-        for config in &configs {
-            nodes.push(Self::spawn(*config).await);
-        }
-
-        // We need to return configs as well, to configure mixclients accordingly
-        (
-            nodes,
-            MixnetConfig {
-                node_configs: configs.clone(),
-                topology: Self::build_topology(configs),
-            },
-        )
-    }
-
-    fn build_topology(configs: Vec<MixnetNodeConfig>) -> MixnetTopology {
-        // Build three empty layers first
-        let mut layers = vec![Layer { nodes: Vec::new() }; 3];
-        let mut layer_id = 0;
-
-        // Assign nodes to each layer in round-robin
-        for config in &configs {
-            let public_key = config.public_key();
-            layers.get_mut(layer_id).unwrap().nodes.push(Node {
-                address: config.listen_address,
-                public_key,
-            });
-            layer_id = (layer_id + 1) % layers.len();
-        }
-
-        // Exclude empty layers
-        MixnetTopology {
-            layers: layers
-                .iter()
-                .filter(|layer| !layer.nodes.is_empty())
-                .cloned()
-                .collect(),
-        }
-    }
-}
@@ -1,7 +1,5 @@
-mod mixnode;
 pub mod nomos;
 
-pub use self::mixnode::MixNode;
 pub use nomos::NomosNode;
 use tempfile::TempDir;
 
@@ -4,16 +4,13 @@ use std::process::{Child, Command, Stdio};
 use std::time::Duration;
 // internal
 use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
-use crate::{adjust_timeout, get_available_port, ConsensusConfig, MixnetConfig, Node, SpawnConfig};
+use crate::{adjust_timeout, get_available_port, ConsensusConfig, Node, SpawnConfig};
 use carnot_consensus::{CarnotInfo, CarnotSettings};
 use carnot_engine::overlay::{RandomBeaconState, RoundRobin, TreeOverlay, TreeOverlaySettings};
 use carnot_engine::{BlockId, NodeId, Overlay};
 use full_replication::Certificate;
-use mixnet_client::{MixnetClientConfig, MixnetClientMode};
-use mixnet_node::MixnetNodeConfig;
-use mixnet_topology::MixnetTopology;
 use nomos_core::block::Block;
-use nomos_libp2p::{multiaddr, Multiaddr};
+use nomos_libp2p::{Multiaddr, Swarm};
 use nomos_log::{LoggerBackend, LoggerFormat};
 use nomos_mempool::MempoolMetrics;
 use nomos_network::backends::libp2p::Libp2pConfig;
@@ -201,35 +198,45 @@ impl NomosNode {
 impl Node for NomosNode {
     type ConsensusInfo = CarnotInfo;
 
+    /// Spawn nodes sequentially.
+    /// After one node is spawned successfully, the next node is spawned.
     async fn spawn_nodes(config: SpawnConfig) -> Vec<Self> {
+        let mut nodes = Vec::new();
+        for conf in Self::node_configs(config) {
+            nodes.push(Self::spawn(conf).await);
+        }
+        nodes
+    }
+
+    fn node_configs(config: SpawnConfig) -> Vec<nomos_node::Config> {
         match config {
-            SpawnConfig::Star { consensus, mixnet } => {
-                let (next_leader_config, configs) = create_node_configs(consensus, mixnet);
+            SpawnConfig::Star { consensus } => {
+                let (next_leader_config, configs) = create_node_configs(consensus);
 
                 let first_node_addr = node_address(&next_leader_config);
-                let mut nodes = vec![Self::spawn(next_leader_config).await];
+                let mut node_configs = vec![next_leader_config];
                 for mut conf in configs {
                     conf.network
                         .backend
                         .initial_peers
                         .push(first_node_addr.clone());
 
-                    nodes.push(Self::spawn(conf).await);
+                    node_configs.push(conf);
                 }
-                nodes
+                node_configs
             }
-            SpawnConfig::Chain { consensus, mixnet } => {
-                let (next_leader_config, configs) = create_node_configs(consensus, mixnet);
+            SpawnConfig::Chain { consensus } => {
+                let (next_leader_config, configs) = create_node_configs(consensus);
 
                 let mut prev_node_addr = node_address(&next_leader_config);
-                let mut nodes = vec![Self::spawn(next_leader_config).await];
+                let mut node_configs = vec![next_leader_config];
                 for mut conf in configs {
                     conf.network.backend.initial_peers.push(prev_node_addr);
                     prev_node_addr = node_address(&conf);
 
-                    nodes.push(Self::spawn(conf).await);
+                    node_configs.push(conf);
                 }
-                nodes
+                node_configs
             }
         }
     }
@@ -250,10 +257,7 @@
 /// so the leader can receive votes from all other nodes that will be subsequently spawned.
 /// If not, the leader will miss votes from nodes spawned before itself.
 /// This issue will be resolved by devising the block catch-up mechanism in the future.
-fn create_node_configs(
-    consensus: ConsensusConfig,
-    mut mixnet: MixnetConfig,
-) -> (Config, Vec<Config>) {
+fn create_node_configs(consensus: ConsensusConfig) -> (Config, Vec<Config>) {
     let mut ids = vec![[0; 32]; consensus.n_participants];
     for id in &mut ids {
         thread_rng().fill(id);
@@ -267,8 +271,6 @@
             *id,
             consensus.threshold,
             consensus.timeout,
-            mixnet.node_configs.pop(),
-            mixnet.topology.clone(),
         )
     })
     .collect::<Vec<_>>();
@@ -290,29 +292,12 @@ fn create_node_config(
     id: [u8; 32],
     threshold: Fraction,
     timeout: Duration,
-    mixnet_node_config: Option<MixnetNodeConfig>,
-    mixnet_topology: MixnetTopology,
 ) -> Config {
-    let mixnet_client_mode = match mixnet_node_config {
-        Some(node_config) => {
-            MixnetClientMode::SenderReceiver(node_config.client_listen_address.to_string())
-        }
-        None => MixnetClientMode::Sender,
-    };
-
     let mut config = Config {
         network: NetworkConfig {
            backend: Libp2pConfig {
                inner: Default::default(),
                initial_peers: vec![],
-                mixnet_client: MixnetClientConfig {
-                    mode: mixnet_client_mode,
-                    topology: mixnet_topology,
-                    connection_pool_size: 255,
-                    max_retries: 3,
-                    retry_delay: Duration::from_secs(5),
-                },
-                mixnet_delay: Duration::ZERO..Duration::from_millis(10),
            },
         },
         consensus: CarnotSettings {
@@ -359,7 +344,10 @@
 }
 
 fn node_address(config: &Config) -> Multiaddr {
-    multiaddr!(Ip4([127, 0, 0, 1]), Tcp(config.network.backend.inner.port))
+    Swarm::multiaddr(
+        std::net::Ipv4Addr::new(127, 0, 0, 1),
+        config.network.backend.inner.port,
+    )
 }
 
 pub enum Pool {
@@ -7,9 +7,7 @@ use nomos_cli::{
 use nomos_core::da::{blob::Blob as _, DaProtocol};
 use std::{io::Write, time::Duration};
 use tempfile::NamedTempFile;
-use tests::{
-    adjust_timeout, get_available_port, nodes::nomos::Pool, MixNode, Node, NomosNode, SpawnConfig,
-};
+use tests::{adjust_timeout, get_available_port, nodes::nomos::Pool, Node, NomosNode, SpawnConfig};
 
 const CLI_BIN: &str = "../target/debug/nomos-cli";
 
@@ -35,13 +33,10 @@ fn run_disseminate(disseminate: &Disseminate) {
 }
 
 async fn disseminate(config: &mut Disseminate) {
-    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(2).await;
-    let mut nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2, mixnet_config)).await;
+    let node_configs = NomosNode::node_configs(SpawnConfig::chain_happy(2));
+    let first_node = NomosNode::spawn(node_configs[0].clone()).await;
 
-    // kill the node so that we can reuse its network config
-    nodes[1].stop();
-
-    let mut network_config = nodes[1].config().network.clone();
+    let mut network_config = node_configs[1].network.clone();
     // use a new port because the old port is sometimes not closed immediately
     network_config.backend.inner.port = get_available_port();
 
@@ -68,7 +63,7 @@ async fn disseminate(config: &mut Disseminate) {
     config.node_addr = Some(
         format!(
             "http://{}",
-            nodes[0].config().http.backend_settings.address.clone()
+            first_node.config().http.backend_settings.address.clone()
         )
         .parse()
         .unwrap(),
@@ -79,7 +74,7 @@ async fn disseminate(config: &mut Disseminate) {
 
     tokio::time::timeout(
         adjust_timeout(Duration::from_secs(TIMEOUT_SECS)),
-        wait_for_cert_in_mempool(&nodes[0]),
+        wait_for_cert_in_mempool(&first_node),
     )
     .await
     .unwrap();
@@ -93,7 +88,7 @@ async fn disseminate(config: &mut Disseminate) {
     };
 
     assert_eq!(
-        get_blobs(&nodes[0].url(), vec![blob]).await.unwrap()[0].as_bytes(),
+        get_blobs(&first_node.url(), vec![blob]).await.unwrap()[0].as_bytes(),
         bytes.clone()
     );
 }
@@ -2,7 +2,7 @@ use carnot_engine::{Qc, View};
 use futures::stream::{self, StreamExt};
 use std::collections::HashSet;
 use std::time::Duration;
-use tests::{adjust_timeout, MixNode, Node, NomosNode, SpawnConfig};
+use tests::{adjust_timeout, Node, NomosNode, SpawnConfig};
 
 const TARGET_VIEW: View = View::new(20);
 
@@ -71,22 +71,19 @@ async fn happy_test(nodes: &[NomosNode]) {
 
 #[tokio::test]
 async fn two_nodes_happy() {
-    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(2).await;
-    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2, mixnet_config)).await;
+    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2)).await;
     happy_test(&nodes).await;
 }
 
 #[tokio::test]
 async fn ten_nodes_happy() {
-    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(3).await;
-    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(10, mixnet_config)).await;
+    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(10)).await;
    happy_test(&nodes).await;
 }
 
 #[tokio::test]
 async fn test_get_block() {
-    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(3).await;
-    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2, mixnet_config)).await;
+    let nodes = NomosNode::spawn_nodes(SpawnConfig::chain_happy(2)).await;
     happy_test(&nodes).await;
     let id = nodes[0].consensus_info().await.last_committed_block.id;
     tokio::time::timeout(Duration::from_secs(10), async {
@@ -1,139 +0,0 @@
-use std::{
-    net::{Ipv4Addr, SocketAddr, SocketAddrV4},
-    time::Duration,
-};
-
-use futures::{Stream, StreamExt};
-use mixnet_client::{MixnetClient, MixnetClientConfig, MixnetClientError, MixnetClientMode};
-use mixnet_node::{MixnetNode, MixnetNodeConfig};
-use mixnet_topology::{Layer, MixnetTopology, Node};
-use rand::{rngs::OsRng, RngCore};
-use tests::get_available_port;
-
-#[tokio::test]
-// Set timeout since the test won't stop even if mixnodes (spawned asynchronously) panic.
-#[ntest::timeout(5000)]
-async fn mixnet() {
-    let (topology, mut destination_stream) = run_nodes_and_destination_client().await;
-
-    let mut msg = [0u8; 100 * 1024];
-    rand::thread_rng().fill_bytes(&mut msg);
-
-    let mut sender_client = MixnetClient::new(
-        MixnetClientConfig {
-            mode: MixnetClientMode::Sender,
-            topology: topology.clone(),
-            connection_pool_size: 255,
-            max_retries: 3,
-            retry_delay: Duration::from_secs(5),
-        },
-        OsRng,
-    );
-
-    let res = sender_client.send(msg.to_vec(), Duration::from_millis(500));
-    assert!(res.is_ok());
-
-    let received = destination_stream.next().await.unwrap().unwrap();
-    assert_eq!(msg, received.as_slice());
-}
-
-async fn run_nodes_and_destination_client() -> (
-    MixnetTopology,
-    impl Stream<Item = Result<Vec<u8>, MixnetClientError>> + Send,
-) {
-    let config1 = MixnetNodeConfig {
-        listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        client_listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        ..Default::default()
-    };
-    let config2 = MixnetNodeConfig {
-        listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        client_listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        ..Default::default()
-    };
-    let config3 = MixnetNodeConfig {
-        listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        client_listen_address: SocketAddr::V4(SocketAddrV4::new(
-            Ipv4Addr::new(127, 0, 0, 1),
-            get_available_port(),
-        )),
-        ..Default::default()
-    };
-
-    let mixnode1 = MixnetNode::new(config1);
-    let mixnode2 = MixnetNode::new(config2);
-    let mixnode3 = MixnetNode::new(config3);
-
-    let topology = MixnetTopology {
-        layers: vec![
-            Layer {
-                nodes: vec![Node {
-                    address: config1.listen_address,
-                    public_key: mixnode1.public_key(),
-                }],
-            },
-            Layer {
-                nodes: vec![Node {
-                    address: config2.listen_address,
-                    public_key: mixnode2.public_key(),
-                }],
-            },
-            Layer {
-                nodes: vec![Node {
-                    address: config3.listen_address,
-                    public_key: mixnode3.public_key(),
-                }],
-            },
-        ],
-    };
-
-    // Run all MixnetNodes
-    tokio::spawn(async move {
-        let res = mixnode1.run().await;
-        assert!(res.is_ok());
-    });
-    tokio::spawn(async move {
-        let res = mixnode2.run().await;
-        assert!(res.is_ok());
-    });
-    tokio::spawn(async move {
-        let res = mixnode3.run().await;
-        assert!(res.is_ok());
-    });
-
-    // Wait until mixnodes are ready
-    // TODO: use a more sophisticated way
-    tokio::time::sleep(Duration::from_secs(1)).await;
-
-    // Run a MixnetClient only for the MixnetNode in the exit layer.
-    // According to the current implementation,
-    // one of mixnodes the exit layer always will be selected as a destination.
-    let client = MixnetClient::new(
-        MixnetClientConfig {
-            mode: MixnetClientMode::SenderReceiver(config3.client_listen_address.to_string()),
-            topology: topology.clone(),
-            connection_pool_size: 255,
-            max_retries: 3,
-            retry_delay: Duration::from_secs(5),
-        },
-        OsRng,
-    );
-    let client_stream = client.run().await.unwrap();
-
-    (topology, client_stream)
-}
@@ -3,21 +3,19 @@ use carnot_engine::{Block, NodeId, TimeoutQc, View};
 use fraction::Fraction;
 use futures::stream::{self, StreamExt};
 use std::{collections::HashSet, time::Duration};
-use tests::{adjust_timeout, ConsensusConfig, MixNode, Node, NomosNode, SpawnConfig};
+use tests::{adjust_timeout, ConsensusConfig, Node, NomosNode, SpawnConfig};
 
 const TARGET_VIEW: View = View::new(20);
 const DUMMY_NODE_ID: NodeId = NodeId::new([0u8; 32]);
 
 #[tokio::test]
 async fn ten_nodes_one_down() {
-    let (_mixnodes, mixnet_config) = MixNode::spawn_nodes(3).await;
     let mut nodes = NomosNode::spawn_nodes(SpawnConfig::Chain {
         consensus: ConsensusConfig {
             n_participants: 10,
             threshold: Fraction::new(9u32, 10u32),
             timeout: std::time::Duration::from_secs(5),
         },
-        mixnet: mixnet_config,
     })
     .await;
     let mut failed_node = nodes.pop().unwrap();