Missing simulation updates
parent 6ea3006af9
commit 182797b0ba
@@ -322,22 +322,6 @@ dependencies = [
  "unicode-width",
 ]
 
-[[package]]
-name = "consensus-simulations"
-version = "0.1.0"
-dependencies = [
- "clap",
- "claro",
- "fixed-slice-deque",
- "once_cell",
- "polars",
- "rand",
- "rayon",
- "serde",
- "serde_json",
- "snowball",
-]
-
 [[package]]
 name = "core-foundation-sys"
 version = "0.8.3"
@@ -1576,6 +1560,22 @@ version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451"
 
+[[package]]
+name = "snow-family"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "claro",
+ "fixed-slice-deque",
+ "once_cell",
+ "polars",
+ "rand",
+ "rayon",
+ "serde",
+ "serde_json",
+ "snowball",
+]
+
 [[package]]
 name = "snowball"
 version = "0.1.0"
@@ -6,7 +6,7 @@
 * `snowball`: Snowball implementation
 * `claro`: Claro implementation
 * `prototypes`: Simulations and experiments related libraries and binaries
-* `consensus-simulations`: Consensus simulations app
+* `snow-family`: Snow family and `claro` simulations app
 
 ## Build & Test
 
@@ -0,0 +1,43 @@
+{
+  "consensus_settings": {
+    "claro": {
+      "evidence_alpha": 0.8,
+      "evidence_alpha_2": 0.5,
+      "look_ahead": 20,
+      "query": {
+        "query_size": 10,
+        "initial_query_size": 10,
+        "query_multiplier": 2,
+        "max_multiplier": 4
+      }
+    }
+  },
+  "distribution": {
+    "yes": 0.5,
+    "no": 0.5,
+    "none": 0
+  },
+  "byzantine_settings": {
+    "total_size": 1000,
+    "distribution": {
+      "honest": 1.0,
+      "infantile": 0.0,
+      "random": 0.0,
+      "omniscient": 0.0
+    }
+  },
+  "wards": [
+    {
+      "time_to_finality": {
+        "ttf_threshold" : 100
+      }
+    }
+  ],
+  "network_modifiers": [
+    {
+      "random_drop": {
+        "drop_rate": 0.00
+      }
+    }
+  ]
+}
@@ -0,0 +1,47 @@
+{
+  "consensus_settings": {
+    "snow_ball": {
+      "quorum_size": 14,
+      "sample_size": 20,
+      "decision_threshold": 20
+    }
+  },
+  "distribution": {
+    "yes": 0.6,
+    "no": 0.4,
+    "none": 0.0
+  },
+  "byzantine_settings": {
+    "total_size": 10000,
+    "distribution": {
+      "honest": 1.0,
+      "infantile": 0.0,
+      "random": 0.0,
+      "omniscient": 0.0
+    }
+  },
+  "wards": [
+    {
+      "time_to_finality": {
+        "ttf_threshold" : 10
+      }
+    },
+    {
+      "stabilised": {
+        "rounds": 4
+      }
+    },
+    {
+      "converged": {
+        "ratio": 1
+      }
+    }
+  ],
+  "network_modifiers": [
+    {
+      "random_drop": {
+        "drop_rate": 0.01
+      }
+    }
+  ]
+}
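The two new JSON files above are example configurations, one for `claro` and one for `snow_ball`. As a rough illustration of how such a file can be read, here is a minimal standalone sketch using `serde_json`. It is not the app's actual settings loader (the real settings types appear in the `SimulationStyle`/`ByzantineSettings` hunks below), and the file name is only an assumption.

```rust
// Hypothetical helper, not part of this commit: load a simulation config and
// print a couple of its fields. Assumes the `serde_json` dependency listed above.
use serde_json::Value;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // "claro.json" is an assumed file name for the first config above.
    let raw = std::fs::read_to_string("claro.json")?;
    let cfg: Value = serde_json::from_str(&raw)?;

    // Total network size and the honest-node ratio, as defined in the config.
    println!("total_size = {}", cfg["byzantine_settings"]["total_size"]);
    println!("honest = {}", cfg["byzantine_settings"]["distribution"]["honest"]);
    Ok(())
}
```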
@@ -1,8 +1,10 @@
 [package]
-name = "consensus-simulations"
+name = "snow-family"
 version = "0.1.0"
 edition = "2021"
-
+authors = [
+    "Daniel Sanchez Quiros <danielsq@status.im>"
+]
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
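With the package renamed from `consensus-simulations` to `snow-family`, the simulations app would presumably be run through the new package name, e.g. `cargo run -p snow-family` from the workspace root (assuming the package remains a workspace member with a binary target).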
@@ -0,0 +1,154 @@
+//! # Layered simulation runner
+//!
+//! A revision of the [`glauber`](super::glauber_runner) simulation runner.
+//!
+//! **`glauber`** simulations have some drawbacks:
+//!
+//! * Completely random, hence difficult to control
+//! * Not close to how real nodes would behave in practice
+//! * Difficult to analyse the recorded data, as it is updated in chunks of iterations
+//!
+//! To address this we use the concept of layered *glauber* executions.
+//! The algorithm roughly works as follows:
+//!
+//! ```text
+//! nodes <- [nodes]
+//! layers <- [[nodes_ids], [], ...]
+//! while nodes_to_compute(layers):
+//!     layer_index <- pick_rand_layer(layers)
+//!     node_index <- pop_rand_node(rand_layer)
+//!     step(nodes[node_index])
+//!     if not node_decided(node):
+//!         push(layers[layer_index+1], node_index)
+//! ```
+//!
+//! With this, by controlling the *number of layers* and their *weighting* (how often each one is picked),
+//! we can control the flow of the simulation.
+//! We can also consider that once the bottom layer is empty a full round has been concluded,
+//! and record the simulation data for that round.
+
+// std
+use std::collections::BTreeSet;
+use std::ops::Not;
+use std::sync::Arc;
+// crates
+use fixed_slice_deque::FixedSliceDeque;
+use rand::prelude::{IteratorRandom, SliceRandom};
+use rand::rngs::SmallRng;
+// internal
+use crate::node::{ComputeNode, Node, NodeId};
+use crate::output_processors::OutData;
+use crate::runner::SimulationRunner;
+use crate::warding::SimulationState;
+
+pub fn simulate(
+    runner: &mut SimulationRunner,
+    gap: usize,
+    distribution: Option<Vec<f32>>,
+    mut out_data: Option<&mut Vec<OutData>>,
+) {
+    let distribution =
+        distribution.unwrap_or_else(|| std::iter::repeat(1.0f32).take(gap).collect());
+
+    let layers: Vec<usize> = (0..gap).collect();
+
+    let mut deque = build_node_ids_deque(gap, runner);
+
+    let mut simulation_state = SimulationState {
+        network_state: Arc::clone(&runner.network_state),
+        nodes: Arc::clone(&runner.nodes),
+        iteration: 0,
+        round: 0,
+    };
+
+    loop {
+        let (group_index, node_id) =
+            choose_random_layer_and_node_id(&mut runner.rng, &distribution, &layers, &mut deque);
+
+        // remove node_id from group
+        deque.get_mut(group_index).unwrap().remove(&node_id);
+
+        let vote = {
+            let mut shared_nodes = runner.nodes.write().expect("Write access to nodes vector");
+            let node: &mut Node = shared_nodes
+                .get_mut(node_id)
+                .expect("Node should be present");
+
+            node.step();
+            if !matches!(node.decision(), claro::Decision::Decided(_)) {
+                // pass node to next step group
+                deque.get_mut(group_index + 1).unwrap().insert(node_id);
+            }
+            node.vote()
+        };
+        runner.update_single_network_state_vote(node_id, vote);
+
+        // check if any condition makes the simulation stop
+        if runner.check_wards(&simulation_state) {
+            break;
+        }
+        // run modifiers over the current step network state
+        runner.run_network_behaviour_modifiers();
+        simulation_state.iteration += 1;
+
+        // if initial is empty then we finished a full round, append a new set to the end so we can
+        // compute the most advanced nodes again
+        if deque.first().unwrap().is_empty() {
+            let _ = deque.push_back(BTreeSet::default());
+            runner.dump_state_to_out_data(&simulation_state, &mut out_data);
+            simulation_state.round += 1;
+        }
+
+        // if no more nodes to compute
+        if deque.iter().all(BTreeSet::is_empty) {
+            break;
+        }
+    }
+    // write latest state
+    runner.dump_state_to_out_data(&simulation_state, &mut out_data);
+}
+
+fn choose_random_layer_and_node_id(
+    rng: &mut SmallRng,
+    distribution: &[f32],
+    layers: &[usize],
+    deque: &mut FixedSliceDeque<BTreeSet<NodeId>>,
+) -> (usize, NodeId) {
+    let i = *layers
+        .iter()
+        // filter out empty round groups
+        .filter_map(|&i| {
+            let g = deque.get(i).unwrap();
+            g.is_empty().not().then_some(i)
+        })
+        // intermediate collect necessary for choose_weighted
+        .collect::<Vec<_>>()
+        .choose_weighted(rng, |&i| distribution.get(i).unwrap())
+        .expect("Distribution choose to work");
+
+    let group: &mut BTreeSet<NodeId> = deque.get_mut(i).unwrap();
+
+    let node_id = group.iter().choose(rng).unwrap();
+    (i, *node_id)
+}
+
+fn build_node_ids_deque(
+    gap: usize,
+    runner: &SimulationRunner,
+) -> FixedSliceDeque<BTreeSet<NodeId>> {
+    // add a +1 so we always have a spare layer to push undecided nodes into
+    let mut deque = FixedSliceDeque::new(gap + 1);
+    // push first layer
+    let node_ids: BTreeSet<NodeId> = runner
+        .nodes
+        .write()
+        .expect("Single access to runner nodes")
+        .iter()
+        .map(|node| node.id())
+        .collect();
+
+    deque.push_back(node_ids);
+    // allocate default sets
+    while deque.try_push_back(BTreeSet::new()).is_ok() {}
+    deque
+}
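The heart of the layered approach is the weighted choice among non-empty layers. The following self-contained sketch (not part of this commit) reproduces only that selection step with plain `rand`, so the effect of the weight distribution can be explored in isolation; the layer contents and weights below are made up.

```rust
// Standalone sketch of the weighted layer choice used by
// `choose_random_layer_and_node_id` above. Requires only the `rand` crate.
use rand::seq::SliceRandom;
use std::collections::BTreeSet;

fn main() {
    let mut rng = rand::thread_rng();

    // Three layers of node ids; the middle one is empty and must be skipped.
    let layers: Vec<BTreeSet<usize>> = vec![
        BTreeSet::from([1, 2, 3]),
        BTreeSet::new(),
        BTreeSet::from([4]),
    ];
    // Heavier weights for earlier layers: less advanced nodes step more often.
    let distribution = [1.0f32, 0.5, 0.25];

    let picked = *(0..layers.len())
        // keep only indices of non-empty layers
        .filter(|&i| !layers[i].is_empty())
        .collect::<Vec<_>>()
        // choose_weighted works on slices, hence the intermediate collect
        .choose_weighted(&mut rng, |&i| distribution[i])
        .expect("weights are finite and non-negative");

    println!("stepping a node from layer {picked}");
}
```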
@@ -1,5 +1,6 @@
 mod async_runner;
 mod glauber_runner;
+mod layered_runner;
 mod sync_runner;
 
 // std
@@ -242,19 +243,25 @@ impl SimulationRunner {
     }
 
     pub fn simulate(&mut self, out_data: Option<&mut Vec<OutData>>) {
-        match &self.settings.simulation_style {
+        match self.settings.simulation_style.clone() {
             SimulationStyle::Sync => {
                 sync_runner::simulate(self, out_data);
             }
-            &SimulationStyle::Async { chunks } => {
+            SimulationStyle::Async { chunks } => {
                 async_runner::simulate(self, chunks, out_data);
             }
-            &SimulationStyle::Glauber {
+            SimulationStyle::Glauber {
                 maximum_iterations,
                 update_rate,
             } => {
                 glauber_runner::simulate(self, update_rate, maximum_iterations, out_data);
             }
+            SimulationStyle::Layered {
+                rounds_gap,
+                distribution,
+            } => {
+                layered_runner::simulate(self, rounds_gap, distribution, out_data);
+            }
         }
     }
 
@@ -102,7 +102,7 @@ pub struct ByzantineSettings {
     pub distribution: ByzantineDistribution,
 }
 
-#[derive(Debug, Deserialize, Default)]
+#[derive(Clone, Debug, Deserialize, Default)]
 pub enum SimulationStyle {
     #[default]
     Sync,
@@ -113,6 +113,10 @@ pub enum SimulationStyle {
         maximum_iterations: usize,
         update_rate: usize,
     },
+    Layered {
+        rounds_gap: usize,
+        distribution: Option<Vec<f32>>,
+    },
 }
 
 /// Full simulation settings:
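For reference, here is a minimal sketch of how the new `Layered` style could be written in a settings JSON, assuming serde's default externally tagged enum representation (no container-level serde attributes are visible in these hunks, so the JSON shape is an assumption):

```rust
// Sketch only: the enum is copied from the hunk above; the JSON shape assumes
// serde's default externally tagged representation for enums.
use serde::Deserialize;

#[derive(Clone, Debug, Deserialize, Default)]
pub enum SimulationStyle {
    #[default]
    Sync,
    Async { chunks: usize },
    Glauber { maximum_iterations: usize, update_rate: usize },
    Layered { rounds_gap: usize, distribution: Option<Vec<f32>> },
}

fn main() {
    let raw = r#"{ "Layered": { "rounds_gap": 5, "distribution": [1.0, 0.5, 0.25, 0.1, 0.05] } }"#;
    let style: SimulationStyle = serde_json::from_str(raw).expect("valid layered settings");
    println!("{style:?}");
}
```

The `distribution` weights are optional; when omitted, the layered runner falls back to a uniform weight of `1.0` per layer (see the `unwrap_or_else` at the top of `layered_runner::simulate`).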