DA: Executor dispersal auto stream (#826)
* Re-structure connection handling properly
* Handle event-based connections
* Fix test
* Handle auto-reconnection for the dispersal behaviour
* Ignore CLI test
parent 8dbcf560f9
commit 41a9387b4b
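Note on the API change: `DispersalExecutorBehaviour::new` now takes the local peer id and an `AddressBook` in addition to the membership, so the behaviour can pick replacement peers for a subnetwork whose connection dropped and attach a known address to the dial requests it emits. The sketch below is not part of the diff; it mirrors the updated `executor_swarm` test helper in this commit, and `membership`, `validator_peer`, `validator_addr`, and the helper name `build_executor_behaviour` are placeholders.

    use crate::address_book::AddressBook;
    use crate::protocols::dispersal::executor::behaviour::DispersalExecutorBehaviour;
    use libp2p::identity::Keypair;
    use libp2p::{Multiaddr, PeerId};
    use subnetworks_assignations::MembershipHandler;

    fn build_executor_behaviour<M>(
        key: &Keypair,
        membership: M,
        validator_peer: PeerId,
        validator_addr: Multiaddr,
    ) -> DispersalExecutorBehaviour<M>
    where
        M: MembershipHandler<NetworkId = u32, Id = PeerId> + 'static,
    {
        // The behaviour needs its own peer id so it can exclude itself when choosing
        // replacement peers for a disconnected subnetwork.
        let local_peer_id = PeerId::from_public_key(&key.public());
        // The address book maps peers to known multiaddresses; poll() uses it to attach
        // an address to the Dial requests produced during auto-reconnection.
        let addresses = AddressBook::from_iter([(validator_peer, validator_addr)]);
        DispersalExecutorBehaviour::new(local_peer_id, membership, addresses)
    }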
@@ -1,5 +1,4 @@
 // std
-use std::time::Duration;
 // crates
 use kzgrs_backend::common::blob::DaBlob;
 use libp2p::futures::StreamExt;
@@ -62,18 +61,19 @@ where
     }
 
     fn build_swarm(
-        key: Keypair,
-        membership: Membership,
+        _key: Keypair,
+        _membership: Membership,
     ) -> Swarm<DispersalExecutorBehaviour<Membership>> {
-        libp2p::SwarmBuilder::with_existing_identity(key)
-            .with_tokio()
-            .with_quic()
-            .with_behaviour(|_key| DispersalExecutorBehaviour::new(membership))
-            .expect("Validator behaviour should build")
-            .with_swarm_config(|cfg| {
-                cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))
-            })
-            .build()
+        todo!("CLI will be removed");
+        // libp2p::SwarmBuilder::with_existing_identity(key)
+        //     .with_tokio()
+        //     .with_quic()
+        //     .with_behaviour(|_key| DispersalExecutorBehaviour::new(membership))
+        //     .expect("Validator behaviour should build")
+        //     .with_swarm_config(|cfg| {
+        //         cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))
+        //     })
+        //     .build()
     }
 
     pub fn dial(&mut self, addr: Multiaddr) -> Result<(), DialError> {
@@ -167,6 +167,7 @@ pub mod test {
     use tracing_subscriber::EnvFilter;
 
     #[tokio::test]
+    #[ignore]
     async fn test_dispersal_with_swarms() {
         let _ = tracing_subscriber::fmt()
             .with_env_filter(EnvFilter::from_default_env())
@@ -4,6 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+bincode = "1.3"
 libp2p = { version = "0.53", features = ["macros", "tokio", "quic"] }
 libp2p-stream = "0.1.0-alpha"
 futures = "0.3"
@@ -18,7 +19,8 @@ void = "1.0.2"
 either = "1.13.0"
 log = "0.4"
 serde = "1.0"
-bincode = "1.3"
+rand = "0.8"
+rand_chacha = "0.3"
 tokio = { version = "1.39" }
 tokio-stream = "0.1"
 thiserror = "1.0"
@@ -40,8 +40,12 @@ where
     pub fn new(key: &Keypair, membership: Membership, addresses: AddressBook) -> Self {
         let peer_id = PeerId::from_public_key(&key.public());
         Self {
-            sampling: SamplingBehaviour::new(peer_id, membership.clone(), addresses),
-            executor_dispersal: DispersalExecutorBehaviour::new(membership.clone()),
+            sampling: SamplingBehaviour::new(peer_id, membership.clone(), addresses.clone()),
+            executor_dispersal: DispersalExecutorBehaviour::new(
+                peer_id,
+                membership.clone(),
+                addresses.clone(),
+            ),
             validator_dispersal: DispersalValidatorBehaviour::new(membership.clone()),
             replication: ReplicationBehaviour::new(peer_id, membership),
         }
@@ -1,33 +1,39 @@
 // std
-use std::collections::{HashMap, VecDeque};
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::task::{Context, Poll};
 // crates
 use either::Either;
 use futures::future::BoxFuture;
 use futures::stream::{BoxStream, FuturesUnordered};
 use futures::{AsyncWriteExt, FutureExt, StreamExt};
-use kzgrs_backend::common::blob::DaBlob;
 use libp2p::core::Endpoint;
+use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished};
+use libp2p::swarm::dial_opts::DialOpts;
 use libp2p::swarm::{
     ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent,
     THandlerOutEvent, ToSwarm,
 };
 use libp2p::{Multiaddr, PeerId, Stream};
 use libp2p_stream::{Control, OpenStreamError};
+use rand::prelude::IteratorRandom;
+use rand::SeedableRng;
+use thiserror::Error;
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::UnboundedSender;
+use tokio_stream::wrappers::UnboundedReceiverStream;
+use tracing::error;
+// internal
+use crate::address_book::AddressBook;
+use crate::protocol::DISPERSAL_PROTOCOL;
+use crate::protocols::clone_deserialize_error;
+use crate::SubnetworkId;
+use kzgrs_backend::common::blob::DaBlob;
 use nomos_core::da::BlobId;
 use nomos_da_messages::common::Blob;
 use nomos_da_messages::dispersal::dispersal_res::MessageType;
 use nomos_da_messages::dispersal::{DispersalErr, DispersalReq, DispersalRes};
 use nomos_da_messages::{pack_message, unpack_from_reader};
 use subnetworks_assignations::MembershipHandler;
-use thiserror::Error;
-use tokio::sync::mpsc;
-use tokio::sync::mpsc::UnboundedSender;
-use tokio_stream::wrappers::UnboundedReceiverStream;
-// internal
-use crate::protocol::DISPERSAL_PROTOCOL;
-use crate::protocols::clone_deserialize_error;
-use crate::SubnetworkId;
 
 #[derive(Debug, Error)]
 pub enum DispersalError {
@@ -149,6 +155,8 @@ type StreamHandlerFuture = BoxFuture<'static, Result<StreamHandlerFutureSuccess,
 /// It takes care of sending blobs to different subnetworks.
 /// Bubbles up events with the success or error when dispersing
 pub struct DispersalExecutorBehaviour<Membership: MembershipHandler> {
+    /// Self id
+    local_peer_id: PeerId,
     /// Underlying stream behaviour
     stream_behaviour: libp2p_stream::Behaviour,
     /// Pending running tasks (one task per stream)
@@ -157,10 +165,16 @@ pub struct DispersalExecutorBehaviour<Membership: MembershipHandler> {
     idle_streams: HashMap<PeerId, DispersalStream>,
     /// Subnetworks membership information
     membership: Membership,
+    /// Addresses of known peers in the DA network
+    addresses: AddressBook,
     /// Pending blobs that need to be dispersed by PeerId
     to_disperse: HashMap<PeerId, VecDeque<(Membership::NetworkId, DaBlob)>>,
+    /// Pending blobs from disconnected networks
+    disconnected_pending_blobs: HashMap<Membership::NetworkId, VecDeque<DaBlob>>,
     /// Already connected peers connection Ids
-    connected_subnetworks: HashMap<PeerId, ConnectionId>,
+    connected_peers: HashMap<PeerId, ConnectionId>,
+    /// Subnetwork working streams
+    subnetwork_open_streams: HashSet<SubnetworkId>,
     /// Sender hook of peers to open streams channel
     pending_out_streams_sender: UnboundedSender<PeerId>,
     /// Pending to open streams
@@ -176,11 +190,12 @@ where
     Membership: MembershipHandler + 'static,
     Membership::NetworkId: Send,
 {
-    pub fn new(membership: Membership) -> Self {
+    pub fn new(local_peer_id: PeerId, membership: Membership, addresses: AddressBook) -> Self {
         let stream_behaviour = libp2p_stream::Behaviour::new();
         let tasks = FuturesUnordered::new();
         let to_disperse = HashMap::new();
-        let connected_subnetworks = HashMap::new();
+        let connected_peers = HashMap::new();
+        let subnetwork_open_streams = HashSet::new();
         let idle_streams = HashMap::new();
         let (pending_out_streams_sender, receiver) = mpsc::unbounded_channel();
         let control = stream_behaviour.new_control();
@@ -191,13 +206,18 @@ where
 
         let (pending_blobs_sender, receiver) = mpsc::unbounded_channel();
         let pending_blobs_stream = UnboundedReceiverStream::new(receiver).boxed();
+        let disconnected_pending_blobs = HashMap::new();
 
         Self {
+            local_peer_id,
             stream_behaviour,
             tasks,
             membership,
+            addresses,
             to_disperse,
-            connected_subnetworks,
+            disconnected_pending_blobs,
+            connected_peers,
+            subnetwork_open_streams,
             idle_streams,
             pending_out_streams_sender,
             pending_out_streams,
@@ -326,8 +346,8 @@ impl<Membership: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'sta
     fn disperse_blob(
         tasks: &mut FuturesUnordered<StreamHandlerFuture>,
         idle_streams: &mut HashMap<Membership::Id, DispersalStream>,
-        membership: &mut Membership,
-        connected_subnetworks: &mut HashMap<PeerId, ConnectionId>,
+        membership: &Membership,
+        connected_peers: &HashMap<PeerId, ConnectionId>,
         to_disperse: &mut HashMap<PeerId, VecDeque<(Membership::NetworkId, DaBlob)>>,
         subnetwork_id: SubnetworkId,
         blob: DaBlob,
@@ -335,7 +355,8 @@ impl<Membership: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'sta
         let members = membership.members_of(&subnetwork_id);
         let peers = members
             .iter()
-            .filter(|peer_id| connected_subnetworks.contains_key(peer_id));
+            .filter(|peer_id| connected_peers.contains_key(peer_id));
+
         // We may be connected to more than a single node. Usually will be one, but that is an
         // internal decision of the executor itself.
         for peer in peers {
@@ -352,6 +373,117 @@ impl<Membership: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'sta
             }
         }
     }
+
+    fn reschedule_blobs_for_peer_stream(
+        stream: &DispersalStream,
+        membership: &Membership,
+        to_disperse: &mut HashMap<PeerId, VecDeque<(SubnetworkId, DaBlob)>>,
+        disconnected_pending_blobs: &mut HashMap<SubnetworkId, VecDeque<DaBlob>>,
+    ) {
+        let peer_id = stream.peer_id;
+        let subnetworks = membership.membership(&peer_id);
+        let entry = to_disperse.entry(peer_id).or_default();
+        for subnetwork in subnetworks {
+            if let Some(blobs) = disconnected_pending_blobs.remove(&subnetwork) {
+                entry.extend(blobs.into_iter().map(|blob| (subnetwork, blob)));
+            }
+        }
+    }
+
+    fn filter_peers_for_subnetworks<'s>(
+        &'s self,
+        peer_id: PeerId,
+        subnetworks: impl Iterator<Item = SubnetworkId> + 's,
+    ) -> impl Iterator<Item = HashSet<PeerId>> + 's {
+        subnetworks.map(move |subnetwork_id| {
+            self.membership
+                .members_of(&subnetwork_id)
+                .iter()
+                .filter(|&&peer| peer != peer_id && peer != self.local_peer_id)
+                .copied()
+                .collect::<HashSet<_>>()
+        })
+    }
+
+    fn find_subnetworks_candidates_excluding_peer(
+        &self,
+        peer_id: PeerId,
+        subnetworks: &HashSet<SubnetworkId>,
+    ) -> HashSet<PeerId> {
+        let mut peers: HashSet<PeerId> = self
+            .filter_peers_for_subnetworks(peer_id, subnetworks.iter().copied())
+            .reduce(|h1, h2| h1.intersection(&h2).copied().collect())
+            .unwrap_or_default();
+        // we didn't find a single shared peer for all subnetworks, so we take the smallest subset
+        if peers.is_empty() {
+            peers = self
+                .filter_peers_for_subnetworks(peer_id, subnetworks.iter().copied())
+                .reduce(|h1, h2| h1.union(&h2).copied().collect())
+                .unwrap_or_default();
+        }
+        peers
+    }
+    fn open_streams_for_disconnected_subnetworks_selected_peer(&mut self, peer_id: PeerId) {
+        let subnetworks = self.membership.membership(&peer_id);
+        // open stream will result in dialing if we are not yet connected to the peer
+        for peer in self.find_subnetworks_candidates_excluding_peer(peer_id, &subnetworks) {
+            if let Err(e) = self.pending_out_streams_sender.send(peer) {
+                error!("Error requesting stream for peer {peer_id}: {e}");
+            }
+        }
+    }
+
+    fn prune_blobs_for_peer(&mut self, peer_id: PeerId) -> VecDeque<(SubnetworkId, DaBlob)> {
+        self.to_disperse.remove(&peer_id).unwrap_or_default()
+    }
+
+    fn recover_blobs_for_disconnected_subnetworks(&mut self, peer_id: PeerId) {
+        // push missing blobs into pending ones
+        let disconnected_pending_blobs = self.prune_blobs_for_peer(peer_id);
+        for (subnetwork_id, blob) in disconnected_pending_blobs {
+            self.disconnected_pending_blobs
+                .entry(subnetwork_id)
+                .or_default()
+                .push_back(blob);
+        }
+    }
+
+    fn try_ensure_stream_from_missing_subnetwork(
+        local_peer_id: &PeerId,
+        pending_out_streams_sender: &mut UnboundedSender<PeerId>,
+        membership: &Membership,
+        subnetwork_id: &SubnetworkId,
+    ) {
+        let mut rng = rand_chacha::ChaCha20Rng::from_entropy();
+        // chose a random peer that is not us
+        let peer = membership
+            .members_of(subnetwork_id)
+            .iter()
+            .filter(|&peer| peer != local_peer_id)
+            .choose(&mut rng)
+            .copied();
+        // if we have any, try to connect
+        if let Some(peer) = peer {
+            if let Err(e) = pending_out_streams_sender.send(peer) {
+                error!("Error requesting stream for peer {peer}: {e}");
+            }
+        }
+    }
+
+    fn handle_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) {
+        self.connected_peers.insert(peer_id, connection_id);
+    }
+
+    fn handle_connection_closed(&mut self, peer_id: PeerId) {
+        let peer_subnetworks = self.membership.membership(&peer_id);
+        self.subnetwork_open_streams
+            .retain(|subnetwork_id| !peer_subnetworks.contains(subnetwork_id));
+        if self.connected_peers.remove(&peer_id).is_some() {
+            // mangle pending blobs for disconnected subnetworks from peer
+            self.recover_blobs_for_disconnected_subnetworks(peer_id);
+            self.open_streams_for_disconnected_subnetworks_selected_peer(peer_id);
+        }
+    }
 }
 
 impl<M: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'static> NetworkBehaviour
@@ -380,14 +512,26 @@ impl<M: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'static> Netw
         addr: &Multiaddr,
         role_override: Endpoint,
     ) -> Result<THandler<Self>, ConnectionDenied> {
-        self.connected_subnetworks.insert(peer, connection_id);
         self.stream_behaviour
             .handle_established_outbound_connection(connection_id, peer, addr, role_override)
             .map(Either::Left)
     }
 
     fn on_swarm_event(&mut self, event: FromSwarm) {
-        self.stream_behaviour.on_swarm_event(event)
+        self.stream_behaviour.on_swarm_event(event);
+        match event {
+            FromSwarm::ConnectionEstablished(ConnectionEstablished {
+                peer_id,
+                connection_id,
+                ..
+            }) => {
+                self.handle_connection_established(peer_id, connection_id);
+            }
+            FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, .. }) => {
+                self.handle_connection_closed(peer_id);
+            }
+            _ => {}
+        }
     }
 
     fn on_connection_handler_event(
@@ -408,13 +552,18 @@ impl<M: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'static> Netw
         cx: &mut Context<'_>,
     ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
         let Self {
+            local_peer_id,
             tasks,
             to_disperse,
+            disconnected_pending_blobs,
             idle_streams,
             pending_out_streams,
+            pending_out_streams_sender,
             pending_blobs_stream,
             membership,
-            connected_subnetworks,
+            addresses,
+            connected_peers,
+            subnetwork_open_streams,
             ..
         } = self;
         // poll pending tasks
@@ -454,20 +603,38 @@ impl<M: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'static> Netw
         }
         // poll pending blobs
         if let Poll::Ready(Some((subnetwork_id, blob))) = pending_blobs_stream.poll_next_unpin(cx) {
-            Self::disperse_blob(
-                tasks,
-                idle_streams,
-                membership,
-                connected_subnetworks,
-                to_disperse,
-                subnetwork_id,
-                blob,
-            );
+            if subnetwork_open_streams.contains(&subnetwork_id) {
+                Self::disperse_blob(
+                    tasks,
+                    idle_streams,
+                    membership,
+                    connected_peers,
+                    to_disperse,
+                    subnetwork_id,
+                    blob,
+                );
+            } else {
+                let entry = disconnected_pending_blobs.entry(subnetwork_id).or_default();
+                entry.push_back(blob);
+                Self::try_ensure_stream_from_missing_subnetwork(
+                    local_peer_id,
+                    pending_out_streams_sender,
+                    membership,
+                    &subnetwork_id,
+                );
+            }
         }
         // poll pending streams
         if let Poll::Ready(Some(res)) = pending_out_streams.poll_next_unpin(cx) {
             match res {
                 Ok(stream) => {
+                    subnetwork_open_streams.extend(membership.membership(&stream.peer_id));
+                    Self::reschedule_blobs_for_peer_stream(
+                        &stream,
+                        membership,
+                        to_disperse,
+                        disconnected_pending_blobs,
+                    );
                     Self::handle_stream(tasks, to_disperse, idle_streams, stream);
                 }
                 Err(error) => {
@@ -479,7 +646,18 @@ impl<M: MembershipHandler<Id = PeerId, NetworkId = SubnetworkId> + 'static> Netw
         }
         // Deal with connection as the underlying behaviour would do
         match self.stream_behaviour.poll(cx) {
-            Poll::Ready(ToSwarm::Dial { opts }) => Poll::Ready(ToSwarm::Dial { opts }),
+            Poll::Ready(ToSwarm::Dial { mut opts }) => {
+                // attach known peer address if possible
+                if let Some(address) = opts
+                    .get_peer_id()
+                    .and_then(|peer_id: PeerId| addresses.get_address(&peer_id))
+                {
+                    opts = DialOpts::peer_id(opts.get_peer_id().unwrap())
+                        .addresses(vec![address.clone()])
+                        .build();
+                }
+                Poll::Ready(ToSwarm::Dial { opts })
+            }
             Poll::Pending => {
                 // TODO: probably must be smarter when to wake this
                 cx.waker().wake_by_ref();
@@ -3,6 +3,7 @@ pub mod validator;
 
 #[cfg(test)]
 pub mod test {
+    use crate::address_book::AddressBook;
    use crate::protocols::dispersal::executor::behaviour::DispersalExecutorBehaviour;
    use crate::protocols::dispersal::validator::behaviour::{
        DispersalEvent, DispersalValidatorBehaviour,
@@ -15,22 +16,25 @@ pub mod test {
     use libp2p::swarm::SwarmEvent;
     use libp2p::{quic, Multiaddr, PeerId};
     use log::info;
-    use std::time::Duration;
     use subnetworks_assignations::MembershipHandler;
     use tracing_subscriber::fmt::TestWriter;
     use tracing_subscriber::EnvFilter;
 
     pub fn executor_swarm(
+        addressbook: AddressBook,
         key: Keypair,
         membership: impl MembershipHandler<NetworkId = u32, Id = PeerId> + 'static,
     ) -> libp2p::Swarm<
         DispersalExecutorBehaviour<impl MembershipHandler<NetworkId = u32, Id = PeerId>>,
     > {
+        let peer_id = PeerId::from_public_key(&key.public());
         libp2p::SwarmBuilder::with_existing_identity(key)
             .with_tokio()
             .with_other_transport(|keypair| quic::tokio::Transport::new(quic::Config::new(keypair)))
             .unwrap()
-            .with_behaviour(|_key| DispersalExecutorBehaviour::new(membership))
+            .with_behaviour(|_key| {
+                DispersalExecutorBehaviour::new(peer_id, membership, addressbook)
+            })
             .unwrap()
             .with_swarm_config(|cfg| {
                 cfg.with_idle_connection_timeout(std::time::Duration::from_secs(u64::MAX))
@@ -74,12 +78,14 @@ pub mod test {
                 .into_iter()
                 .collect(),
         };
-        let mut executor = executor_swarm(k1, neighbours.clone());
+        let addr: Multiaddr = "/ip4/127.0.0.1/udp/5063/quic-v1".parse().unwrap();
+        let addr2 = addr.clone().with_p2p(validator_peer).unwrap();
+        let addressbook =
+            AddressBook::from_iter([(PeerId::from_public_key(&k2.public()), addr2.clone())]);
+        let mut executor = executor_swarm(addressbook, k1, neighbours.clone());
         let mut validator = validator_swarm(k2, neighbours);
 
         let msg_count = 10usize;
-        let addr: Multiaddr = "/ip4/127.0.0.1/udp/5063/quic-v1".parse().unwrap();
-        let addr2 = addr.clone().with_p2p(validator_peer).unwrap();
 
         let validator_task = async move {
             validator.listen_on(addr).unwrap();
|
||||||
res
|
res
|
||||||
};
|
};
|
||||||
let join_validator = tokio::spawn(validator_task);
|
let join_validator = tokio::spawn(validator_task);
|
||||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
|
||||||
executor.dial(addr2).unwrap();
|
|
||||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
|
||||||
let executor_open_stream_sender = executor.behaviour().open_stream_sender();
|
|
||||||
let executor_disperse_blob_sender = executor.behaviour().blobs_sender();
|
let executor_disperse_blob_sender = executor.behaviour().blobs_sender();
|
||||||
let (sender, mut receiver) = tokio::sync::oneshot::channel();
|
let (sender, mut receiver) = tokio::sync::oneshot::channel();
|
||||||
let executor_poll = async move {
|
let executor_poll = async move {
|
||||||
|
@ -119,8 +121,6 @@ pub mod test {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let executor_task = tokio::spawn(executor_poll);
|
let executor_task = tokio::spawn(executor_poll);
|
||||||
executor_open_stream_sender.send(validator_peer).unwrap();
|
|
||||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
|
||||||
for i in 0..10 {
|
for i in 0..10 {
|
||||||
info!("Sending blob: {i}");
|
info!("Sending blob: {i}");
|
||||||
executor_disperse_blob_sender
|
executor_disperse_blob_sender
|
||||||