# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[options, deques, sequtils, enumerate, algorithm]
import stew/byteutils
import ../../libp2p/builders
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
import ../../libp2p/switch
import ../../libp2p/muxers/muxer
import ../../libp2p/protocols/pubsub/rpc/protobuf
import utils
import ../helpers

proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  discard

const MsgIdSuccess = "msg id gen success"

suite "GossipSub internal":
  teardown:
    checkTrackers()

  asyncTest "subscribe/unsubscribeAll":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe.} =
      discard

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, handler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "topic params":
    let params = TopicParams.init()
    params.validateParameters().tryGet()
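
  # `rebalanceMesh` tops the mesh up towards the target degree D: with an
  # empty mesh and 15 candidates in the gossipsub table, it should graft
  # exactly D of them.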
  asyncTest "`rebalanceMesh` Degree Lo":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
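
  # Peer scoring gates grafting: `rebalanceMesh` must skip candidates with a
  # negative score, even if that leaves the mesh below the target degree.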
  asyncTest "rebalanceMesh - bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var scoreLow = -11'f64
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.score = scoreLow
      gossipSub.gossipsub[topic].incl(peer)
      scoreLow += 1.0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # scores run from -11 to 3 in steps of 1, so only the last 4 peers
    # (scores 0 .. 3) have a non-negative score and may enter the mesh
    check gossipSub.mesh[topic].len == 4
    for peer in gossipSub.mesh[topic]:
      check peer.score >= 0.0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
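
  # When the mesh grows past dHigh, `rebalanceMesh` prunes it back down; the
  # resulting size is d plus the dScore peers retained for their score.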
  asyncTest "`rebalanceMesh` Degree Hi":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.mesh[topic].len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len ==
      gossipSub.parameters.d + gossipSub.parameters.dScore

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
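
  # The fanout set is used to publish to topics we are not subscribed to;
  # `replenishFanout` should fill it up to the target degree D.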
  asyncTest "`replenishFanout` Degree Lo":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      var peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.gossipsub[topic].incl(peer)

    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
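
  # Fanout entries are kept alive only for the fanout TTL after the last
  # publish; `dropFanoutPeers` removes topics whose timer has lapsed.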
  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire

    var conns = newSeq[Connection]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.fanout[topic].incl(peer)

    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic1 = "foobar1"
    let topic2 = "foobar2"
    gossipSub.topicParams[topic1] = TopicParams.init()
    gossipSub.topicParams[topic2] = TopicParams.init()
    gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow only topic1 to expire

    var conns = newSeq[Connection]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.fanout[topic1].incl(peer)
      gossipSub.fanout[topic2].incl(peer)

    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
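
  # IHAVE gossip is emitted to peers picked from the gossipsub table that are
  # not already mesh or fanout members for the topic.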
  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()

    # generate mesh and fanout peers
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)

    # generate gossipsub (free standing) peers
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d
    for p in peers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
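
  # Messages received for topics we are not subscribed to must not enter the
  # message cache or be handed to the application.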
  asyncTest "Drop messages of topics without subscription":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
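
  # With disconnectBadPeers enabled, peers whose score falls below the
  # graylist threshold are dropped when scores are updated.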
  asyncTest "Disconnect bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.handler = handler
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we properly clean up the peersInIP table
      gossipSub.peersInIP.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
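
  # Subscriptions past the topicsHigh limit are ignored and the offending
  # peer accrues behaviour penalty.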
  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()
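
  # A GRAFT received while the peer is backing off for the topic violates the
  # protocol: it must be answered with PRUNE and must not join the mesh.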
  asyncTest "rebalanceMesh fail due to backoff":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
        peerId, Moment.now() + 1.hours
      )
      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
      # there must be a control prune due to violation of backoff
      check prunes.len != 0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # expect 0 since they are all backing off
    check gossipSub.mesh[topic].len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh fail due to backoff - remote":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len != 0

    for i in 0 ..< 15:
      let peerId = conns[i].peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      gossipSub.handlePrune(
        peer,
        @[
          ControlPrune(
            topicID: topic,
            peers: @[],
            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
          )
        ],
      )

    # expect topic cleaned up since they are all pruned
    check topic notin gossipSub.mesh

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh Degree Hi - audit scenario":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.parameters.dScore = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dHigh = 12
    gossipSub.parameters.dLow = 4

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.In
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 40.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    for i in 0 ..< 7:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.Out
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 10.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.mesh[topic].len == 13
    gossipSub.rebalanceMesh(topic)
    # ensure we are above dLow
    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
    var outbound = 0
    for peer in gossipSub.mesh[topic]:
      if peer.sendConn.transportDir == Direction.Out:
        inc outbound
    # ensure we give priority and keep at least dOut outbound peers
    check outbound >= gossipSub.parameters.dOut

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  # test cases for block 5 of the gossipsub test plan
  # tests 1-4 check the wire formatting of the control objects:
  # IHAVE - IWANT - GRAFT - PRUNE

  asyncTest "Check ControlIHave formatting":
    let topic = "dummytopic"

    let msgID = @[0'u8, 1, 2, 3]
    let msg = ControlIHave(topicID: topic, messageIDs: @[msgID])
    check:
      msg.topicID == topic
      msg.messageIDs == @[msgID]

  asyncTest "Check ControlIWant formatting":
    let msgID = @[0'u8, 1, 2, 3]
    let msg = ControlIWant(messageIDs: @[msgID])
    check:
      msg.messageIDs == @[msgID]

  asyncTest "Check ControlGraft formatting":
    let topic = "dummytopic"
    let msg = ControlGraft(topicID: topic)
    check:
      msg.topicID == topic

  asyncTest "Check ControlPrune formatting":
    let topic = "foobar"
    var
      peerRecord: seq[byte] = @[1, 2, 3]
      peerData: seq[byte] = @[4, 5]
      backoff: uint64 = 123
      peerInfo = PeerInfoMsg(
        peerId: PeerId(data: @['e'.byte]), # 1 byte
        signedPeerRecord: @['f'.byte, 'g'.byte], # 2 bytes
      )

    let msg = ControlPrune(topicID: topic, peers: @[peerInfo], backoff: backoff)
    check:
      msg.topicID == topic
      msg.peers.contains(peerInfo)
      msg.backoff == backoff

  asyncTest "handleIHave/Iwant tests":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
      check false

    proc handler2(topic: string, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.subscribe(topic, handler2)

    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    block:
      # should ignore peers with no IHAVE budget left
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      peer.iHaveBudget = 0
      let iwants = gossipSub.handleIHave(peer, @[msg])
      check:
        iwants.messageIDs.len == 0

    block:
      # given duplicate ihave should generate only one iwant
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      let iwants = gossipSub.handleIHave(peer, @[msg])
      check:
        iwants.messageIDs.len == 1

    block:
      # given duplicate iwant should generate only one message
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      let msg = ControlIWant(messageIDs: @[id, id, id])
      let genmsg = gossipSub.handleIWant(peer, @[msg])
      check:
        genmsg.len == 1

    check gossipSub.mcache.msgs.len == 1

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
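
  # Helpers for the IWANT reply-size tests below: set up two connected
  # gossipsub nodes, seed gossip1's message cache, and record what gossip0's
  # handler actually receives.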
  proc setupTest(): Future[
      tuple[
        gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
      ]
  ] {.async.} =
    let nodes = generateNodes(2, gossip = true, verifySignature = false)
    discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await nodes[1].switch.connect(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    var receivedMessages = new(HashSet[seq[byte]])

    proc handlerA(topic: string, data: seq[byte]) {.async.} =
      receivedMessages[].incl(data)

    proc handlerB(topic: string, data: seq[byte]) {.async.} =
      discard

    nodes[0].subscribe("foobar", handlerA)
    nodes[1].subscribe("foobar", handlerB)
    await waitSubGraph(nodes, "foobar")

    var gossip0: GossipSub = GossipSub(nodes[0])
    var gossip1: GossipSub = GossipSub(nodes[1])

    return (gossip0, gossip1, receivedMessages)

  proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
    await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())

  proc createMessages(
      gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
  ): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
    var iwantMessageIds = newSeq[MessageId]()
    var sentMessages = initHashSet[seq[byte]]()

    for i, size in enumerate([size1, size2]):
      let data = newSeqWith(size, i.byte)
      sentMessages.incl(data)

      let msg =
        Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
      let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
      iwantMessageIds.add(iwantMessageId)
      gossip1.mcache.put(iwantMessageId, msg)

      let peer = gossip1.peers[gossip0.peerInfo.peerId]
      peer.sentIHaves[^1].incl(iwantMessageId)

    return (iwantMessageIds, sentMessages)

  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize div 2 + 1
    let (iwantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    await sleepAsync(300.milliseconds)
    checkUntilTimeout:
      receivedMessages[].len == 0

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let size1 = gossip1.maxMessageSize div 2
    let size2 = gossip1.maxMessageSize div 3
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let maxSize = gossip1.maxMessageSize
    let size1 = maxSize div 2
    let size2 = maxSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    var smallestSet: HashSet[seq[byte]]
    let seqs = toSeq(sentMessages)
    if seqs[0] < seqs[1]:
      smallestSet.incl(seqs[0])
    else:
      smallestSet.incl(seqs[1])

    checkUntilTimeout:
      receivedMessages[] == smallestSet
    check receivedMessages[].len == 1

    await teardownTest(gossip0, gossip1)