Mirror of https://github.com/codex-storage/nim-libp2p.git (synced 2025-02-10 01:53:45 +00:00)
* allow multiple codecs per protocol (without breaking things)
* add 1.1 protocol to gossip
* explicit peering part 1
* explicit peering part 2
* explicit peering part 3
* PeerInfo and ControlPrune protocols
* fix encodePrune
* validated always, even explicit peers
* prune by score (score is stub still)
* add a way to pass parameters to gossip
* standard setup fixes
* take into account explicit direct peers in publish
* add floodPublish logic
* small fixes, publish still half broken
* make sure to waitsub in sparse test
* use var semantics to optimize table access
* wip... lvalues don't work properly sadly...
* big publish refactor, replenish and balance
* fix internal tests
* use g.peers for fanout (todo: don't include flood peers)
* exclude non gossip from fanout
* internal test fixes
* fix flood tests
* fix test's trypublish
* test interop fixes
* make sure to not remove peers from gossip table
* restore old replenishFanout
* cleanups
* restore utility module import
* restore trace vs debug in gossip
* improve fanout replenish behavior further
* triage publish nil peers (issue is on master too but just hidden behind a if/in)
* getGossipPeers fixes
* remove topics from pubsubpeer (was unused)
* simplify rebalanceMesh (following spec) and make it finally reach D_high
* better diagnostics
* merge new pubsubpeer, copy 1.1 to new module
* fix up merge
* conditional enable gossip11 module
* add back topics in peers, re-enable flood publish
* add more heartbeat locking to prevent races
* actually lock the heartbeat
* minor fixes
* with sugar
* merge 1.0
* remove assertion in publish
* fix multistream 1.1 multi proto
* Fix merge oops
* wip
* fix gossip 11 upstream
* gossipsub11 -> gossipsub
* support interop testing
* tests fixing
* fix directchat build
* control prune updates (pb)
* wip parameters
* gossip internal tests fixes
* parameters wip
* finishup with params
* cleanups/wip
* small sugar
* grafted and pruned procs
* wip updateScores
* wip
* fix logging issue
* pubsubpeer, chronicles explicit override
* fix internal gossip tests
* wip
* tables troubleshooting
* score wip
* score wip
* fixes
* fix test utils generateNodes
* don't delete while iterating in score update
* fix grafted defect
* add a handleConnect in subscribeTopic
* pruning improvements
* wip
* score fixes
* post merge - builds gossip tests
* further merge fixes
* rebalance improvements and opportunistic grafting
* fix test for now
* restore explicit peering
* implement peer exchange graft message
* add an hard cap to PX
* backoff time management
* IWANT cap/budget
* Adaptive gossip dissemination
* outbound mesh quota, internal tests fixing
* oversub prune score based, finish outbound quota
* finishup with score and ihave budget
* use go daemon 0.3.0
* import fixes
* byScore cleanup score sorting
* remove pointless scaling in `/` Duration operator
* revert using libp2p org for daemon
* interop fixes
* fixes and cleanup
* remove heartbeat assertion, minor debug fixes
* logging improvements and cleaning up
* (to revert) add some traces
* add explicit topic to gossip rpcs
* pubsub merge fixes and type fix in switch
* Revert "(to revert) add some traces"

  This reverts commit 4663eaab6cc336c81cee50bc54025cf0b7bcbd99.
* cleanup some now irrelevant todo
* shuffle peers anyway as score might be disabled
* add missing shuffle
* old merge fix
* more merge fixes
* debug improvements
* re-enable gossip internal tests
* add gossip10 fallback (dormant but tested)
* split gossipsub internal tests into 1.0 and 1.1

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
402 lines | 12 KiB | Nim
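
# Internal unit tests for the gossipsub 1.0 implementation. The module is
# `include`d rather than imported so the tests can reach private state such
# as the `mesh`, `fanout` and `gossipsub` peer tables.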
include ../../libp2p/protocols/pubsub/gossipsub10

{.used.}

import unittest, bearssl
import stew/byteutils
import ../../libp2p/standard_setup
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream

import ../helpers

type
  TestGossipSub = ref object of GossipSub
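
# Stub stream write handler: these tests only need connections to exist,
# not to carry data.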
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
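
# Create a PubSubPeer whose connection is established lazily: on first use
# it dials `peerId` on the gossipsub codec and announces our current
# subscriptions.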
proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): auto =
  proc getConn(): Future[(Connection, RPCMsg)] {.async.} =
    let conn = await p.switch.dial(peerId, GossipSubCodec)
    return (conn, RPCMsg.withSubs(toSeq(p.topics.keys), true))

  newPubSubPeer(peerId, getConn, GossipSubCodec)
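
# PeerInfo backed by a fresh random ECDSA key.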
proc randomPeerInfo(): PeerInfo =
  PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
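
# Every test closes its connections and stops its switch; the teardown
# then asserts that no tracked resource leaked.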
suite "GossipSub internal":
|
|
teardown:
|
|
for tracker in testTrackers():
|
|
# echo tracker.dump()
|
|
check tracker.isLeaked() == false
|
|
|
|
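
  # After `rebalanceMesh`, the topic mesh should sit at the target degree D.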
test "`rebalanceMesh` Degree Lo":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
let topic = "foobar"
|
|
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
|
|
|
var conns = newSeq[Connection]()
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
for i in 0..<15:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
gossipSub.peers[peerInfo.peerId] = peer
|
|
gossipSub.mesh[topic].incl(peer)
|
|
|
|
check gossipSub.peers.len == 15
|
|
await gossipSub.rebalanceMesh(topic)
|
|
check gossipSub.mesh[topic].len == GossipSubD
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
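
  # With D_high exceeded (15 peers in the mesh), rebalancing prunes back to D.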
test "`rebalanceMesh` Degree Hi":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
let topic = "foobar"
|
|
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.topics[topic] = Topic() # has to be in topics to rebalance
|
|
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<15:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
gossipSub.peers[peerInfo.peerId] = peer
|
|
gossipSub.mesh[topic].incl(peer)
|
|
|
|
check gossipSub.mesh[topic].len == 15
|
|
await gossipSub.rebalanceMesh(topic)
|
|
check gossipSub.mesh[topic].len == GossipSubD
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
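
  # `replenishFanout` fills the fanout from known gossipsub peers up to degree D.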
test "`replenishFanout` Degree Lo":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<15:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
var peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
gossipSub.gossipsub[topic].incl(peer)
|
|
|
|
check gossipSub.gossipsub[topic].len == 15
|
|
gossipSub.replenishFanout(topic)
|
|
check gossipSub.fanout[topic].len == GossipSubD
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
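
  # A fanout topic whose last-publish time has passed is dropped entirely.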
test "`dropFanoutPeers` drop expired fanout topics":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
|
await sleepAsync(5.millis) # allow the topic to expire
|
|
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<6:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
gossipSub.fanout[topic].incl(peer)
|
|
|
|
check gossipSub.fanout[topic].len == GossipSubD
|
|
|
|
gossipSub.dropFanoutPeers()
|
|
check topic notin gossipSub.fanout
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
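
  # Only expired fanout topics are dropped; unexpired ones are left alone.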
test "`dropFanoutPeers` leave unexpired fanout topics":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic1 = "foobar1"
|
|
let topic2 = "foobar2"
|
|
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
|
|
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
|
|
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
|
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
|
await sleepAsync(5.millis) # allow the topic to expire
|
|
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<6:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
gossipSub.fanout[topic1].incl(peer)
|
|
gossipSub.fanout[topic2].incl(peer)
|
|
|
|
check gossipSub.fanout[topic1].len == GossipSubD
|
|
check gossipSub.fanout[topic2].len == GossipSubD
|
|
|
|
gossipSub.dropFanoutPeers()
|
|
check topic1 notin gossipSub.fanout
|
|
check topic2 in gossipSub.fanout
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
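
  # Gossip targets are drawn only from peers outside the mesh and fanout,
  # up to D of them.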
test "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
var conns = newSeq[Connection]()
|
|
|
|
# generate mesh and fanout peers
|
|
for i in 0..<30:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
if i mod 2 == 0:
|
|
gossipSub.fanout[topic].incl(peer)
|
|
else:
|
|
gossipSub.mesh[topic].incl(peer)
|
|
|
|
# generate gossipsub (free standing) peers
|
|
for i in 0..<15:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
gossipSub.gossipsub[topic].incl(peer)
|
|
|
|
# generate messages
|
|
var seqno = 0'u64
|
|
for i in 0..5:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
inc seqno
|
|
let msg = Message.init(peerInfo, ("HELLO" & $i).toBytes(), topic, seqno, false)
|
|
gossipSub.mcache.put(gossipSub.msgIdProvider(msg), msg)
|
|
|
|
check gossipSub.fanout[topic].len == 15
|
|
check gossipSub.mesh[topic].len == 15
|
|
check gossipSub.gossipsub[topic].len == 15
|
|
|
|
let peers = gossipSub.getGossipPeers()
|
|
check peers.len == GossipSubD
|
|
for p in peers.keys:
|
|
check not gossipSub.fanout.hasPeerID(topic, p.peerId)
|
|
check not gossipSub.mesh.hasPeerID(topic, p.peerId)
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
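
  # Gossip peer selection must not crash when the topic is absent from the
  # `mesh` table.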
test "`getGossipPeers` - should not crash on missing topics in mesh":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<30:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
if i mod 2 == 0:
|
|
gossipSub.fanout[topic].incl(peer)
|
|
else:
|
|
gossipSub.gossipsub[topic].incl(peer)
|
|
|
|
# generate messages
|
|
var seqno = 0'u64
|
|
for i in 0..5:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
inc seqno
|
|
let msg = Message.init(peerInfo, ("HELLO" & $i).toBytes(), topic, seqno, false)
|
|
gossipSub.mcache.put(gossipSub.msgIdProvider(msg), msg)
|
|
|
|
let peers = gossipSub.getGossipPeers()
|
|
check peers.len == GossipSubD
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
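
  # Likewise when the topic is absent from the `fanout` table.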
test "`getGossipPeers` - should not crash on missing topics in fanout":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<30:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
if i mod 2 == 0:
|
|
gossipSub.mesh[topic].incl(peer)
|
|
else:
|
|
gossipSub.gossipsub[topic].incl(peer)
|
|
|
|
# generate messages
|
|
var seqno = 0'u64
|
|
for i in 0..5:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
inc seqno
|
|
let msg = Message.init(peerInfo, ("HELLO" & $i).toBytes(), topic, seqno, false)
|
|
gossipSub.mcache.put(gossipSub.msgIdProvider(msg), msg)
|
|
|
|
let peers = gossipSub.getGossipPeers()
|
|
check peers.len == GossipSubD
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|
|
|
|
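
  # With the topic absent from the `gossipsub` table there is nobody left
  # to gossip to, and selection must not crash.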
test "`getGossipPeers` - should not crash on missing topics in gossip":
|
|
proc testRun(): Future[bool] {.async.} =
|
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
|
|
|
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
|
discard
|
|
|
|
let topic = "foobar"
|
|
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
|
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
|
var conns = newSeq[Connection]()
|
|
for i in 0..<30:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
|
|
peer.handler = handler
|
|
if i mod 2 == 0:
|
|
gossipSub.mesh[topic].incl(peer)
|
|
else:
|
|
gossipSub.fanout[topic].incl(peer)
|
|
|
|
# generate messages
|
|
var seqno = 0'u64
|
|
for i in 0..5:
|
|
let conn = newBufferStream(noop)
|
|
conns &= conn
|
|
let peerInfo = randomPeerInfo()
|
|
conn.peerInfo = peerInfo
|
|
inc seqno
|
|
let msg = Message.init(peerInfo, ("bar" & $i).toBytes(), topic, seqno, false)
|
|
gossipSub.mcache.put(gossipSub.msgIdProvider(msg), msg)
|
|
|
|
let peers = gossipSub.getGossipPeers()
|
|
check peers.len == 0
|
|
|
|
await allFuturesThrowing(conns.mapIt(it.close()))
|
|
await gossipSub.switch.stop()
|
|
|
|
result = true
|
|
|
|
check:
|
|
waitFor(testRun()) == true
|