nim-libp2p/tests/pubsub/utils.nim

# compile time options here
const
  libp2p_pubsub_sign {.booldefine.} = true
  libp2p_pubsub_verify {.booldefine.} = true
  libp2p_pubsub_anonymize {.booldefine.} = false

import hashes, random, tables, sets, sequtils
import chronos, stew/[byteutils, results], chronos/ratelimit
import std/options
import
  ../../libp2p/[
    builders,
    protocols/pubsub/errors,
    protocols/pubsub/pubsub,
    protocols/pubsub/pubsubpeer,
    protocols/pubsub/gossipsub,
    protocols/pubsub/floodsub,
    protocols/pubsub/rpc/messages,
    protocols/secure/secure,
  ]
import ../helpers
import chronicles

export builders
randomize()

type
  TestGossipSub* = ref object of GossipSub

  DValues* = object
    d*: Option[int]
    dLow*: Option[int]
    dHigh*: Option[int]
    dScore*: Option[int]
    dOut*: Option[int]
    dLazy*: Option[int]
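
# Creates a `PubSubPeer` for `peerId` with a dial-based connection getter,
# registers it in `p.peers` and runs the `onNewPeer` hook, so tests can
# obtain a peer without going through a real inbound connection.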
proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
  proc getConn(): Future[Connection] =
    p.switch.dial(peerId, GossipSubCodec_12)

  let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec_12, 1024 * 1024)
  debug "created new pubsub peer", peerId
  p.peers[peerId] = pubSubPeer
  onNewPeer(p, pubSubPeer)
  pubSubPeer
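
# Generates a fresh `PeerId` from a random ECDSA private key.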
proc randomPeerId*(): PeerId =
  try:
    PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
  except CatchableError as exc:
    raise newException(Defect, exc.msg)
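
# Default message-id provider for the tests: derives the id from `seqno` and
# `fromPeer` when both are present, falling back to content hashes otherwise.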
func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
  let mid =
    if m.seqno.len > 0 and m.fromPeer.data.len > 0:
      byteutils.toHex(m.seqno) & $m.fromPeer
    else:
      # This fallback is not part of the spec; it is used for testing only.
      # Users who enable anonymization should supply their own message-id
      # logic.
      $m.data.hash & $m.topic.hash
  ok mid.toBytes()
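
# Builds `num` standard switches, each with a mounted PubSub instance:
# GossipSub when `gossip` is true, FloodSub otherwise. The keyword arguments
# override the corresponding pubsub and gossipsub parameters.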
proc generateNodes*(
    num: Natural,
    secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
    msgIdProvider: MsgIdProvider = defaultMsgIdProvider,
    gossip: bool = false,
    triggerSelf: bool = false,
    verifySignature: bool = libp2p_pubsub_verify,
    anonymize: bool = libp2p_pubsub_anonymize,
    sign: bool = libp2p_pubsub_sign,
    sendSignedPeerRecord = false,
    unsubscribeBackoff = 1.seconds,
    maxMessageSize: int = 1024 * 1024,
    enablePX: bool = false,
    overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] =
      Opt.none(tuple[bytes: int, interval: Duration]),
    gossipSubVersion: string = "",
    floodPublish: bool = false,
    dValues: Option[DValues] = DValues.none(),
    gossipFactor: Option[float] = float.none(),
): seq[PubSub] =
  for i in 0 ..< num:
    let switch = newStandardSwitch(
      secureManagers = secureManagers, sendSignedPeerRecord = sendSignedPeerRecord
    )
    let pubsub =
      if gossip:
        let g = GossipSub.init(
          switch = switch,
          triggerSelf = triggerSelf,
          verifySignature = verifySignature,
          sign = sign,
          msgIdProvider = msgIdProvider,
          anonymize = anonymize,
          maxMessageSize = maxMessageSize,
          parameters = (
            var p = GossipSubParams.init()
            p.floodPublish = floodPublish
            p.historyLength = 20
            p.historyGossip = 20
            p.unsubscribeBackoff = unsubscribeBackoff
            p.enablePX = enablePX
            p.overheadRateLimit = overheadRateLimit
            if gossipFactor.isSome:
              p.gossipFactor = gossipFactor.get
            if dValues.isSome:
              let dValuesSome = dValues.get
              if dValuesSome.d.isSome:
                p.d = dValuesSome.d.get
              if dValuesSome.dLow.isSome:
                p.dLow = dValuesSome.dLow.get
              if dValuesSome.dHigh.isSome:
                p.dHigh = dValuesSome.dHigh.get
              if dValuesSome.dScore.isSome:
                p.dScore = dValuesSome.dScore.get
              if dValuesSome.dOut.isSome:
                p.dOut = dValuesSome.dOut.get
              if dValuesSome.dLazy.isSome:
                p.dLazy = dValuesSome.dLazy.get
            p
          ),
        )
        # set per-topic test params so that scoring is enabled
        g.topicParams.mgetOrPut("foobar", TopicParams.init()).topicWeight = 1.0
        g.topicParams.mgetOrPut("foo", TopicParams.init()).topicWeight = 1.0
        g.topicParams.mgetOrPut("bar", TopicParams.init()).topicWeight = 1.0
        if gossipSubVersion != "":
          g.codecs = @[gossipSubVersion]
        g.PubSub
      else:
        FloodSub.init(
          switch = switch,
          triggerSelf = triggerSelf,
          verifySignature = verifySignature,
          sign = sign,
          msgIdProvider = msgIdProvider,
          maxMessageSize = maxMessageSize,
          anonymize = anonymize,
        ).PubSub

    switch.mount(pubsub)
    result.add(pubsub)
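
# A minimal usage sketch (hypothetical test code; assumes the switches are
# started separately, e.g. by awaiting `switch.start()` on each node):
#   let nodes = generateNodes(2, gossip = true)
#   await subscribeNodes(nodes)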
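
# Connects every node to every other node, forming a full mesh.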
proc subscribeNodes*(nodes: seq[PubSub]) {.async.} =
  for dialer in nodes:
    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
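
# Connects every `degree`-th node to all the other nodes, producing a
# sparser topology than `subscribeNodes`.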
proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
  if nodes.len < degree:
    raise (ref CatchableError)(
      msg: "nodes count needs to be greater than or equal to degree!"
    )

  for i, dialer in nodes:
    if (i mod degree) != 0:
      continue

    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.peerInfo.peerId:
        await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
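
# Connects each node to all the others, dialing them in random order.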
proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
  for dialer in nodes:
    var dialed: seq[PeerId]
    while dialed.len < nodes.len - 1:
      let node = sample(nodes)
      if node.peerInfo.peerId notin dialed:
        if dialer.peerInfo.peerId != node.peerInfo.peerId:
          await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
          dialed.add(node.peerInfo.peerId)
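
# Sleeps for `interval` and asserts that the deadline `maximum` has not yet
# passed; a building block for polling loops with a timeout.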
proc activeWait(
    interval: Duration, maximum: Moment, timeoutErrorMessage = "Timeout on activeWait"
) {.async.} =
  await sleepAsync(interval)
  doAssert Moment.now() < maximum, timeoutErrorMessage
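
# Polls until `sender` knows `receiver` for topic `key` in any of its
# `gossipsub`, `mesh` or `fanout` tables, asserting a 5 second timeout.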
proc waitSub*(sender, receiver: auto, key: string) {.async.} =
  if sender == receiver:
    return

  let timeout = Moment.now() + 5.seconds
  let fsub = GossipSub(sender)

  # this check is for testing purposes only:
  # peers can be inside `mesh` and `fanout`, not just `gossipsub`
  while (
    not fsub.gossipsub.hasKey(key) or
    not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)
  ) and (
    not fsub.mesh.hasKey(key) or
    not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)
  ) and (
    not fsub.fanout.hasKey(key) or
    not fsub.fanout.hasPeerId(key, receiver.peerInfo.peerId)
  ):
    trace "waitSub sleeping..."
    await sleepAsync(5.milliseconds)
    doAssert Moment.now() < timeout, "waitSub timeout!"
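
# Polls until the meshes for `key` form a connected graph over all `nodes`:
# a depth-first walk of the mesh links starting from each node must reach
# every other node.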
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
  let timeout = Moment.now() + 5.seconds
  while true:
    var
      nodesMesh: Table[PeerId, seq[PeerId]]
      seen: HashSet[PeerId]
    for n in nodes:
      nodesMesh[n.peerInfo.peerId] =
        toSeq(GossipSub(n).mesh.getOrDefault(key).items()).mapIt(it.peerId)

    var ok = 0
    for n in nodes:
      seen.clear()
      proc explore(p: PeerId) =
        if p in seen:
          return
        seen.incl(p)
        for peer in nodesMesh.getOrDefault(p):
          explore(peer)

      explore(n.peerInfo.peerId)
      if seen.len == nodes.len:
        ok.inc()
    if ok == nodes.len:
      return
    trace "waitSubGraph sleeping..."
    await sleepAsync(5.milliseconds)
    doAssert Moment.now() < timeout, "waitSubGraph timeout!"
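
# Polls until `receiver` appears in `sender`'s mesh for topic `key`, failing
# after `timeoutDuration`.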
proc waitForMesh*(
    sender: auto, receiver: auto, key: string, timeoutDuration = 5.seconds
) {.async.} =
  if sender == receiver:
    return

  let
    timeoutMoment = Moment.now() + timeoutDuration
    gossipsubSender = GossipSub(sender)
    receiverPeerId = receiver.peerInfo.peerId

  while not gossipsubSender.mesh.hasPeerId(key, receiverPeerId):
    trace "waitForMesh sleeping..."
    await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")