{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import sequtils, strutils
import chronos
import ../libp2p/[protocols/rendezvous, switch, builders]
import ../libp2p/discovery/[rendezvousinterface, discoverymngr]
import ./helpers

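# Builds a test Switch with TCP transport, Mplex muxing, Noise security,
# and the given RendezVous instance mounted, listening on an ephemeral port.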
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
  SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .withRendezVous(rdv)
    .build()

suite "RendezVous":
|
|
teardown:
|
|
checkTrackers()
|
|
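  # Advertise a namespace, then exercise requestLocally/unsubscribeLocally
  # against the node's own registry; no remote rendezvous peer is involved.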
asyncTest "Simple local test":
|
|
let
|
|
rdv = RendezVous.new()
|
|
s = createSwitch(rdv)
|
|
|
|
await s.start()
|
|
let res0 = rdv.requestLocally("empty")
|
|
check res0.len == 0
|
|
await rdv.advertise("foo")
|
|
let res1 = rdv.requestLocally("foo")
|
|
check:
|
|
res1.len == 1
|
|
res1[0] == s.peerInfo.signedPeerRecord.data
|
|
let res2 = rdv.requestLocally("bar")
|
|
check res2.len == 0
|
|
rdv.unsubscribeLocally("foo")
|
|
let res3 = rdv.requestLocally("foo")
|
|
check res3.len == 0
|
|
await s.stop()
|
|
|
|
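  # Same flow through a remote rendezvous point: the client registers,
  # requests, and unsubscribes over the wire.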
asyncTest "Simple remote test":
|
|
let
|
|
rdv = RendezVous.new()
|
|
client = createSwitch(rdv)
|
|
remoteSwitch = createSwitch()
|
|
|
|
await client.start()
|
|
await remoteSwitch.start()
|
|
await client.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
|
|
let res0 = await rdv.request("empty")
|
|
check res0.len == 0
|
|
await rdv.advertise("foo")
|
|
let res1 = await rdv.request("foo")
|
|
check:
|
|
res1.len == 1
|
|
res1[0] == client.peerInfo.signedPeerRecord.data
|
|
let res2 = await rdv.request("bar")
|
|
check res2.len == 0
|
|
await rdv.unsubscribe("foo")
|
|
let res3 = await rdv.request("foo")
|
|
check res3.len == 0
|
|
await allFutures(client.stop(), remoteSwitch.stop())
|
|
|
|
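  # Several clients advertise the same namespace on one rendezvous point;
  # repeated requests page through the remaining registrations.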
asyncTest "Harder remote test":
|
|
var
|
|
rdvSeq: seq[RendezVous] = @[]
|
|
clientSeq: seq[Switch] = @[]
|
|
remoteSwitch = createSwitch()
|
|
|
|
for x in 0 .. 10:
|
|
rdvSeq.add(RendezVous.new())
|
|
clientSeq.add(createSwitch(rdvSeq[^1]))
|
|
await remoteSwitch.start()
|
|
await allFutures(clientSeq.mapIt(it.start()))
|
|
await allFutures(
|
|
clientSeq.mapIt(remoteSwitch.connect(it.peerInfo.peerId, it.peerInfo.addrs))
|
|
)
|
|
await allFutures(rdvSeq.mapIt(it.advertise("foo")))
|
|
var data = clientSeq.mapIt(it.peerInfo.signedPeerRecord.data)
|
|
let res1 = await rdvSeq[0].request("foo", 5)
|
|
check res1.len == 5
|
|
for d in res1:
|
|
check d in data
|
|
data.keepItIf(it notin res1)
|
|
let res2 = await rdvSeq[0].request("foo")
|
|
check res2.len == 5
|
|
for d in res2:
|
|
check d in data
|
|
let res3 = await rdvSeq[0].request("foo")
|
|
check res3.len == 0
|
|
await remoteSwitch.stop()
|
|
await allFutures(clientSeq.mapIt(it.stop()))
|
|
|
|
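  # The cookie stored from a previous request makes the next request return
  # only registrations added since then: here, just clientB's record.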
asyncTest "Simple cookie test":
|
|
let
|
|
rdvA = RendezVous.new()
|
|
rdvB = RendezVous.new()
|
|
clientA = createSwitch(rdvA)
|
|
clientB = createSwitch(rdvB)
|
|
remoteSwitch = createSwitch()
|
|
|
|
await clientA.start()
|
|
await clientB.start()
|
|
await remoteSwitch.start()
|
|
await clientA.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
|
|
await clientB.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
|
|
await rdvA.advertise("foo")
|
|
let res1 = await rdvA.request("foo")
|
|
await rdvB.advertise("foo")
|
|
let res2 = await rdvA.request("foo")
|
|
check:
|
|
res2.len == 1
|
|
res2[0] == clientB.peerInfo.signedPeerRecord.data
|
|
await allFutures(clientA.stop(), clientB.stop(), remoteSwitch.stop())
|
|
|
|
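  # Oversized namespaces, invalid limits, and TTLs outside the configured
  # [minDuration, maxDuration] window must raise RendezVousError.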
asyncTest "Various local error":
|
|
let
|
|
rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
|
|
switch = createSwitch(rdv)
|
|
expect RendezVousError:
|
|
discard await rdv.request("A".repeat(300))
|
|
expect RendezVousError:
|
|
discard await rdv.request("A", -1)
|
|
expect RendezVousError:
|
|
discard await rdv.request("A", 3000)
|
|
expect RendezVousError:
|
|
await rdv.advertise("A".repeat(300))
|
|
expect RendezVousError:
|
|
await rdv.advertise("A", 73.hours)
|
|
expect RendezVousError:
|
|
await rdv.advertise("A", 30.seconds)
|
|
|
|
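  # Duration bounds are validated at construction time: out-of-range values
  # and an inverted min/max pair must raise RendezVousError.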
test "Various config error":
|
|
expect RendezVousError:
|
|
discard RendezVous.new(minDuration = 30.seconds)
|
|
expect RendezVousError:
|
|
discard RendezVous.new(maxDuration = 73.hours)
|
|
expect RendezVousError:
|
|
discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)
|