{.used.}

import
  std/[options, sequtils, times, sugar, net],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  json_rpc/rpcserver,
  json_rpc/rpcclient,
  eth/keys,
  eth/common/eth_types,
  libp2p/[builders, switch, multiaddress],
  libp2p/protobuf/minprotobuf,
  libp2p/stream/[bufferstream, connection],
  libp2p/crypto/crypto,
  libp2p/protocols/pubsub/pubsub,
  libp2p/protocols/pubsub/rpc/message,
  libp2p/peerid

import
  waku/[
    common/databases/db_sqlite,
    node/peer_manager/peer_manager,
    node/peer_manager/peer_store/waku_peer_storage,
    waku_node,
    waku_core,
    waku_enr/capabilities,
    waku_relay/protocol,
    waku_filter_v2/common,
    waku_store/common,
    waku_lightpush/common,
    waku_peer_exchange,
    waku_metadata,
  ],
  ./testlib/common,
  ./testlib/testutils,
  ./testlib/wakucore,
  ./testlib/wakunode

procSuite "Peer Manager":
  asyncTest "connectPeer() works":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))

    let connOk =
      await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())
    await sleepAsync(chronos.milliseconds(500))

    check:
      connOk == true
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[1].peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
        Connectedness.Connected

  asyncTest "dialPeer() works":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))
    await allFutures(nodes.mapIt(it.mountFilter()))

    # Dial node2 from node1
    let conn = await nodes[0].peerManager.dialPeer(
      nodes[1].peerInfo.toRemotePeerInfo(), WakuFilterSubscribeCodec
    )
    await sleepAsync(chronos.milliseconds(500))

    # Check connection
    check:
      conn.isSome()
      conn.get.activity
      conn.get.peerId == nodes[1].peerInfo.peerId

    # Check that node2 is being managed in node1
    check:
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[1].peerInfo.peerId
      )

    # Check connectedness
    check:
      nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
        Connectedness.Connected

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "dialPeer() fails gracefully":
    # Create 2 nodes and start them
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

    let nonExistentPeerRes = parsePeerInfo(
      "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE"
    )
    require nonExistentPeerRes.isOk()

    let nonExistentPeer = nonExistentPeerRes.value

    # Dial non-existent peer from node1
    let conn1 = await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuStoreCodec)
    check:
      conn1.isNone()

    # Dial peer not supporting given protocol
    let conn2 = await nodes[0].peerManager.dialPeer(
      nodes[1].peerInfo.toRemotePeerInfo(), WakuStoreCodec
    )
    check:
      conn2.isNone()

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "Adding, selecting and filtering peers work":
    let
      node =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))

      # Create filter peer
      filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
      filterPeer = PeerInfo.new(generateEcdsaKey(), @[filterLoc])
      # Create store peer
      storeLoc = MultiAddress.init("/ip4/127.0.0.3/tcp/4").tryGet()
      storePeer = PeerInfo.new(generateEcdsaKey(), @[storeLoc])

    await node.start()

    node.peerManager.addServicePeer(storePeer.toRemotePeerInfo(), WakuStoreCodec)
    node.peerManager.addServicePeer(
      filterPeer.toRemotePeerInfo(), WakuFilterSubscribeCodec
    )

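    # Note (assumption, for clarity): addServicePeer() is expected to both add the
    # peer to the peer store and reserve it in the service slot for the given
    # codec; relay peers are handled separately and are not added this way
    # (see the "addServicePeer() stores service peers" test below).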

    # Check peers were successfully added to peer manager
    check:
      node.peerManager.wakuPeerStore.peers().len == 2
      node.peerManager.wakuPeerStore.peers(WakuFilterSubscribeCodec).allIt(
        it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and
          it.protocols.contains(WakuFilterSubscribeCodec)
      )
      node.peerManager.wakuPeerStore.peers(WakuStoreCodec).allIt(
        it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and
          it.protocols.contains(WakuStoreCodec)
      )

    await node.stop()

  asyncTest "Peer manager keeps track of connections":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

    # Test default connectedness for new peers
    nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo())
    check:
      # No information about node2's connectedness
      nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
        NotConnected

    # Failed connection
    let nonExistentPeerRes = parsePeerInfo(
      "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE"
    )
    require:
      nonExistentPeerRes.isOk()

    let nonExistentPeer = nonExistentPeerRes.value
    require:
      (await nodes[0].peerManager.connectPeer(nonExistentPeer)) == false
    await sleepAsync(chronos.milliseconds(500))

    check:
      # Cannot connect to node2
      nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
        CannotConnect

    # Successful connection
    require:
      (await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())) ==
        true
    await sleepAsync(chronos.milliseconds(500))

    check:
      # Currently connected to node2
      nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
        Connected

    # Stop node. Gracefully disconnect from all peers.
    await nodes[0].stop()
    check:
      # Not currently connected to node2, but had recent, successful connection.
      nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
        CanConnect
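
    # Connectedness states exercised above (summary, not an API reference):
    # NotConnected for a freshly added peer, CannotConnect after a failed dial,
    # Connected while a session is live, and CanConnect once a previously
    # successful connection has been closed.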

    await nodes[1].stop()

  asyncTest "Peer manager updates failed peers correctly":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

    let nonExistentPeerRes = parsePeerInfo(
      "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE"
    )
    require nonExistentPeerRes.isOk()

    let nonExistentPeer = nonExistentPeerRes.value

    nodes[0].peerManager.addPeer(nonExistentPeer)

    # Set a low backoff to speed up test: 2, 4, 8, 16
    nodes[0].peerManager.initialBackoffInSec = 2
    nodes[0].peerManager.backoffFactor = 2
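
    # Backoff sketch (assumed from the values used in this test): the wait after
    # the n-th failed attempt grows roughly as
    #   initialBackoffInSec * backoffFactor ^ (n - 1)
    # so with initialBackoffInSec = 2 and backoffFactor = 2 the waits are
    # 2, 4, 8, 16 seconds.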

    # try to connect to peer that doesn't exist
    let conn1Ok = await nodes[0].peerManager.connectPeer(nonExistentPeer)
    check:
      # Cannot connect to node2
      nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
        CannotConnect
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][nonExistentPeer.peerId] ==
        CannotConnect
      nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nonExistentPeer.peerId] ==
        1

      # Connection attempt failed
      conn1Ok == false

      # Right after failing there is a backoff period
      nodes[0].peerManager.canBeConnected(nonExistentPeer.peerId) == false

    # We wait the first backoff period
    await sleepAsync(chronos.milliseconds(2100))

    # And backoff period is over
    check:
      nodes[0].peerManager.canBeConnected(nodes[1].peerInfo.peerId) == true

    # After a successful connection, the number of failed connections is reset
    nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] =
      4
    let conn2Ok =
      await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())
    check:
      conn2Ok == true
      nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] ==
        0

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "Peer manager can use persistent storage and survive restarts":
    let
      database = SqliteDatabase.new(":memory:")[]
      storage = WakuPeerStorage.new(database)[]
      node1 = newTestWakuNode(
        generateSecp256k1Key(), getPrimaryIPAddr(), Port(44048), peerStorage = storage
      )
      node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(34023))

    node1.mountMetadata(0).expect("Mounted Waku Metadata")
    node2.mountMetadata(0).expect("Mounted Waku Metadata")

    await node1.start()
    await node2.start()

    await node1.mountRelay()
    await node2.mountRelay()

    let peerInfo2 = node2.switch.peerInfo
    var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
    remotePeerInfo2.enr = some(node2.enr)

    let is12Connected = await node1.peerManager.connectPeer(remotePeerInfo2)
    assert is12Connected == true, "Node 1 and 2 not connected"

    check:
      node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
        remotePeerInfo2.addrs

    # wait for the peer store update
    await sleepAsync(chronos.milliseconds(500))

    check:
      # Currently connected to node2
      node1.peerManager.wakuPeerStore.peers().len == 1
      node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("127.0.0.1"),
      Port(56037),
      peerStorage = storage,
    )

    node3.mountMetadata(0).expect("Mounted Waku Metadata")

    await node3.start()

    check:
      # Node2 has been loaded after "restart", but we have not yet reconnected
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected

    await node3.mountRelay()

    await node3.peerManager.connectToRelayPeers()

    await sleepAsync(chronos.milliseconds(500))

    check:
      # Reconnected to node2 after "restart"
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    await allFutures([node1.stop(), node2.stop(), node3.stop()])

  asyncTest "Sharded peer manager can use persistent storage and survive restarts":
    let
      database = SqliteDatabase.new(":memory:")[]
      storage = WakuPeerStorage.new(database)[]
      node1 = newTestWakuNode(
        generateSecp256k1Key(), getPrimaryIPAddr(), Port(44048), peerStorage = storage
      )
      node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(34023))

    node1.mountMetadata(0).expect("Mounted Waku Metadata")
    node2.mountMetadata(0).expect("Mounted Waku Metadata")

    await node1.start()
    await node2.start()

    await node1.mountRelay()
    await node2.mountRelay()

    let peerInfo2 = node2.switch.peerInfo
    var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
    remotePeerInfo2.enr = some(node2.enr)

    let is12Connected = await node1.peerManager.connectPeer(remotePeerInfo2)
    assert is12Connected == true, "Node 1 and 2 not connected"

    check:
      node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
        remotePeerInfo2.addrs

    # wait for the peer store update
    await sleepAsync(chronos.milliseconds(500))

    check:
      # Currently connected to node2
      node1.peerManager.wakuPeerStore.peers().len == 1
      node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("127.0.0.1"),
      Port(56037),
      peerStorage = storage,
    )

    node3.mountMetadata(0).expect("Mounted Waku Metadata")

    await node3.start()

    check:
      # Node2 has been loaded after "restart", but we have not yet reconnected
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected

    await node3.mountRelay()

    await node3.peerManager.manageRelayPeers()

    await sleepAsync(chronos.milliseconds(500))

    check:
      # Reconnected to node2 after "restart"
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    await allFutures([node1.stop(), node2.stop(), node3.stop()])

  asyncTest "Peer manager drops connections to peers on different networks":
    let
      port = Port(0)

      # different network
      node1 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        port,
        clusterId = 3,
        shards = @[uint16(0)],
      )

      # same network
      node2 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        port,
        clusterId = 4,
        shards = @[uint16(0)],
      )
      node3 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        port,
        clusterId = 4,
        shards = @[uint16(0)],
      )

    node1.mountMetadata(3).expect("Mounted Waku Metadata")
    node2.mountMetadata(4).expect("Mounted Waku Metadata")
    node3.mountMetadata(4).expect("Mounted Waku Metadata")
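
    # Assumed behaviour under test: the metadata protocol exchanges cluster ids
    # on connect, so node1 (cluster 3) should see its connections to node2 and
    # node3 (cluster 4) dropped, while node2 <-> node3 stays up.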

    # Start nodes
    await allFutures([node1.start(), node2.start(), node3.start()])

    # 1->2 (fails)
    let conn1 = await node1.peerManager.dialPeer(
      node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
    )

    # 1->3 (fails)
    let conn2 = await node1.peerManager.dialPeer(
      node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
    )

    # 2->3 (succeeds)
    let conn3 = await node2.peerManager.dialPeer(
      node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
    )

    check:
      conn1.isNone or conn1.get().isClosed
      conn2.isNone or conn2.get().isClosed
      conn3.isSome and not conn3.get().isClosed

  # TODO: nwaku/issues/1377
  xasyncTest "Peer manager supports multiple protocol IDs when reconnecting to peers":
    let
      database = SqliteDatabase.new(":memory:")[]
      storage = WakuPeerStorage.new(database)[]
      node1 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        Port(0),
        peerStorage = storage,
      )
      node2 =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      peerInfo2 = node2.switch.peerInfo
      betaCodec = "/vac/waku/relay/2.0.0-beta2"
      stableCodec = "/vac/waku/relay/2.0.0"

    await node1.start()
    await node2.start()

    await node1.mountRelay()
    node1.wakuRelay.codec = betaCodec
    await node2.mountRelay()
    node2.wakuRelay.codec = betaCodec

    require:
      (await node1.peerManager.connectPeer(peerInfo2.toRemotePeerInfo())) == true
    check:
      # Currently connected to node2
      node1.peerManager.wakuPeerStore.peers().len == 1
      node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node1.peerManager.wakuPeerStore.peers().anyIt(
        it.protocols.contains(node2.wakuRelay.codec)
      )
      node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("0.0.0.0"),
      Port(0),
      peerStorage = storage,
    )

    await node3.mountRelay()
    node3.wakuRelay.codec = stableCodec
    check:
      # Node 2 and 3 have differing codecs
      node2.wakuRelay.codec == betaCodec
      node3.wakuRelay.codec == stableCodec
      # Node2 has been loaded after "restart", but we have not yet reconnected
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected

    await node3.start() # This should trigger a reconnect

    check:
      # Reconnected to node2 after "restart"
      node3.peerManager.wakuPeerStore.peers().len == 1
      node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
      node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
      node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(stableCodec))
      node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected

    await allFutures([node1.stop(), node2.stop(), node3.stop()])

  asyncTest "Peer manager connects to all peers supporting a given protocol":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(
        nodeKey = generateSecp256k1Key(),
        bindIp = ValidIpAddress.init("0.0.0.0"),
        bindPort = Port(0),
        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
      )
    )

    # Start them
    discard nodes.mapIt(it.mountMetadata(0))
    await allFutures(nodes.mapIt(it.mountRelay()))
    await allFutures(nodes.mapIt(it.start()))

    # Get all peer infos
    let peerInfos = collect:
      for i in 0 .. nodes.high:
        let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
        peerInfo.enr = some(nodes[i].enr)
        peerInfo

    # Add all peers (but self) to node 0
    nodes[0].peerManager.addPeer(peerInfos[1])
    nodes[0].peerManager.addPeer(peerInfos[2])
    nodes[0].peerManager.addPeer(peerInfos[3])

    # Connect to relay peers
    await nodes[0].peerManager.connectToRelayPeers()

    check:
      # Peerstore tracks all three peers
      nodes[0].peerManager.wakuPeerStore.peers().len == 3

      # All peer ids are correct
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[1].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[2].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[3].switch.peerInfo.peerId
      )

      # All peers support the relay protocol
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )

      # All peers are connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[1].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[2].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[3].switch.peerInfo.peerId
      ] == Connected

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "Sharded peer manager connects to all peers supporting a given protocol":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(
        nodeKey = generateSecp256k1Key(),
        bindIp = ValidIpAddress.init("0.0.0.0"),
        bindPort = Port(0),
        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
      )
    )

    # Start them
    discard nodes.mapIt(it.mountMetadata(0))
    await allFutures(nodes.mapIt(it.mountRelay()))
    await allFutures(nodes.mapIt(it.start()))

    # Get all peer infos
    let peerInfos = collect:
      for i in 0 .. nodes.high:
        let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
        peerInfo.enr = some(nodes[i].enr)
        peerInfo

    # Add all peers (but self) to node 0
    nodes[0].peerManager.addPeer(peerInfos[1])
    nodes[0].peerManager.addPeer(peerInfos[2])
    nodes[0].peerManager.addPeer(peerInfos[3])

    # Connect to relay peers
    await nodes[0].peerManager.manageRelayPeers()

    check:
      # Peerstore tracks all three peers
      nodes[0].peerManager.wakuPeerStore.peers().len == 3

      # All peer ids are correct
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[1].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[2].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[3].switch.peerInfo.peerId
      )

      # All peers support the relay protocol
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )

      # All peers are connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[1].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[2].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[3].switch.peerInfo.peerId
      ] == Connected

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "Peer store keeps track of incoming connections":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    # Start them
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

    # Get all peer infos
    let peerInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo())

    # all nodes connect to peer 0
    require:
      (await nodes[1].peerManager.connectPeer(peerInfos[0])) == true
      (await nodes[2].peerManager.connectPeer(peerInfos[0])) == true
      (await nodes[3].peerManager.connectPeer(peerInfos[0])) == true

    await sleepAsync(chronos.milliseconds(500))

    check:
      # Peerstore tracks all three peers
      nodes[0].peerManager.wakuPeerStore.peers().len == 3

      # Inbound/Outbound number of peers match
      nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 3
      nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 0
      nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
      nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
      nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
      nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
      nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
      nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1

      # All peer ids are correct
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[1].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[2].switch.peerInfo.peerId
      )
      nodes[0].peerManager.wakuPeerStore.peers().anyIt(
        it.peerId == nodes[3].switch.peerInfo.peerId
      )

      # All peers support the relay protocol
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )
      nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
        WakuRelayCodec
      )

      # All peers are connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[1].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[2].switch.peerInfo.peerId
      ] == Connected
      nodes[0].peerManager.wakuPeerStore[ConnectionBook][
        nodes[3].switch.peerInfo.peerId
      ] == Connected

      # All peers are Inbound in peer 0
      nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] ==
        Inbound
      nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] ==
        Inbound
      nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] ==
        Inbound

      # All peers have an Outbound connection with peer 0
      nodes[1].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
        Outbound
      nodes[2].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
        Outbound
      nodes[3].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
        Outbound

    await allFutures(nodes.mapIt(it.stop()))

  asyncTest "Peer store addServicePeer() stores service peers":
    # Valid peer id missing the last digit
    let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"

    let
      node =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      peers = toSeq(1 .. 4)
        .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
        .filterIt(it.isOk())
        .mapIt(it.value)

    require:
      peers.len == 4

    # service peers
    node.peerManager.addServicePeer(peers[0], WakuStoreCodec)
    node.peerManager.addServicePeer(peers[1], WakuLightPushCodec)
    node.peerManager.addServicePeer(peers[2], WakuPeerExchangeCodec)

    # relay peers (should not be added)
    node.peerManager.addServicePeer(peers[3], WakuRelayCodec)

    # all peers are stored in the peerstore
    check:
      node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[0].peerId)
      node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[1].peerId)
      node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[2].peerId)

      # but the relay peer is not
      node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[3].peerId) == false

    # all service peers are added to their service slot
    check:
      node.peerManager.serviceSlots[WakuStoreCodec].peerId == peers[0].peerId
      node.peerManager.serviceSlots[WakuLightPushCodec].peerId == peers[1].peerId
      node.peerManager.serviceSlots[WakuPeerExchangeCodec].peerId == peers[2].peerId

      # but the relay peer is not
      node.peerManager.serviceSlots.hasKey(WakuRelayCodec) == false

  asyncTest "connectedPeers() returns expected number of connections per protocol":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    # Start them with relay + filter
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))
    await allFutures(nodes.mapIt(it.mountFilter()))

    let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo())

    # create some connections/streams
    check:
      # some relay connections
      (await nodes[0].peerManager.connectPeer(pInfos[1])) == true
      (await nodes[0].peerManager.connectPeer(pInfos[2])) == true
      (await nodes[1].peerManager.connectPeer(pInfos[2])) == true

      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() ==
        true
      (await nodes[0].peerManager.dialPeer(pInfos[2], WakuFilterSubscribeCodec)).isSome() ==
        true

      # isolated dial creates a relay conn under the hood (libp2p behaviour)
      (await nodes[2].peerManager.dialPeer(pInfos[3], WakuFilterSubscribeCodec)).isSome() ==
        true

    # assert physical connections
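    # (For readability: connectedPeers() is assumed to return an
    # (inbound, outbound) tuple of peer lists, so [0] below is the inbound side
    # and [1] the outbound side for the given protocol codec.)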
    check:
      nodes[0].peerManager.connectedPeers(WakuRelayCodec)[0].len == 0
      nodes[0].peerManager.connectedPeers(WakuRelayCodec)[1].len == 2

      nodes[0].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 0
      nodes[0].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 2

      nodes[1].peerManager.connectedPeers(WakuRelayCodec)[0].len == 1
      nodes[1].peerManager.connectedPeers(WakuRelayCodec)[1].len == 1

      nodes[1].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1
      nodes[1].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 0

      nodes[2].peerManager.connectedPeers(WakuRelayCodec)[0].len == 2
      nodes[2].peerManager.connectedPeers(WakuRelayCodec)[1].len == 1

      nodes[2].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1
      nodes[2].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 1

      nodes[3].peerManager.connectedPeers(WakuRelayCodec)[0].len == 1
      nodes[3].peerManager.connectedPeers(WakuRelayCodec)[1].len == 0

      nodes[3].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1
      nodes[3].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 0

  asyncTest "getNumStreams() returns expected number of connections per protocol":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    # Start them with relay + filter
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))
    await allFutures(nodes.mapIt(it.mountFilter()))

    let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo())

    require:
      # multiple streams are multiplexed over a single connection.
      # note that a relay connection is created under the hood when dialing a peer (libp2p behaviour)
      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() ==
        true
      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() ==
        true
      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() ==
        true
      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() ==
        true

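    # Expected shape (assuming getNumStreams() returns an (in, out) stream-count
    # tuple): the four dials above open four outbound filter streams from node0
    # to node1, all multiplexed over the single underlying relay connection.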
    check:
      nodes[0].peerManager.getNumStreams(WakuRelayCodec) == (1, 1)
      nodes[0].peerManager.getNumStreams(WakuFilterSubscribeCodec) == (0, 4)

      nodes[1].peerManager.getNumStreams(WakuRelayCodec) == (1, 1)
      nodes[1].peerManager.getNumStreams(WakuFilterSubscribeCodec) == (4, 0)

  test "selectPeer() returns the correct peer":
    # Valid peer id missing the last digit
    let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"

    # Create peer manager
    let pm = PeerManager.new(
      switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise().build(),
      storage = nil,
    )

    # Create 3 peer infos
    let peers = toSeq(1 .. 3)
      .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
      .filterIt(it.isOk())
      .mapIt(it.value)
    require:
      peers.len == 3

    # Add a peer[0] to the peerstore
    pm.wakuPeerStore[AddressBook][peers[0].peerId] = peers[0].addrs
    pm.wakuPeerStore[ProtoBook][peers[0].peerId] =
      @[WakuRelayCodec, WakuStoreCodec, WakuFilterSubscribeCodec]

    # When no service peers, we get one from the peerstore
    let selectedPeer1 = pm.selectPeer(WakuStoreCodec)
    check:
      selectedPeer1.isSome() == true
      selectedPeer1.get().peerId == peers[0].peerId

    # Same for other protocol
    let selectedPeer2 = pm.selectPeer(WakuFilterSubscribeCodec)
    check:
      selectedPeer2.isSome() == true
      selectedPeer2.get().peerId == peers[0].peerId

    # And return none if we don't have any peer for that protocol
    let selectedPeer3 = pm.selectPeer(WakuLightPushCodec)
    check:
      selectedPeer3.isSome() == false

    # Now we add service peers for different protocols peer[1..3]
    pm.addServicePeer(peers[1], WakuStoreCodec)
    pm.addServicePeer(peers[2], WakuLightPushCodec)

    # We no longer get one from the peerstore. Slots are being used instead.
    let selectedPeer4 = pm.selectPeer(WakuStoreCodec)
    check:
      selectedPeer4.isSome() == true
      selectedPeer4.get().peerId == peers[1].peerId

    let selectedPeer5 = pm.selectPeer(WakuLightPushCodec)
    check:
      selectedPeer5.isSome() == true
      selectedPeer5.get().peerId == peers[2].peerId

  test "peer manager can't have more max connections than peerstore size":
    # Peerstore size can't be smaller than max connections
    let peerStoreSize = 5
    let maxConnections = 10

    expect(Defect):
      let pm = PeerManager.new(
        switch = SwitchBuilder
        .new()
        .withRng(rng)
        .withMplex()
        .withNoise()
        .withPeerStore(peerStoreSize)
        .withMaxConnections(maxConnections)
        .build(),
        storage = nil,
      )

  test "prunePeerStore() correctly removes peers to match max quota":
    # Create peer manager
    let pm = PeerManager.new(
      switch = SwitchBuilder
      .new()
      .withRng(rng)
      .withMplex()
      .withNoise()
      .withPeerStore(10)
      .withMaxConnections(5)
      .build(),
      maxFailedAttempts = 1,
      maxRelayPeers = some(5),
      storage = nil,
    )

    # Create 15 peers and add them to the peerstore
    let peers = toSeq(1 .. 15)
      .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get()))
      .filterIt(it.isOk())
      .mapIt(it.value)
    for p in peers:
      pm.addPeer(p)

    # Check that we have 15 peers in the peerstore
    check:
      pm.wakuPeerStore.peers.len == 15

    # fake that some peers failed to connect
    pm.wakuPeerStore[NumberFailedConnBook][peers[0].peerId] = 2
    pm.wakuPeerStore[NumberFailedConnBook][peers[1].peerId] = 2
    pm.wakuPeerStore[NumberFailedConnBook][peers[2].peerId] = 2

    # fake that some peers are connected
    pm.wakuPeerStore[ConnectionBook][peers[5].peerId] = Connected
    pm.wakuPeerStore[ConnectionBook][peers[8].peerId] = Connected
    pm.wakuPeerStore[ConnectionBook][peers[10].peerId] = Connected
    pm.wakuPeerStore[ConnectionBook][peers[12].peerId] = Connected

    # Prune the peerstore (current=15, target=5)
    pm.prunePeerStore()

    check:
      # ensure peerstore was pruned
      pm.wakuPeerStore.peers.len == 10

      # ensure connected peers were not pruned
      pm.wakuPeerStore.peers.anyIt(it.peerId == peers[5].peerId)
      pm.wakuPeerStore.peers.anyIt(it.peerId == peers[8].peerId)
      pm.wakuPeerStore.peers.anyIt(it.peerId == peers[10].peerId)
      pm.wakuPeerStore.peers.anyIt(it.peerId == peers[12].peerId)

      # ensure peers that failed were the first to be pruned
      not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[0].peerId)
      not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[1].peerId)
      not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[2].peerId)

  asyncTest "canBeConnected() returns correct value":
    let pm = PeerManager.new(
      switch = SwitchBuilder
      .new()
      .withRng(rng)
      .withMplex()
      .withNoise()
      .withPeerStore(10)
      .withMaxConnections(5)
      .build(),
      initialBackoffInSec = 1,
      # with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs.
      backoffFactor = 2,
      maxFailedAttempts = 10,
      maxRelayPeers = some(5),
      storage = nil,
    )
    var p1: PeerId
    require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" & "1")

    # new peer with no errors can be connected
    check:
      pm.canBeConnected(p1) == true

    # peer with ONE error that just failed
    pm.wakuPeerStore[NumberFailedConnBook][p1] = 1
    pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
    # we can't connect right now
    check:
      pm.canBeConnected(p1) == false

    # but we can after the first backoff of 1 second
    await sleepAsync(chronos.milliseconds(1200))
    check:
      pm.canBeConnected(p1) == true

    # peer with TWO errors: we can't connect until 2 seconds have passed
    pm.wakuPeerStore[NumberFailedConnBook][p1] = 2
    pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)

    # can't be connected after 1 second
    await sleepAsync(chronos.milliseconds(1000))
    check:
      pm.canBeConnected(p1) == false

    # can be connected after 2 seconds
    await sleepAsync(chronos.milliseconds(1200))
    check:
      pm.canBeConnected(p1) == true

    # can't be connected if failed attempts are equal to maxFailedAttempts
    pm.maxFailedAttempts = 2
    check:
      pm.canBeConnected(p1) == false

  test "peer manager must fail if max backoff is over a week":
    # Should result in overflow exception
    expect(Defect):
      let pm = PeerManager.new(
        switch = SwitchBuilder
        .new()
        .withRng(rng)
        .withMplex()
        .withNoise()
        .withPeerStore(10)
        .withMaxConnections(5)
        .build(),
        maxRelayPeers = some(5),
        maxFailedAttempts = 150,
        storage = nil,
      )

    # Should result in backoff > 1 week
    expect(Defect):
      let pm = PeerManager.new(
        switch = SwitchBuilder
        .new()
        .withRng(rng)
        .withMplex()
        .withNoise()
        .withPeerStore(10)
        .withMaxConnections(5)
        .build(),
        maxFailedAttempts = 10,
        maxRelayPeers = some(5),
        storage = nil,
      )

    let pm = PeerManager.new(
      switch = SwitchBuilder
      .new()
      .withRng(rng)
      .withMplex()
      .withNoise()
      .withPeerStore(10)
      .withMaxConnections(5)
      .build(),
      maxFailedAttempts = 5,
      maxRelayPeers = some(5),
      storage = nil,
    )

  asyncTest "colocationLimit is enforced by pruneConnsByIp()":
    # Create 5 nodes
    let nodes = toSeq(0 ..< 5).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
    )

    # Start them with relay
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

    let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo())

    # force max 1 conn per ip
    nodes[0].peerManager.colocationLimit = 1
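
    # Assumed behaviour under test: with colocationLimit = 1, pruneConnsByIp()
    # keeps at most one live connection per remote IP (here everything comes
    # from 127.0.0.1), closing the extra inbound/outbound connections.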

    # 2 in connections
    discard await nodes[1].peerManager.connectPeer(pInfos[0])
    discard await nodes[2].peerManager.connectPeer(pInfos[0])
    await sleepAsync(chronos.milliseconds(500))

    # but one is pruned
    check nodes[0].peerManager.switch.connManager.getConnections().len == 1

    # 2 out connections
    discard await nodes[0].peerManager.connectPeer(pInfos[3])
    discard await nodes[0].peerManager.connectPeer(pInfos[4])
    await sleepAsync(chronos.milliseconds(500))

    # they are also pruned
    check nodes[0].peerManager.switch.connManager.getConnections().len == 1

    # we should have 4 peers (2 in / 2 out) but due to the colocation limit
    # they are pruned to max 1
    check:
      nodes[0].peerManager.ipTable["127.0.0.1"].len == 1
      nodes[0].peerManager.switch.connManager.getConnections().len == 1
      nodes[0].peerManager.wakuPeerStore.peers().len == 1

    await allFutures(nodes.mapIt(it.stop()))