mirror of https://github.com/waku-org/nwaku.git
Remove node peerInfo copy (#801)
This commit is contained in:
parent 62dbb3d0f3
commit f3ecdb80ea
@@ -9,13 +9,12 @@ This release contains the following:
  ### Changes
  - The WakuInfo Object field of `listenStr` is deprecated and is now replaced with `listenAddresses`
    which is a sequence of string.
+ - Removed cached `peerInfo` on local node. Rely on underlying libp2p switch instead.
  - Metrics: added counters for protocol messages

  ### Fixes
  - All `HistoryResponse` messages are now auto-paginated to a maximum of 100 messages per response
  - Increased maximum length for reading from a libp2p input stream to allow largest possible protocol messages, including `HistoryResponse` messages at max size.

- ### Fixes
  - Added GossipSub `MessageIdProvider` for `11/WAKU2-RELAY` messages.

  ## 2021-11-05 v0.6
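The changelog entry about the removed `peerInfo` copy is the heart of this commit: code that used to read the cached `node.peerInfo` field now asks the underlying libp2p switch. A minimal before/after sketch in Nim, using only accessors that appear in the hunks below (surrounding node setup and imports are assumed and omitted):

```nim
# Hedged sketch: `node` is an already-constructed WakuNode, as in the tests below.

# Before this commit: the node carried its own cached PeerInfo copy
# let peerInfo = node.peerInfo

# After this commit: the PeerInfo owned by the libp2p switch is the single source of truth
let peerInfo = node.switch.peerInfo
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
echo "Listening on " & listenStr
```

The same substitution, `node.peerInfo` to `node.switch.peerInfo`, accounts for nearly every hunk in the application code and test suites that follow.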
@@ -387,7 +387,7 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
  echo "Couldn't select a random node to connect to. Check --fleet configuration."
  echo randNode.error()

- let peerInfo = node.peerInfo
+ let peerInfo = node.switch.peerInfo
  let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
  echo &"Listening on\n {listenStr}"
@@ -63,7 +63,7 @@ procSuite "Waku v2 JSON-RPC API":
  let response = await client.get_waku_v2_debug_v1_info()

  check:
-   response.listenAddresses == @[$node.peerInfo.addrs[^1] & "/p2p/" & $node.peerInfo.peerId]
+   response.listenAddresses == @[$node.switch.peerInfo.addrs[^1] & "/p2p/" & $node.switch.peerInfo.peerId]

  server.stop()
  server.close()
@@ -142,8 +142,8 @@ procSuite "Waku v2 JSON-RPC API":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  # RPC server setup
  let
@@ -397,11 +397,11 @@ procSuite "Waku v2 JSON-RPC API":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo
  nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"),
    Port(60004))
- peerInfo3 = node3.peerInfo
+ peerInfo3 = node3.switch.peerInfo

  await allFutures([node1.start(), node2.start(), node3.start()])
@@ -453,11 +453,11 @@ procSuite "Waku v2 JSON-RPC API":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo
  nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"),
    Port(60004))
- peerInfo3 = node3.peerInfo
+ peerInfo3 = node3.switch.peerInfo

  await allFutures([node1.start(), node2.start(), node3.start()])
@@ -575,8 +575,8 @@ procSuite "Waku v2 JSON-RPC API":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  # Setup two servers so we can see both sides of encrypted communication
  let
@@ -665,8 +665,8 @@ procSuite "Waku v2 JSON-RPC API":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  # Setup two servers so we can see both sides of encrypted communication
  let
@@ -26,7 +26,7 @@ procSuite "Peer Manager":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo

  await allFutures([node1.start(), node2.start()])
@@ -59,7 +59,7 @@ procSuite "Peer Manager":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo

  await node1.start()
  # Purposefully don't start node2
@@ -128,7 +128,7 @@ procSuite "Peer Manager":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo

  await node1.start()
@@ -173,7 +173,7 @@ procSuite "Peer Manager":
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo

  await node1.start()
  await node2.start()
@@ -221,7 +221,7 @@ asyncTest "Peer manager support multiple protocol IDs when reconnecting to peers
  nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
    Port(60002))
- peerInfo2 = node2.peerInfo
+ peerInfo2 = node2.switch.peerInfo
  betaCodec = "/vac/waku/relay/2.0.0-beta2"
  stableCodec = "/vac/waku/relay/2.0.0"
@@ -108,7 +108,7 @@ procSuite "WakuBridge":
  v2Node.mountRelay(@[DefaultBridgeTopic], triggerSelf = false)

  discard waitFor v1Node.rlpxConnect(newNode(bridge.nodev1.toENode()))
- waitFor v2Node.connectToNodes(@[bridge.nodev2.peerInfo.toRemotePeerInfo()])
+ waitFor v2Node.connectToNodes(@[bridge.nodev2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
@@ -77,20 +77,20 @@ procSuite "Waku DNS Discovery":
    # We have discovered all three nodes
    res.isOk()
    res[].len == 3
-   res[].mapIt(it.peerId).contains(node1.peerInfo.peerId)
-   res[].mapIt(it.peerId).contains(node2.peerInfo.peerId)
-   res[].mapIt(it.peerId).contains(node3.peerInfo.peerId)
+   res[].mapIt(it.peerId).contains(node1.switch.peerInfo.peerId)
+   res[].mapIt(it.peerId).contains(node2.switch.peerInfo.peerId)
+   res[].mapIt(it.peerId).contains(node3.switch.peerInfo.peerId)

  # Connect to discovered nodes
  await node4.connectToNodes(res[])

  check:
    # We have successfully connected to all discovered nodes
-   node4.peerManager.peers().anyIt(it.peerId == node1.peerInfo.peerId)
-   node4.peerManager.connectedness(node1.peerInfo.peerId) == Connected
-   node4.peerManager.peers().anyIt(it.peerId == node2.peerInfo.peerId)
-   node4.peerManager.connectedness(node2.peerInfo.peerId) == Connected
-   node4.peerManager.peers().anyIt(it.peerId == node3.peerInfo.peerId)
-   node4.peerManager.connectedness(node3.peerInfo.peerId) == Connected
+   node4.peerManager.peers().anyIt(it.peerId == node1.switch.peerInfo.peerId)
+   node4.peerManager.connectedness(node1.switch.peerInfo.peerId) == Connected
+   node4.peerManager.peers().anyIt(it.peerId == node2.switch.peerInfo.peerId)
+   node4.peerManager.connectedness(node2.switch.peerInfo.peerId) == Connected
+   node4.peerManager.peers().anyIt(it.peerId == node3.switch.peerInfo.peerId)
+   node4.peerManager.connectedness(node3.switch.peerInfo.peerId) == Connected

  await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()])
@@ -41,7 +41,7 @@ procSuite "Waku Keepalive":
  node2.mountRelay()
  node2.switch.mount(Ping.new(handler = pingHandler))

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  node1.startKeepalive()
@@ -72,9 +72,9 @@ procSuite "Waku SWAP Accounting":

  await sleepAsync(2000.millis)

- node1.wakuStore.setPeer(node2.peerInfo.toRemotePeerInfo())
- node1.wakuSwap.setPeer(node2.peerInfo.toRemotePeerInfo())
- node2.wakuSwap.setPeer(node1.peerInfo.toRemotePeerInfo())
+ node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
+ node1.wakuSwap.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
+ node2.wakuSwap.setPeer(node1.switch.peerInfo.toRemotePeerInfo())

  proc storeHandler(response: HistoryResponse) {.gcsafe, closure.} =
    debug "storeHandler hit"
@@ -87,8 +87,8 @@ procSuite "Waku SWAP Accounting":
  check:
    (await completionFut.withTimeout(5.seconds)) == true
    # Accounting table updated with credit and debit, respectively
-   node1.wakuSwap.accounting[node2.peerInfo.peerId] == 1
-   node2.wakuSwap.accounting[node1.peerInfo.peerId] == -1
+   node1.wakuSwap.accounting[node2.switch.peerInfo.peerId] == 1
+   node2.wakuSwap.accounting[node1.switch.peerInfo.peerId] == -1
  await node1.stop()
  await node2.stop()
@@ -122,9 +122,9 @@ procSuite "Waku SWAP Accounting":

  await sleepAsync(2000.millis)

- node1.wakuStore.setPeer(node2.peerInfo.toRemotePeerInfo())
- node1.wakuSwap.setPeer(node2.peerInfo.toRemotePeerInfo())
- node2.wakuSwap.setPeer(node1.peerInfo.toRemotePeerInfo())
+ node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
+ node1.wakuSwap.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
+ node2.wakuSwap.setPeer(node1.switch.peerInfo.toRemotePeerInfo())

  proc handler1(response: HistoryResponse) {.gcsafe, closure.} =
    futures[0].complete(true)
@@ -139,7 +139,7 @@ procSuite "Waku SWAP Accounting":
    (await allFutures(futures).withTimeout(5.seconds)) == true
    # Accounting table updated with credit and debit, respectively
    # After sending a cheque the balance is partially adjusted
-   node1.wakuSwap.accounting[node2.peerInfo.peerId] == 1
-   node2.wakuSwap.accounting[node1.peerInfo.peerId] == -1
+   node1.wakuSwap.accounting[node2.switch.peerInfo.peerId] == 1
+   node2.wakuSwap.accounting[node1.switch.peerInfo.peerId] == -1
  await node1.stop()
  await node2.stop()
@@ -129,12 +129,12 @@ procSuite "WakuNode":
  node1.subscribe(pubSubTopic, relayHandler)
  # Subscribe a contentFilter to trigger a specific application handler when
  # WakuMessages with that content are received
- node1.wakuFilter.setPeer(node2.peerInfo.toRemotePeerInfo())
+ node1.wakuFilter.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
  await node1.subscribe(filterRequest, contentHandler)
  await sleepAsync(2000.millis)

  # Connect peers by dialing from node2 to node1
- let conn = await node2.switch.dial(node1.peerInfo.peerId, node1.peerInfo.addrs, WakuRelayCodec)
+ let conn = await node2.switch.dial(node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs, WakuRelayCodec)

  # We need to sleep to allow the subscription to go through
  info "Going to sleep to allow subscribe to go through"
@@ -172,7 +172,7 @@ procSuite "WakuNode":
  await node2.start()
  node2.mountRelay()
  node2.mountFilter()
- node2.wakuFilter.setPeer(node1.peerInfo.toRemotePeerInfo())
+ node2.wakuFilter.setPeer(node1.switch.peerInfo.toRemotePeerInfo())

  var defaultComplete = newFuture[bool]()
  var otherComplete = newFuture[bool]()
@@ -243,7 +243,7 @@ procSuite "WakuNode":
  await node2.start()
  node2.mountRelay(relayMessages=false) # Do not start WakuRelay or subscribe to any topics
  node2.mountFilter()
- node2.wakuFilter.setPeer(node1.peerInfo.toRemotePeerInfo())
+ node2.wakuFilter.setPeer(node1.switch.peerInfo.toRemotePeerInfo())

  check:
    node1.wakuRelay.isNil == false # Node1 is a full node
@@ -294,7 +294,7 @@ procSuite "WakuNode":

  await sleepAsync(2000.millis)

- node1.wakuStore.setPeer(node2.peerInfo.toRemotePeerInfo())
+ node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())

  proc storeHandler(response: HistoryResponse) {.gcsafe, closure.} =
    check:
@@ -326,7 +326,7 @@ procSuite "WakuNode":
  await node2.start()
  node2.mountFilter()

- node1.wakuFilter.setPeer(node2.peerInfo.toRemotePeerInfo())
+ node1.wakuFilter.setPeer(node2.switch.peerInfo.toRemotePeerInfo())

  proc handler(msg: WakuMessage) {.gcsafe, closure.} =
    check:
@@ -371,8 +371,8 @@ procSuite "WakuNode":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -429,7 +429,7 @@ procSuite "WakuNode":

  # Now verify that protocol matcher returns `true` and relay works

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -530,8 +530,8 @@ procSuite "WakuNode":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFutValidatorAcc = newFuture[bool]()
@@ -643,8 +643,8 @@ procSuite "WakuNode":
  await node3.start()

  # connect them together
- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -741,8 +741,8 @@ procSuite "WakuNode":
  await node3.start()

  # connect them together
- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  # define a custom relay handler
  var completionFut = newFuture[bool]()
@@ -843,8 +843,8 @@ procSuite "WakuNode":
  await node3.start()

  # connect the nodes together node1 <-> node2 <-> node3
- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  # get the current epoch time
  let time = epochTime()
@@ -983,9 +983,9 @@ procSuite "WakuNode":
  await node3.start()
  node3.mountRelay(@[pubSubTopic])

- discard await node1.peerManager.dialPeer(node2.peerInfo.toRemotePeerInfo(), WakuLightPushCodec)
+ discard await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuLightPushCodec)
  await sleepAsync(5.seconds)
- await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFutLightPush = newFuture[bool]()
  var completionFutRelay = newFuture[bool]()
@@ -1044,7 +1044,7 @@ procSuite "WakuNode":

  await sleepAsync(2000.millis)

- node1.wakuStore.setPeer(node2.peerInfo.toRemotePeerInfo())
+ node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())

  await node1.resume()
@@ -1085,7 +1085,7 @@ procSuite "WakuNode":

  await sleepAsync(2000.millis)

- node1.wakuStore.setPeer(node2.peerInfo.toRemotePeerInfo())
+ node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())

  # populate db with msg1 to be a duplicate
@@ -1145,14 +1145,14 @@ procSuite "WakuNode":
  await node3.start()
  node3.mountRelay()

- discard await node1.peerManager.dialPeer(node2.peerInfo.toRemotePeerInfo(), WakuRelayCodec)
+ discard await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuRelayCodec)
  await sleepAsync(3.seconds)
- discard await node1.peerManager.dialPeer(node3.peerInfo.toRemotePeerInfo(), WakuRelayCodec)
+ discard await node1.peerManager.dialPeer(node3.switch.peerInfo.toRemotePeerInfo(), WakuRelayCodec)

  check:
    # Verify that only the first connection succeeded
-   node1.switch.isConnected(node2.peerInfo.peerId)
-   node1.switch.isConnected(node3.peerInfo.peerId) == false
+   node1.switch.isConnected(node2.switch.peerInfo.peerId)
+   node1.switch.isConnected(node3.switch.peerInfo.peerId) == false

  await allFutures([node1.stop(), node2.stop(), node3.stop()])
@@ -1176,7 +1176,7 @@ asyncTest "Messages are relayed between two websocket nodes":
  await node2.start()
  node2.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -1221,7 +1221,7 @@ asyncTest "Messages are relayed between nodes with multiple transports (TCP and
  await node2.start()
  node2.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -1267,9 +1267,9 @@ asyncTest "Messages relaying fails with non-overlapping transports (TCP or Webso

  #delete websocket peer address
  # TODO: a better way to find the index - this is too brittle
- node2.peerInfo.addrs.delete(0)
+ node2.switch.peerInfo.addrs.delete(0)

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -1313,7 +1313,7 @@ asyncTest "Messages are relayed between nodes with multiple transports (TCP and
  await node2.start()
  node2.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -1366,7 +1366,7 @@ asyncTest "Messages are relayed between nodes with multiple transports (websocke
  await node2.start()
  node2.mountRelay(@[pubSubTopic])

- await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

  var completionFut = newFuture[bool]()
  proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -44,8 +44,8 @@ proc subscribeNodes*(nodes: seq[PubSub]) {.async.} =
    for dialer in nodes:
      for node in nodes:
        if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
-         await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
-         dialer.subscribePeer(node.peerInfo.peerId)
+         await dialer.switch.connect(node.switch.peerInfo.peerId, node.switch.peerInfo.addrs)
+         dialer.subscribePeer(node.switch.peerInfo.peerId)

  proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
    if nodes.len < degree:
@@ -56,17 +56,17 @@ proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
        continue

      for node in nodes:
-       if dialer.switch.peerInfo.peerId != node.peerInfo.peerId:
-         await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
-         dialer.subscribePeer(node.peerInfo.peerId)
+       if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
+         await dialer.switch.connect(node.switch.peerInfo.peerId, node.switch.peerInfo.addrs)
+         dialer.subscribePeer(node.switch.peerInfo.peerId)

  proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
    for dialer in nodes:
      var dialed: seq[PeerID]
      while dialed.len < nodes.len - 1:
        let node = sample(nodes)
-       if node.peerInfo.peerId notin dialed:
-         if dialer.peerInfo.peerId != node.peerInfo.peerId:
-           await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
-           dialer.subscribePeer(node.peerInfo.peerId)
-           dialed.add(node.peerInfo.peerId)
+       if node.switch.peerInfo.peerId notin dialed:
+         if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
+           await dialer.switch.connect(node.switch.peerInfo.peerId, node.switch.peerInfo.addrs)
+           dialer.subscribePeer(node.switch.peerInfo.peerId)
+           dialed.add(node.switch.peerInfo.peerId)
@@ -82,7 +82,6 @@ type
    wakuSwap*: WakuSwap
    wakuRlnRelay*: WakuRLNRelay
    wakuLightPush*: WakuLightPush
-   peerInfo*: PeerInfo
    enr*: enr.Record
    libp2pPing*: Ping
    libp2pTransportLoops*: seq[Future[void]]
@@ -195,7 +194,6 @@ proc new*(T: type WakuNode, nodeKey: crypto.PrivateKey,
  ## Initialize peer
  let
    rng = crypto.newRng()
-   peerInfo = PeerInfo.new(nodekey)
    enrIp = if extIp.isSome(): extIp
            else: some(bindIp)
    enrTcpPort = if extPort.isSome(): extPort
@@ -209,12 +207,8 @@ proc new*(T: type WakuNode, nodeKey: crypto.PrivateKey,
    wakuFlags,
    enrMultiaddrs)

- # TODO: local peerInfo should be removed
- for multiaddr in announcedAddresses:
-   peerInfo.addrs.add(multiaddr)
-
- info "Initializing networking", addrs=peerInfo.addrs
+ info "Initializing networking", addrs=announcedAddresses

  var switch = newWakuSwitch(some(nodekey),
    hostAddress,
    wsHostAddress,
@@ -228,8 +222,7 @@ proc new*(T: type WakuNode, nodeKey: crypto.PrivateKey,
  let wakuNode = WakuNode(
    peerManager: PeerManager.new(switch, peerStorage),
    switch: switch,
    rng: rng,
-   peerInfo: peerInfo,
    enr: enr,
    filters: initTable[string, Filter](),
    announcedAddresses: announcedAddresses
@@ -423,8 +416,7 @@ proc info*(node: WakuNode): WakuInfo =
  ## Status: Implemented.
  ##

- let
-   peerInfo = node.peerInfo
+ let peerInfo = node.switch.peerInfo

  var listenStr : seq[string]
  for address in node.announcedAddresses:
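The `info()` hunk above shows only the head of the rewritten accessor; the loop body falls outside the hunk. As a hedged sketch of how the `listenAddresses` values are assembled, the `add` line below is an assumption, though its "multiaddr/p2p/peerId" format matches the expectation in the JSON-RPC API test earlier in this diff:

```nim
# Sketch only: loop body assumed, not part of this diff.
let peerInfo = node.switch.peerInfo
var listenStr: seq[string]
for address in node.announcedAddresses:
  listenStr.add($address & "/p2p/" & $peerInfo.peerId)
# The resulting sequence backs the WakuInfo `listenAddresses` field that
# replaces the deprecated `listenStr` string.
```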
@@ -868,7 +860,7 @@ proc start*(node: WakuNode) {.async.} =
  node.libp2pTransportLoops = await node.switch.start()

  # TODO Get this from WakuNode obj
- let peerInfo = node.peerInfo
+ let peerInfo = node.switch.peerInfo
  info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
  var listenStr = ""
  for address in node.announcedAddresses:
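Putting the pieces together, a hedged end-to-end sketch of the post-change API, with constructor arguments and calls copied from the test hunks above (imports, error handling, and the chosen port are illustrative only):

```nim
# Hedged usage sketch; setup mirrors the tests in this diff.
let
  rng = crypto.newRng()
  nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
  node = WakuNode.new(nodeKey, ValidIpAddress.init("0.0.0.0"), Port(60000))

await node.start()

# There is no cached `peerInfo` field any more: identity and addresses are
# always read from the libp2p switch owned by the node.
info "PeerInfo", peerId = node.switch.peerInfo.peerId,
  addrs = node.switch.peerInfo.addrs
```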