chore: update nwaku submodules (#1123)

* chore: update submodules

* fix: SIGTERM ambiguity

* fix: ambiguous RNGs

* fix: ContentType is no longer a string

* fix: more fixes related to ambiguous RNGs

* fix: start all protocols

* chore: also update nim-eth

* chore: important new fixes in nim-libp2p

* fix: more changes related to RNG. Some reversion to reflect nim-eth update

* fix: breaking changes in nim-eth submodule

* fix: start protocols in tests

* fix: chat2bridge protocols async mounting

* fix: v1 test compilation

* fix: rln test compilation

* fix: remove confusing keys qualifier for the same HmacDrbgContext
Hanno Cornelius 2022-09-07 16:31:27 +01:00 committed by GitHub
parent 9af4676939
commit 73a93ed0d2
79 changed files with 445 additions and 335 deletions
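
Most of the churn in this diff follows two mechanical patterns: RNG handles are now bearssl's HmacDrbgContext (typically obtained via crypto.newRng() on the libp2p side), and the mount* procs on WakuNode are now async and must be awaited. A minimal usage sketch of the new mounting flow, assuming a standalone application (the import paths, the setup proc name and the port value are illustrative, not taken from this repo):

import chronos, stew/results, stew/shims/net as stewNet
import libp2p/crypto/crypto
import ./waku/v2/node/wakunode2   # illustrative import path

proc setup() {.async.} =
  let
    rng = crypto.newRng()   # ref HmacDrbgContext, the libp2p-flavoured RNG helper
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet()
    node = WakuNode.new(nodeKey, ValidIpAddress.init("0.0.0.0"), Port(60000))

  await node.start()

  # mount* procs are now async and must be awaited. As the wakunode2.nim hunks
  # further below show, they also start the freshly mounted protocol when the
  # node has already been started.
  await node.mountRelay()
  await node.mountFilter()
  await node.mountStore(persistMessages = true)
  await node.mountLibp2pPing()

when isMainModule:
  waitFor setup()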

View File

@ -10,7 +10,7 @@ import
const clientId = "Waku example v1"
proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext) =
proc run(config: WakuNodeConf, rng: ref HmacDrbgContext) =
# Set up the address according to NAT information.
let (ipExt, tcpPortExt, udpPortExt) = setupNat(config.nat, clientId,
Port(config.tcpPort + config.portsShift),
@ -33,6 +33,8 @@ proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext) =
nil, # Database, not required for Waku
clientId, # Client id string
addAllCapabilities = false, # Disable default all RLPx capabilities
bindUdpPort = address.udpPort, # Assume same as external
bindTcpPort = address.tcpPort, # Assume same as external
rng = rng)
node.addCapability Waku # Enable only the Waku protocol.
@ -57,7 +59,7 @@ proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext) =
# connection occurs, which is why we use a callback to exit on errors instead of
# using `await`.
# TODO: This looks a bit awkward and the API should perhaps be altered here.
let connectedFut = node.connectToNetwork(@[],
let connectedFut = node.connectToNetwork(
true, # Enable listening
false # Disable discovery (only discovery v4 is currently supported)
)

View File

@ -25,7 +25,7 @@ proc runBackground() {.async.} =
Port(uint16(conf.tcpPort) + conf.portsShift), extIp, extTcpPort)
await node.start()
node.mountRelay()
await node.mountRelay()
# Subscribe to a topic
let topic = cast[Topic]("foobar")

View File

@ -214,9 +214,10 @@ proc publish(c: Chat, line: string) =
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
rng = keys.newRng()
payload = Payload(payload: chat2pb.buffer, symKey: some(c.symKey))
version = 1'u32
encodedPayload = payload.encode(version, c.node.rng[])
encodedPayload = payload.encode(version, rng[])
if encodedPayload.isOk():
var message = WakuMessage(payload: encodedPayload.get(),
contentTopic: c.contentTopic, version: version, timestamp: getNanosecondTime(time))
@ -359,7 +360,7 @@ proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} =
discard waitFor transp.write(line & "\r\n")
{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
proc processInput(rfd: AsyncFD) {.async.} =
let transp = fromPipe(rfd)
let
@ -375,10 +376,10 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
wssEnabled = conf.websocketSecureSupport)
await node.start()
node.mountRelay(conf.topics.split(" "),
relayMessages = conf.relay) # Indicates if node is capable to relay messages
if conf.relay:
await node.mountRelay(conf.topics.split(" "))
node.mountLibp2pPing()
await node.mountLibp2pPing()
let nick = await readNick(transp)
echo "Welcome, " & nick & "!"
@ -445,10 +446,10 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
echo &"Listening on\n {listenStr}"
if conf.swap:
node.mountSwap()
await node.mountSwap()
if (conf.storenode != "") or (conf.store == true):
node.mountStore(persistMessages = conf.persistMessages)
await node.mountStore(persistMessages = conf.persistMessages)
var storenode: Option[RemotePeerInfo]
@ -477,12 +478,12 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
# NOTE Must be mounted after relay
if conf.lightpushnode != "":
mountLightPush(node)
await mountLightPush(node)
node.wakuLightPush.setPeer(parseRemotePeerInfo(conf.lightpushnode))
if conf.filternode != "":
node.mountFilter()
await node.mountFilter()
node.wakuFilter.setPeer(parseRemotePeerInfo(conf.filternode))
@ -545,7 +546,6 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
runForever()
proc main() {.async.} =
let rng = crypto.newRng() # Single random number source for the whole application
let (rfd, wfd) = createAsyncPipe()
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
raise newException(ValueError, "Could not initialize pipe!")
@ -553,7 +553,7 @@ proc main() {.async.} =
var thread: Thread[AsyncFD]
thread.createThread(readInput, wfd)
await processInput(rfd, rng)
await processInput(rfd)
when isMainModule: # isMainModule = true when the module is compiled as the main file
waitFor(main())

View File

@ -24,7 +24,7 @@ type
nodekey* {.
desc: "P2P node private key as 64 char hex string.",
defaultValue: crypto.PrivateKey.random(Secp256k1, keys.newRng()[]).tryGet()
defaultValue: crypto.PrivateKey.random(Secp256k1, crypto.newRng()[]).tryGet()
name: "nodekey" }: crypto.PrivateKey
listenAddress* {.

View File

@ -186,7 +186,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
# Always mount relay for bridge
# `triggerSelf` is false on a `bridge` to avoid duplicates
cmb.nodev2.mountRelay(triggerSelf = false)
await cmb.nodev2.mountRelay(triggerSelf = false)
# Bridging
# Handle messages on Waku v2 and bridge to Matterbridge
@ -263,13 +263,13 @@ when isMainModule:
# Now load rest of config
# Mount configured Waku v2 protocols
mountLibp2pPing(bridge.nodev2)
waitFor mountLibp2pPing(bridge.nodev2)
if conf.store:
mountStore(bridge.nodev2)
waitFor mountStore(bridge.nodev2)
if conf.filter:
mountFilter(bridge.nodev2)
waitFor mountFilter(bridge.nodev2)
if conf.staticnodes.len > 0:
waitFor connectToNodes(bridge.nodev2, conf.staticnodes)

View File

@ -1,5 +1,5 @@
import
chronos, bearssl,
chronos, bearssl/rand,
eth/[keys, p2p]
import libp2p/crypto/crypto
@ -12,22 +12,27 @@ proc localAddress*(port: int): Address =
ip: parseIpAddress("127.0.0.1"))
proc setupTestNode*(
rng: ref BrHmacDrbgContext,
rng: ref HmacDrbgContext,
capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
let keys1 = keys.KeyPair.random(rng[])
result = newEthereumNode(keys1, localAddress(nextPort), NetworkId(1), nil,
addAllCapabilities = false, rng = rng)
let
keys1 = keys.KeyPair.random(rng[])
address = localAddress(nextPort)
result = newEthereumNode(keys1, address, NetworkId(1), nil,
addAllCapabilities = false,
bindUdpPort = address.udpPort, # Assume same as external
bindTcpPort = address.tcpPort, # Assume same as external
rng = rng)
nextPort.inc
for capability in capabilities:
result.addCapability capability
# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28
type RngWrap = object
rng: ref BrHmacDrbgContext
rng: ref rand.HmacDrbgContext
var rngVar: RngWrap
proc getRng(): ref BrHmacDrbgContext =
proc getRng(): ref rand.HmacDrbgContext =
# TODO if `rngVar` is a threadvar like it should be, there are random and
# spurious compile failures on mac - this is not gcsafe but for the
# purpose of the tests, it's ok as long as we only use a single thread
@ -36,5 +41,5 @@ proc getRng(): ref BrHmacDrbgContext =
rngVar.rng = crypto.newRng()
rngVar.rng
template rng*(): ref BrHmacDrbgContext =
template rng*(): ref rand.HmacDrbgContext =
getRng()
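
For reference, the type rename behind these changes: nim-bearssl now exposes HmacDrbgContext from bearssl/rand (previously BrHmacDrbgContext), and both nim-eth's keys.newRng() and nim-libp2p's crypto.newRng() hand back a ref to that same type. A minimal sketch of the two entry points, assuming the updated submodules (variable names are illustrative):

import bearssl/rand
import eth/keys
import libp2p/crypto/crypto

let
  ethRng: ref rand.HmacDrbgContext = keys.newRng()    # used with Waku v1 / nim-eth keypairs
  p2pRng: ref rand.HmacDrbgContext = crypto.newRng()  # used with Waku v2 / libp2p keys

# Same context type in both cases, so the module qualifiers (keys., crypto.) only
# select which helper creates the RNG; they no longer distinguish RNG types.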

View File

@ -14,14 +14,14 @@ const sigPath = sourceDir / ParDir / ParDir / "waku" / "v1" / "node" / "rpc" / "
createRpcSigs(RpcSocketClient, sigPath)
proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`],
rng: ref BrHmacDrbgContext, ): EthereumNode =
rng: ref HmacDrbgContext, ): EthereumNode =
let
keypair = KeyPair.random(rng[])
srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303),
udpPort: Port(30303))
result = newEthereumNode(keypair, srvAddress, NetworkId(1), nil, "waku test rpc",
addAllCapabilities = false, rng = rng)
addAllCapabilities = false, bindUdpPort = srvAddress.udpPort, bindTcpPort = srvAddress.tcpPort, rng = rng)
for capability in capabilities:
result.addCapability capability

View File

@ -44,11 +44,11 @@ procSuite "Waku connections":
n3 = setupTestNode(rng, Waku)
n4 = setupTestNode(rng, Waku)
var topics: seq[Topic]
var topics: seq[waku_protocol.Topic]
n1.protocolState(Waku).config.topics = some(topics)
n2.protocolState(Waku).config.topics = some(topics)
n3.protocolState(Waku).config.topics = none(seq[Topic])
n4.protocolState(Waku).config.topics = none(seq[Topic])
n3.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic])
n4.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic])
n1.startListening()
n3.startListening()
@ -499,7 +499,7 @@ procSuite "Waku connections":
let bloomFilterUpdatedCondition = proc(): bool =
for peer in wakuNode.peerPool.peers:
return peer.state(Waku).bloom == bloom and
peer.state(Waku).topics == none(seq[Topic])
peer.state(Waku).topics == none(seq[waku_protocol.Topic])
let bloomFilterUpdated =
await eventually(conditionTimeoutMs, bloomFilterUpdatedCondition)

View File

@ -47,7 +47,7 @@ procSuite "Waku v2 JSON-RPC API":
asyncTest "Debug API: get node info":
waitFor node.start()
node.mountRelay()
await node.mountRelay()
# RPC server setup
let
@ -74,7 +74,7 @@ procSuite "Waku v2 JSON-RPC API":
asyncTest "Relay API: publish and subscribe/unsubscribe":
waitFor node.start()
node.mountRelay()
await node.mountRelay()
# RPC server setup
let
@ -137,13 +137,13 @@ procSuite "Waku v2 JSON-RPC API":
message2 = WakuMessage(payload: payload2, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -215,7 +215,7 @@ procSuite "Waku v2 JSON-RPC API":
asyncTest "Store API: retrieve historical messages":
waitFor node.start()
node.mountRelay()
await node.mountRelay()
# RPC server setup
let
@ -231,7 +231,7 @@ procSuite "Waku v2 JSON-RPC API":
key = wakunode2.PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.new(key)
node.mountStore(persistMessages = true)
await node.mountStore(persistMessages = true)
var listenSwitch = newStandardSwitch(some(key))
waitFor listenSwitch.start()
@ -273,9 +273,9 @@ procSuite "Waku v2 JSON-RPC API":
asyncTest "Filter API: subscribe/unsubscribe":
waitFor node.start()
node.mountRelay()
await node.mountRelay()
node.mountFilter()
await node.mountFilter()
# RPC server setup
let
@ -329,7 +329,7 @@ procSuite "Waku v2 JSON-RPC API":
installFilterApiHandlers(node, server, newTable[ContentTopic, seq[WakuMessage]]())
server.start()
node.mountFilter()
await node.mountFilter()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort, false)
@ -412,9 +412,9 @@ procSuite "Waku v2 JSON-RPC API":
await allFutures([node1.start(), node2.start(), node3.start()])
node1.mountRelay()
node2.mountRelay()
node3.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
await node3.mountRelay()
# RPC server setup
let
@ -469,9 +469,9 @@ procSuite "Waku v2 JSON-RPC API":
await allFutures([node1.start(), node2.start(), node3.start()])
node1.mountRelay()
node2.mountRelay()
node3.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
await node3.mountRelay()
# Dial nodes 2 and 3 from node1
await node1.connectToNodes(@[constructMultiaddrStr(peerInfo2)])
@ -525,9 +525,9 @@ procSuite "Waku v2 JSON-RPC API":
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort, false)
node.mountFilter()
node.mountSwap()
node.mountStore(persistMessages = true)
await node.mountFilter()
await node.mountSwap()
await node.mountStore(persistMessages = true)
# Create and set some peers
let
@ -577,13 +577,13 @@ procSuite "Waku v2 JSON-RPC API":
topicCache = newTable[string, seq[WakuMessage]]()
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -598,8 +598,8 @@ procSuite "Waku v2 JSON-RPC API":
server3 = newRpcHttpServer([ta3])
# Let's connect to nodes 1 and 3 via the API
installPrivateApiHandlers(node1, server1, rng, newTable[string, seq[WakuMessage]]())
installPrivateApiHandlers(node3, server3, rng, topicCache)
installPrivateApiHandlers(node1, server1, newTable[string, seq[WakuMessage]]())
installPrivateApiHandlers(node3, server3, topicCache)
installRelayApiHandlers(node3, server3, topicCache)
server1.start()
server3.start()
@ -668,13 +668,13 @@ procSuite "Waku v2 JSON-RPC API":
topicCache = newTable[string, seq[WakuMessage]]()
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -689,8 +689,8 @@ procSuite "Waku v2 JSON-RPC API":
server3 = newRpcHttpServer([ta3])
# Let's connect to nodes 1 and 3 via the API
installPrivateApiHandlers(node1, server1, rng, newTable[string, seq[WakuMessage]]())
installPrivateApiHandlers(node3, server3, rng, topicCache)
installPrivateApiHandlers(node1, server1, newTable[string, seq[WakuMessage]]())
installPrivateApiHandlers(node3, server3, topicCache)
installRelayApiHandlers(node3, server3, topicCache)
server1.start()
server3.start()

View File

@ -52,9 +52,9 @@ procSuite "Peer Exchange":
peerExchangeHandler = handlePeerExchange
emptyHandler = ignorePeerExchange
node1.mountRelay(peerExchangeHandler = some(emptyHandler))
node2.mountRelay(peerExchangeHandler = some(emptyHandler))
node3.mountRelay(peerExchangeHandler = some(peerExchangeHandler))
await node1.mountRelay(peerExchangeHandler = some(emptyHandler))
await node2.mountRelay(peerExchangeHandler = some(emptyHandler))
await node3.mountRelay(peerExchangeHandler = some(peerExchangeHandler))
# Ensure that node1 prunes all peers after the first connection
node1.wakuRelay.parameters.dHigh = 1

View File

@ -34,8 +34,8 @@ procSuite "Peer Manager":
await allFutures([node1.start(), node2.start()])
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
# Dial node2 from node1
let conn = (await node1.peerManager.dialPeer(peerInfo2.toRemotePeerInfo(), WakuRelayCodec)).get()
@ -68,8 +68,8 @@ procSuite "Peer Manager":
await node1.start()
# Purposefully don't start node2
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
# Dial node2 from node1
let connOpt = await node1.peerManager.dialPeer(peerInfo2.toRemotePeerInfo(), WakuRelayCodec, 2.seconds)
@ -100,9 +100,9 @@ procSuite "Peer Manager":
await node.start()
node.mountFilter()
node.mountSwap()
node.mountStore(persistMessages = true)
await node.mountFilter()
await node.mountSwap()
await node.mountStore(persistMessages = true)
node.wakuFilter.setPeer(filterPeer.toRemotePeerInfo())
node.wakuSwap.setPeer(swapPeer.toRemotePeerInfo())
@ -136,8 +136,8 @@ procSuite "Peer Manager":
await node1.start()
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
# Test default connectedness for new peers
node1.peerManager.addPeer(peerInfo2.toRemotePeerInfo(), WakuRelayCodec)
@ -182,8 +182,8 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
discard await node1.peerManager.dialPeer(peerInfo2.toRemotePeerInfo(), WakuRelayCodec, 2.seconds)
check:
@ -205,7 +205,7 @@ procSuite "Peer Manager":
node3.peerManager.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.connectedness(peerInfo2.peerId) == NotConnected
node3.mountRelay() # This should trigger a reconnect
await node3.mountRelay() # This should trigger a reconnect
check:
# Reconnected to node2 after "restart"
@ -232,9 +232,9 @@ asyncTest "Peer manager support multiple protocol IDs when reconnecting to peers
await node1.start()
await node2.start()
node1.mountRelay()
await node1.mountRelay()
node1.wakuRelay.codec = betaCodec
node2.mountRelay()
await node2.mountRelay()
node2.wakuRelay.codec = betaCodec
discard await node1.peerManager.dialPeer(peerInfo2.toRemotePeerInfo(), node2.wakuRelay.codec, 2.seconds)
@ -251,7 +251,7 @@ asyncTest "Peer manager support multiple protocol IDs when reconnecting to peers
node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"),
Port(60004), peerStorage = storage)
node3.mountRelay()
await node3.mountRelay()
node3.wakuRelay.codec = stableCodec
check:
# Node 2 and 3 have differing codecs

View File

@ -28,7 +28,7 @@ suite "REST API - Debug":
# Given
let node = testWakuNode()
await node.start()
node.mountRelay()
await node.mountRelay()
let restPort = Port(8546)
let restAddress = ValidIpAddress.init("0.0.0.0")

View File

@ -39,7 +39,7 @@ suite "REST API - Relay":
# Given
let node = testWakuNode()
await node.start()
node.mountRelay()
await node.mountRelay()
let restPort = Port(8546)
let restAddress = ValidIpAddress.init("0.0.0.0")
@ -84,7 +84,7 @@ suite "REST API - Relay":
# Given
let node = testWakuNode()
await node.start()
node.mountRelay()
await node.mountRelay()
let restPort = Port(8546)
let restAddress = ValidIpAddress.init("0.0.0.0")
@ -132,7 +132,7 @@ suite "REST API - Relay":
# Given
let node = testWakuNode()
await node.start()
node.mountRelay()
await node.mountRelay()
let restPort = Port(8546)
let restAddress = ValidIpAddress.init("0.0.0.0")
@ -183,7 +183,7 @@ suite "REST API - Relay":
# Given
let node = testWakuNode()
await node.start()
node.mountRelay()
await node.mountRelay()
# RPC server setup
let restPort = Port(8546)

View File

@ -85,7 +85,7 @@ procSuite "FloodSub":
)
for node in nodes:
node.mountRelay()
await node.mountRelay()
await subscribeNodes(nodes)

View File

@ -31,10 +31,11 @@ procSuite "WakuBridge":
let
rng = keys.newRng()
cryptoRng = crypto.newRng()
# Bridge
nodev1Key = keys.KeyPair.random(rng[])
nodev2Key = crypto.PrivateKey.random(Secp256k1, rng[])[]
nodev2Key = crypto.PrivateKey.random(Secp256k1, cryptoRng[])[]
bridge = WakuBridge.new(
nodev1Key= nodev1Key,
nodev1Address = localAddress(30302),
@ -48,7 +49,7 @@ procSuite "WakuBridge":
v1Node = setupTestNode(rng, Waku)
# Waku v2 node
v2NodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
v2NodeKey = crypto.PrivateKey.random(Secp256k1, cryptoRng[])[]
v2Node = WakuNode.new(v2NodeKey, ValidIpAddress.init("0.0.0.0"), Port(60002))
contentTopic = ContentTopic("/waku/1/0x1a2b3c4d/rfc26")
@ -118,7 +119,7 @@ procSuite "WakuBridge":
waitFor bridge.start()
waitFor v2Node.start()
v2Node.mountRelay(@[DefaultBridgeTopic], triggerSelf = false)
await v2Node.mountRelay(@[DefaultBridgeTopic], triggerSelf = false)
discard waitFor v1Node.rlpxConnect(newNode(bridge.nodev1.toENode()))
waitFor v2Node.connectToNodes(@[bridge.nodev2.switch.peerInfo.toRemotePeerInfo()])

View File

@ -84,9 +84,9 @@ procSuite "Waku Discovery v5":
node3.rng
)
node1.mountRelay()
node2.mountRelay()
node3.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
await node3.mountRelay()
await allFutures([node1.start(), node2.start(), node3.start()])

View File

@ -34,9 +34,9 @@ procSuite "Waku DNS Discovery":
node3 = WakuNode.new(nodeKey3, bindIp, Port(60003))
enr3 = node3.enr
node1.mountRelay()
node2.mountRelay()
node3.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
await node3.mountRelay()
await allFutures([node1.start(), node2.start(), node3.start()])
# Build and sign tree
@ -44,7 +44,7 @@ procSuite "Waku DNS Discovery":
@[enr1, enr2, enr3], # ENR entries
@[]).get() # No link entries
let treeKeys = keys.KeyPair.random(rng[])
let treeKeys = keys.KeyPair.random(keys.newRng()[])
# Sign tree
check:
@ -68,7 +68,7 @@ procSuite "Waku DNS Discovery":
nodeKey4 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node4 = WakuNode.new(nodeKey4, bindIp, Port(60004))
node4.mountRelay()
await node4.mountRelay()
await node4.start()
var wakuDnsDisc = WakuDnsDiscovery.init(location, resolver).get()

View File

@ -45,6 +45,7 @@ procSuite "Waku Filter":
let
serverPeerManager = PeerManager.new(serverSwitch)
serverProto = WakuFilter.init(serverPeerManager, rng, dummyHandler)
await serverProto.start()
serverSwitch.mount(serverProto)
# Client
@ -55,6 +56,7 @@ procSuite "Waku Filter":
let
clientPeerManager = PeerManager.new(clientSwitch)
clientProto = WakuFilter.init(clientPeerManager, rng, handler)
await clientProto.start()
clientSwitch.mount(clientProto)
clientProto.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
@ -93,6 +95,7 @@ procSuite "Waku Filter":
let
serverPeerManager = PeerManager.new(serverSwitch)
serverProto = WakuFilter.init(serverPeerManager, rng, dummyHandler)
await serverProto.start()
serverSwitch.mount(serverProto)
# Client
@ -103,6 +106,7 @@ procSuite "Waku Filter":
let
clientPeerManager = PeerManager.new(clientSwitch)
clientProto = WakuFilter.init(clientPeerManager, rng, handler)
await clientProto.start()
clientSwitch.mount(clientProto)
clientProto.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
@ -144,6 +148,7 @@ procSuite "Waku Filter":
## Given
let clientProto = WakuFilter.init(PeerManager.new(clientSwitch), crypto.newRng(), dummyHandler)
await clientProto.start()
clientSwitch.mount(clientProto)
## When
@ -168,6 +173,7 @@ procSuite "Waku Filter":
let
serverPeerManager = PeerManager.new(serverSwitch)
serverProto = WakuFilter.init(serverPeerManager, rng, dummyHandler, timeout=1.seconds)
await serverProto.start()
serverSwitch.mount(serverProto)
# Client
@ -178,6 +184,7 @@ procSuite "Waku Filter":
let
clientPeerManager = PeerManager.new(clientSwitch)
clientProto = WakuFilter.init(clientPeerManager, rng, handler)
await clientProto.start()
clientSwitch.mount(clientProto)
clientProto.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
@ -242,6 +249,7 @@ procSuite "Waku Filter":
let
serverPeerManager = PeerManager.new(serverSwitch)
serverProto = WakuFilter.init(serverPeerManager, rng, dummyHandler, timeout=2.seconds)
await serverProto.start()
serverSwitch.mount(serverProto)
# Client
@ -252,6 +260,7 @@ procSuite "Waku Filter":
let
clientPeerManager = PeerManager.new(clientSwitch)
clientProto = WakuFilter.init(clientPeerManager, rng, handler)
await clientProto.start()
clientSwitch.mount(clientProto)
clientProto.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
@ -289,6 +298,7 @@ procSuite "Waku Filter":
# Start switch with same key as before
var clientSwitch2 = newTestSwitch(some(clientKey), some(clientAddress))
await clientSwitch2.start()
await clientProto.start()
clientSwitch2.mount(clientProto)
# If push succeeds after failure, the peer should removed from failed peers list

View File

@ -34,12 +34,15 @@ procSuite "Waku Keepalive":
completionFut.complete(true)
await node1.start()
node1.mountRelay()
node1.mountLibp2pPing()
await node1.mountRelay()
await node1.mountLibp2pPing()
await node2.start()
node2.mountRelay()
node2.switch.mount(Ping.new(handler = pingHandler))
await node2.mountRelay()
let pingProto = Ping.new(handler = pingHandler)
await pingProto.start()
node2.switch.mount(pingProto)
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

View File

@ -49,6 +49,7 @@ procSuite "Waku Lightpush":
proto = WakuLightPush.init(peerManager, rng, requestHandler)
proto.setPeer(listenSwitch.peerInfo.toRemotePeerInfo())
waitFor proto.start()
dialSwitch.mount(proto)
@ -63,7 +64,7 @@ procSuite "Waku Lightpush":
peerManager2 = PeerManager.new(listenSwitch)
rng2 = crypto.newRng()
proto2 = WakuLightPush.init(peerManager2, rng2, requestHandler2)
waitFor proto2.start()
listenSwitch.mount(proto2)

View File

@ -44,7 +44,7 @@ procSuite "Waku rln relay":
let index = MembershipIndex(5)
# -------- mount rln-relay in the off-chain mode
node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
await node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
node.mountRlnRelayStatic(group = groupIDCommitments,
memKeyPair = groupKeyPairs[index],
memIndex = index,

View File

@ -346,7 +346,7 @@ procSuite "Waku-rln-relay":
# test ------------------------------
# start rln-relay
node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
await node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
node.mountRlnRelayStatic(group = group,
memKeyPair = keypair.get(),
memIndex = index,
@ -427,7 +427,7 @@ procSuite "Waku-rln-relay":
# test ------------------------------
# start rln-relay
node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
await node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
discard await node.mountRlnRelayDynamic(ethClientAddr = EthClient,
ethAccAddr = ethacc,
ethAccountPrivKeyOpt = some(ethPrivKey),
@ -480,7 +480,7 @@ procSuite "Waku-rln-relay":
let (ethPrivKey, ethacc) = await createEthAccount()
# start rln-relay on the first node, leave rln-relay credentials empty
node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
await node.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
discard await node.mountRlnRelayDynamic(ethClientAddr = EthClient,
ethAccAddr = ethacc,
ethAccountPrivKeyOpt = some(ethPrivKey),
@ -493,7 +493,7 @@ procSuite "Waku-rln-relay":
# start rln-relay on the second node, leave rln-relay credentials empty
node2.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
await node2.mountRelay(@[RLNRELAY_PUBSUB_TOPIC])
discard await node2.mountRlnRelayDynamic(ethClientAddr = EthClient,
ethAccAddr = ethacc,
ethAccountPrivKeyOpt = some(ethPrivKey),

View File

@ -53,6 +53,7 @@ proc newTestWakuStore(switch: Switch): WakuStore =
store = WakuMessageStore.init(database).tryGet()
proto = WakuStore.init(peerManager, rng, store)
waitFor proto.start()
switch.mount(proto)
return proto
@ -468,6 +469,7 @@ procSuite "Waku Store - fault tolerant store":
let storePeer = peer.get(listenSwitch.peerInfo.toRemotePeerInfo())
proto.setPeer(storePeer)
await proto.start()
listenSwitch.mount(proto)
return (listenSwitch, dialSwitch, proto)

View File

@ -62,11 +62,11 @@ procSuite "Waku SWAP Accounting":
# Start nodes and mount protocols
await node1.start()
node1.mountSwap()
node1.mountStore(persistMessages = true)
await node1.mountSwap()
await node1.mountStore(persistMessages = true)
await node2.start()
node2.mountSwap()
node2.mountStore(persistMessages = true)
await node2.mountSwap()
await node2.mountStore(persistMessages = true)
await node2.wakuStore.handleMessage("/waku/2/default-waku/proto", message)
@ -112,11 +112,11 @@ procSuite "Waku SWAP Accounting":
# Start nodes and mount protocols
await node1.start()
node1.mountSwap(swapConfig)
node1.mountStore(persistMessages = true)
await node1.mountSwap(swapConfig)
await node1.mountStore(persistMessages = true)
await node2.start()
node2.mountSwap(swapConfig)
node2.mountStore(persistMessages = true)
await node2.mountSwap(swapConfig)
await node2.mountStore(persistMessages = true)
await node2.wakuStore.handleMessage("/waku/2/default-waku/proto", message)

View File

@ -34,7 +34,7 @@ const KEY_PATH = sourceDir / "resources/test_key.pem"
const CERT_PATH = sourceDir / "resources/test_cert.pem"
procSuite "WakuNode":
let rng = keys.newRng()
let rng = crypto.newRng()
asyncTest "Message published with content filter is retrievable":
let
@ -67,7 +67,7 @@ procSuite "WakuNode":
await node.start()
node.mountRelay()
await node.mountRelay()
# Subscribe our node to the pubSubTopic where all chat data go onto.
node.subscribe(pubSubTopic, relayHandler)
@ -119,11 +119,11 @@ procSuite "WakuNode":
await allFutures([node1.start(), node2.start()])
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
node1.mountFilter()
node2.mountFilter()
await node1.mountFilter()
await node2.mountFilter()
# Subscribe our node to the pubSubTopic where all chat data go onto.
node1.subscribe(pubSubTopic, relayHandler)
@ -166,12 +166,12 @@ procSuite "WakuNode":
otherFR = FilterRequest(contentFilters: @[ContentFilter(contentTopic: otherContentTopic)], subscribe: true)
await node1.start()
node1.mountRelay()
node1.mountFilter()
await node1.mountRelay()
await node1.mountFilter()
await node2.start()
node2.mountRelay()
node2.mountFilter()
await node2.mountRelay()
await node2.mountFilter()
node2.wakuFilter.setPeer(node1.switch.peerInfo.toRemotePeerInfo())
var defaultComplete = newFuture[bool]()
@ -237,12 +237,11 @@ procSuite "WakuNode":
filterRequest = FilterRequest(contentFilters: @[ContentFilter(contentTopic: contentTopic)], subscribe: true)
await node1.start()
node1.mountRelay()
node1.mountFilter()
await node1.mountRelay()
await node1.mountFilter()
await node2.start()
node2.mountRelay(relayMessages=false) # Do not start WakuRelay or subscribe to any topics
node2.mountFilter()
await node2.mountFilter()
node2.wakuFilter.setPeer(node1.switch.peerInfo.toRemotePeerInfo())
check:
@ -286,9 +285,9 @@ procSuite "WakuNode":
var completionFut = newFuture[bool]()
await node1.start()
node1.mountStore(persistMessages = true)
await node1.mountStore(persistMessages = true)
await node2.start()
node2.mountStore(persistMessages = true)
await node2.mountStore(persistMessages = true)
await node2.wakuStore.handleMessage("/waku/2/default-waku/proto", message)
@ -322,9 +321,9 @@ procSuite "WakuNode":
var completionFut = newFuture[bool]()
await node1.start()
node1.mountFilter()
await node1.mountFilter()
await node2.start()
node2.mountFilter()
await node2.mountFilter()
node1.wakuFilter.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
@ -363,12 +362,12 @@ procSuite "WakuNode":
storeComplFut = newFuture[bool]()
await node1.start()
node1.mountStore(persistMessages = true)
node1.mountFilter()
await node1.mountStore(persistMessages = true)
await node1.mountFilter()
await node2.start()
node2.mountStore(persistMessages = true)
node2.mountFilter()
await node2.mountStore(persistMessages = true)
await node2.mountFilter()
node2.wakuFilter.setPeer(node1.switch.peerInfo.toRemotePeerInfo())
node1.wakuStore.setPeer(node2.switch.peerInfo.toRemotePeerInfo())
@ -422,13 +421,13 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -472,13 +471,13 @@ procSuite "WakuNode":
# Setup node 1 with stable codec "/vac/waku/relay/2.0.0"
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
node1.wakuRelay.codec = "/vac/waku/relay/2.0.0"
# Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2"
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2"
check:
@ -528,8 +527,8 @@ procSuite "WakuNode":
node2PeerId = $(node2.switch.peerInfo.peerId)
node2Dns4Addr = "/dns4/localhost/tcp/60002/p2p/" & node2PeerId
node1.mountRelay()
node2.mountRelay()
await node1.mountRelay()
await node2.mountRelay()
await allFutures([node1.start(), node2.start()])
@ -570,13 +569,13 @@ procSuite "WakuNode":
# start all the nodes
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -648,7 +647,7 @@ procSuite "WakuNode":
await node1.start()
node1.mountRelay()
await node1.mountRelay()
check:
GossipSub(node1.wakuRelay).heartbeatFut.isNil == false
@ -660,7 +659,7 @@ procSuite "WakuNode":
node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60002))
node2.mountRelay()
await node2.mountRelay()
check:
# Relay has not yet started as node has not yet started
@ -692,17 +691,16 @@ procSuite "WakuNode":
# Light node, only lightpush
await node1.start()
node1.mountRelay(relayMessages=false) # Mount WakuRelay, but do not start or subscribe to any topics
node1.mountLightPush()
await node1.mountLightPush()
# Intermediate node
await node2.start()
node2.mountRelay(@[pubSubTopic])
node2.mountLightPush()
await node2.mountRelay(@[pubSubTopic])
await node2.mountLightPush()
# Receiving node
await node3.start()
node3.mountRelay(@[pubSubTopic])
await node3.mountRelay(@[pubSubTopic])
discard await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuLightPushCodec)
await sleepAsync(5.seconds)
@ -757,9 +755,9 @@ procSuite "WakuNode":
message = WakuMessage(payload: "hello world".toBytes(), contentTopic: contentTopic)
await node1.start()
node1.mountStore(persistMessages = true)
await node1.mountStore(persistMessages = true)
await node2.start()
node2.mountStore(persistMessages = true)
await node2.mountStore(persistMessages = true)
await node2.wakuStore.handleMessage("/waku/2/default-waku/proto", message)
@ -797,9 +795,9 @@ procSuite "WakuNode":
var completionFut = newFuture[bool]()
await node1.start()
node1.mountStore(persistMessages = true, store = store)
await node1.mountStore(persistMessages = true, store = store)
await node2.start()
node2.mountStore(persistMessages = true)
await node2.mountStore(persistMessages = true)
await node2.wakuStore.handleMessage(DefaultTopic, msg1)
await node2.wakuStore.handleMessage(DefaultTopic, msg2)
@ -852,15 +850,15 @@ procSuite "WakuNode":
# Node with connection limit set to 1
await node1.start()
node1.mountRelay()
await node1.mountRelay()
# Remote node 1
await node2.start()
node2.mountRelay()
await node2.mountRelay()
# Remote node 2
await node3.start()
node3.mountRelay()
await node3.mountRelay()
discard await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuRelayCodec)
await sleepAsync(3.seconds)
@ -888,10 +886,10 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -933,10 +931,10 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -977,10 +975,10 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
#delete websocket peer address
# TODO: a better way to find the index - this is too brittle
@ -1025,10 +1023,10 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -1078,10 +1076,10 @@ procSuite "WakuNode":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
node1.mountRelay(@[pubSubTopic])
await node1.mountRelay(@[pubSubTopic])
await node2.start()
node2.mountRelay(@[pubSubTopic])
await node2.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

View File

@ -47,7 +47,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
node1.mountRelay(@[rlnRelayPubSubTopic])
await node1.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelayStaticSetUp(1) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node1.mountRlnRelayStatic(group = groupOpt1.get(),
@ -58,7 +58,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
node2.mountRelay(@[rlnRelayPubSubTopic])
await node2.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelayStaticSetUp(2) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node2.mountRlnRelayStatic(group = groupOpt2.get(),
@ -69,7 +69,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
node3.mountRelay(@[rlnRelayPubSubTopic])
await node3.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelayStaticSetUp(3) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node3.mountRlnRelayStatic(group = groupOpt3.get(),
@ -133,7 +133,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
node1.mountRelay(@[rlnRelayPubSubTopic])
await node1.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelayStaticSetUp(1) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node1.mountRlnRelayStatic(group = groupOpt1.get(),
@ -144,7 +144,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
node2.mountRelay(@[rlnRelayPubSubTopic])
await node2.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelayStaticSetUp(2) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node2.mountRlnRelayStatic(group = groupOpt2.get(),
@ -155,7 +155,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
node3.mountRelay(@[rlnRelayPubSubTopic])
await node3.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelayStaticSetUp(3) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node3.mountRlnRelayStatic(group = groupOpt3.get(),
@ -237,7 +237,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
node1.mountRelay(@[rlnRelayPubSubTopic])
await node1.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelayStaticSetUp(1) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node1.mountRlnRelayStatic(group = groupOpt1.get(),
@ -248,7 +248,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
node2.mountRelay(@[rlnRelayPubSubTopic])
await node2.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelayStaticSetUp(2) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node2.mountRlnRelayStatic(group = groupOpt2.get(),
@ -259,7 +259,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
node3.mountRelay(@[rlnRelayPubSubTopic])
await node3.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelayStaticSetUp(3) # set up rln relay inputs
# mount rlnrelay in off-chain mode
node3.mountRlnRelayStatic(group = groupOpt3.get(),

2
vendor/news vendored

@ -1 +1 @@
Subproject commit fadc54f4683869fe9c97fe59e122daa2607abc8e
Subproject commit 5f09c76d44e1ea3bbe998ba0dc222f6fd9c54d57

2
vendor/nim-bearssl vendored

@ -1 +1 @@
Subproject commit 0ebb1d7a4af5f4b4d4756a9b6dbfe5d411fa55d9
Subproject commit f4c4233de453cb7eac0ce3f3ffad6496295f83ab

@ -1 +1 @@
Subproject commit 972f25d6c3a324848728d2d05796209f1b9d120e
Subproject commit 7631f7b2ee03398cb1512a79923264e8f9410af6

2
vendor/nim-chronos vendored

@ -1 +1 @@
Subproject commit b3548583fcc768d93654685e7ea55126c1752c29
Subproject commit 1334cdfebdc6182ff752e7d20796d9936cc8faa3

@ -1 +1 @@
Subproject commit fc03a0c4e172471294ee5f58a880fa1c8495c00b
Subproject commit 40c6f0b378a34d4812e410e9d6762e21f059de4c

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit 92cd608a5f47de1aa55861afa6dcc13bea4ae842
Subproject commit b057057e64cc041d797e894a728963b10fbfac69

@ -1 +1 @@
Subproject commit 49e2c52eb5dda46b1c9c10d079abe7bffe6cea89
Subproject commit 682b9c6541bbb82b09e415fbe9ef944570b62f76

@ -1 +1 @@
Subproject commit f83fbce4d6ec7927b75be3f85e4fa905fcb69788
Subproject commit e88e231dfcef4585fe3b2fbd9b664dbd28a88040

2
vendor/nim-json-rpc vendored

@ -1 +1 @@
Subproject commit 335f292a5816910aebf215e3a88db8a665133e0e
Subproject commit 32ba2d16b919310a8c6797ac45747595f301a207

@ -1 +1 @@
Subproject commit 5034fef8d048d8d9e5e0228864dacf627d35b96f
Subproject commit e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4

@ -1 +1 @@
Subproject commit 2ea147a71c4c05d64fdff5e7b8d8990eb5821399
Subproject commit b29c22ba0ef13de50b779c776830dbea1d50cd33

2
vendor/nim-libp2p vendored

@ -1 +1 @@
Subproject commit 718374d890f3997b56bee61cb5971eb367f05b59
Subproject commit abbeaab684c500f4c6ff5881797bb8f184b41ccc

2
vendor/nim-metrics vendored

@ -1 +1 @@
Subproject commit 11edec862f96e42374bc2d584c84cc88d5d1f95f
Subproject commit 0a6477268e850d7bc98347b3875301524871765f

@ -1 +1 @@
Subproject commit 11df74552d3a3abe2c722c536c8075ef6814d5fa
Subproject commit 48f94ebd194874d98da14a2494e89fc3a619a7ae

2
vendor/nim-presto vendored

@ -1 +1 @@
Subproject commit 1dba6dd6f466cd4e7b793b0e473c237ce453d82a
Subproject commit d298b0ba307023f2664757cee92ec94dde6acd18

@ -1 +1 @@
Subproject commit 9631fbd1c81c8b25ff8740df440ca7ba87fa6131
Subproject commit 493d18b8292fc03aa4f835fd825dea1183f97466

2
vendor/nim-stew vendored

@ -1 +1 @@
Subproject commit 412a691f5d29c93bee8f083d213ee8f2c6578bed
Subproject commit 018760954a1530b7336aed7133393908875d860f

2
vendor/nim-stint vendored

@ -1 +1 @@
Subproject commit c05f75a8dae5f0066db5008dbe41a803ecbfbbcf
Subproject commit d1acb427434bf3aa8433acb6873228bbfd16b0f4

@ -1 +1 @@
Subproject commit b6867213f289f12d8f15acdd154a32b98df332bf
Subproject commit 29f98cec6fd84652b2954394d3c86dd63c85dd8d

@ -1 +1 @@
Subproject commit 90369dd67b4a41109e26716829f6f3f077eddf38
Subproject commit 1043942d3479ba931437b371834de0d19218e621

@ -1 +1 @@
Subproject commit 26f31488a881d638d239359a17082a36c055b726
Subproject commit bdb5eca353acd46654f89edeef6f84d0dba8f5d0

2
vendor/nim-web3 vendored

@ -1 +1 @@
Subproject commit 755b6dc92b1545d6c9eb242c551e8188e35ffe5d
Subproject commit 06ef3497d4017f0f85edcadb6f743452566a2448

2
vendor/nim-websock vendored

@ -1 +1 @@
Subproject commit 47b486b52f850d3534b8a1e778fcf9cf40ffe7f6
Subproject commit af8779d9d95e488ec9fd2d584b6328bd506c702b

2
vendor/nim-zlib vendored

@ -1 +1 @@
Subproject commit 74cdeb54b21bededb5a515d36f608bc1850555a2
Subproject commit 6a6670afba6b97b29b920340e2641978c05ab4d8

@ -1 +1 @@
Subproject commit a8ab2dc39aad4d69ba3be72868772d851b4b9741
Subproject commit c7977002fb9e631a946b6589a0b7adc7dc9acef9

2
vendor/nimcrypto vendored

@ -1 +1 @@
Subproject commit a5742a9a214ac33f91615f3862c7b099aec43b00
Subproject commit 24e006df85927f64916e60511620583b11403178

2
vendor/zerokit vendored

@ -1 +1 @@
Subproject commit 3378aed85716144231e036c076fe3f14ed9538de
Subproject commit 2455bea9225b183d2441620037a13cc38b3108c7

View File

@ -106,7 +106,7 @@ type
nodekeyV2* {.
desc: "P2P node private key as hex"
defaultValue: crypto.PrivateKey.random(Secp256k1, keys.newRng()[]).tryGet()
defaultValue: crypto.PrivateKey.random(Secp256k1, crypto.newRng()[]).tryGet()
name: "nodekey-v2" }: crypto.PrivateKey
store* {.

View File

@ -49,7 +49,7 @@ type
nodev2*: WakuNode
nodev2PubsubTopic: wakunode2.Topic # Pubsub topic to bridge to/from
seen: seq[hashes.Hash] # FIFO queue of seen WakuMessages. Used for deduplication.
rng: ref BrHmacDrbgContext
rng: ref HmacDrbgContext
v1Pool: seq[Node] # Pool of v1 nodes for possible connections
targetV1Peers: int # Target number of v1 peers to maintain
started: bool # Indicates that bridge is running
@ -215,7 +215,7 @@ proc new*(T: type WakuBridge,
nodev1Key: keys.KeyPair,
nodev1Address: Address,
powRequirement = 0.002,
rng: ref BrHmacDrbgContext,
rng: ref HmacDrbgContext,
topicInterest = none(seq[waku_protocol.Topic]),
bloom = some(fullBloom()),
# NodeV2 initialisation
@ -233,7 +233,7 @@ proc new*(T: type WakuBridge,
var
nodev1 = newEthereumNode(keys = nodev1Key, address = nodev1Address,
networkId = NetworkId(1), chain = nil, clientId = ClientIdV1,
addAllCapabilities = false, rng = rng)
addAllCapabilities = false, bindUdpPort = nodev1Address.udpPort, bindTcpPort = nodev1Address.tcpPort, rng = rng)
nodev1.addCapability Waku # Always enable Waku protocol
@ -268,7 +268,7 @@ proc start*(bridge: WakuBridge) {.async.} =
debug "Start listening on Waku v1"
# Start listening on Waku v1 node
let connectedFut = bridge.nodev1.connectToNetwork(@[],
let connectedFut = bridge.nodev1.connectToNetwork(
true, # Always enable listening
false # Disable discovery (only discovery v4 is currently supported)
)
@ -284,7 +284,7 @@ proc start*(bridge: WakuBridge) {.async.} =
# Always mount relay for bridge.
# `triggerSelf` is false on a `bridge` to avoid duplicates
bridge.nodev2.mountRelay(triggerSelf = false)
await bridge.nodev2.mountRelay(triggerSelf = false)
# Bridging
# Handle messages on Waku v1 and bridge to Waku v2
@ -317,9 +317,9 @@ proc stop*(bridge: WakuBridge) {.async.} =
{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
import
eth/p2p/whispernodes,
libp2p/nameresolving/dnsresolver,
./utils/nat,
../whisper/whispernodes,
../v1/node/rpc/wakusim,
../v1/node/rpc/waku,
../v1/node/rpc/key_storage,
@ -424,13 +424,13 @@ when isMainModule:
# Now load rest of config
# Mount configured Waku v2 protocols
mountLibp2pPing(bridge.nodev2)
waitFor mountLibp2pPing(bridge.nodev2)
if conf.store:
mountStore(bridge.nodev2, persistMessages = false) # Bridge does not persist messages
waitFor mountStore(bridge.nodev2, persistMessages = false) # Bridge does not persist messages
if conf.filter:
mountFilter(bridge.nodev2)
waitFor mountFilter(bridge.nodev2)
if conf.staticnodesV2.len > 0:
waitFor connectToNodes(bridge.nodev2, conf.staticnodesV2)

View File

@ -10,7 +10,7 @@ from stew/byteutils import hexToSeqByte, hexToByteArray
# Blatant copy of Whisper RPC but for the Waku protocol
proc setupWakuRPC*(node: EthereumNode, keys: KeyStorage, rpcsrv: RpcServer,
rng: ref BrHmacDrbgContext) =
rng: ref HmacDrbgContext) =
rpcsrv.rpc("waku_version") do() -> string:
## Returns string of the current Waku protocol version.

View File

@ -5,15 +5,15 @@ import
metrics, metrics/chronicles_support, metrics/chronos_httpserver,
stew/shims/net as stewNet,
eth/[keys, p2p], eth/common/utils,
eth/p2p/[discovery, enode, peer_pool, bootnodes, whispernodes],
../../whisper/whisper_protocol,
eth/p2p/[discovery, enode, peer_pool, bootnodes],
../../whisper/[whispernodes, whisper_protocol],
../protocol/[waku_protocol, waku_bridge],
../../common/utils/nat,
./rpc/[waku, wakusim, key_storage], ./waku_helpers, ./config
const clientId = "Nimbus waku node"
proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext)
proc run(config: WakuNodeConf, rng: ref HmacDrbgContext)
{.raises: [Defect, ValueError, RpcBindError, CatchableError, Exception]} =
## `udpPort` is only supplied to satisfy underlying APIs but is not
## actually a supported transport.
@ -32,10 +32,15 @@ proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext)
Address(ip: ipExt.get(),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(udpPort + config.portsShift))
bootnodes = if config.bootnodes.len > 0: setBootNodes(config.bootnodes)
elif config.fleet == prod: setBootNodes(StatusBootNodes)
elif config.fleet == staging: setBootNodes(StatusBootNodesStaging)
elif config.fleet == test : setBootNodes(StatusBootNodesTest)
else: @[]
# Set-up node
var node = newEthereumNode(config.nodekey, address, NetworkId(1), nil, clientId,
addAllCapabilities = false, rng = rng)
addAllCapabilities = false, bootstrapNodes = bootnodes, bindUdpPort = address.udpPort, bindTcpPort = address.tcpPort, rng = rng)
if not config.bootnodeOnly:
node.addCapability Waku # Always enable Waku protocol
var topicInterest: Option[seq[waku_protocol.Topic]]
@ -57,14 +62,7 @@ proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext)
if config.whisperBridge:
node.shareMessageQueue()
# TODO: Status fleet bootnodes are discv5? That will not work.
let bootnodes = if config.bootnodes.len > 0: setBootNodes(config.bootnodes)
elif config.fleet == prod: setBootNodes(StatusBootNodes)
elif config.fleet == staging: setBootNodes(StatusBootNodesStaging)
elif config.fleet == test : setBootNodes(StatusBootNodesTest)
else: @[]
let connectedFut = node.connectToNetwork(bootnodes, not config.noListen,
let connectedFut = node.connectToNetwork(not config.noListen,
config.discovery)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:

View File

@ -72,7 +72,7 @@ type
maxMsgSize*: uint32
confirmationsEnabled*: bool
rateLimits*: Option[RateLimits]
topics*: Option[seq[Topic]]
topics*: Option[seq[whisper_types.Topic]]
Accounting* = ref object
sent*: uint
@ -84,7 +84,7 @@ type
bloom*: Bloom
isLightNode*: bool
trusted*: bool
topics*: Option[seq[Topic]]
topics*: Option[seq[whisper_types.Topic]]
received: HashSet[Hash]
accounting*: Accounting
@ -112,7 +112,7 @@ type
lightNode*: Option[bool]
confirmationsEnabled*: Option[bool]
rateLimits*: Option[RateLimits]
topicInterest*: Option[seq[Topic]]
topicInterest*: Option[seq[whisper_types.Topic]]
KeyKind* = enum
powRequirementKey,
@ -191,7 +191,7 @@ proc read*(rlp: var Rlp, T: typedesc[StatusOptions]):
of rateLimitsKey:
result.rateLimits = some(rlp.read(RateLimits))
of topicInterestKey:
result.topicInterest = some(rlp.read(seq[Topic]))
result.topicInterest = some(rlp.read(seq[whisper_types.Topic]))
proc allowed*(msg: Message, config: WakuConfig): bool =
# Check max msg size, already happens in RLPx but there is a specific waku
@ -235,7 +235,7 @@ proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} =
network.config.confirmationsEnabled = false
network.config.rateLimits = none(RateLimits)
network.config.maxMsgSize = defaultMaxMsgSize
network.config.topics = none(seq[Topic])
network.config.topics = none(seq[whisper_types.Topic])
asyncSpawn node.run(network)
p2pProtocol Waku(version = wakuVersion,
@ -349,7 +349,7 @@ p2pProtocol Waku(version = wakuVersion,
peer.state.topics = options.topicInterest
elif options.bloomFilter.isSome():
peer.state.bloom = options.bloomFilter.get()
peer.state.topics = none(seq[Topic])
peer.state.topics = none(seq[whisper_types.Topic])
if options.powRequirement.isSome():
peer.state.powRequirement = options.powRequirement.get()
@ -498,7 +498,7 @@ proc queueMessage(node: EthereumNode, msg: Message): bool =
# Public EthereumNode calls ----------------------------------------------------
proc postEncoded*(node: EthereumNode, ttl: uint32,
topic: Topic, encodedPayload: seq[byte],
topic: whisper_types.Topic, encodedPayload: seq[byte],
powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
@ -548,7 +548,7 @@ proc postEncoded*(node: EthereumNode, ttl: uint32,
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
symKey = none[SymKey](), src = none[PrivateKey](),
ttl: uint32, topic: Topic, payload: seq[byte],
ttl: uint32, topic: whisper_types.Topic, payload: seq[byte],
padding = none[seq[byte]](), powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
@ -614,7 +614,7 @@ proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
# NOTE: do we need a tolerance of old bloom filter for some time?
node.protocolState(Waku).config.bloom = some(bloom)
# reset topics
node.protocolState(Waku).config.topics = none(seq[Topic])
node.protocolState(Waku).config.topics = none(seq[whisper_types.Topic])
var futures: seq[Future[void]] = @[]
let list = StatusOptions(bloomFilter: some(bloom))
@ -624,7 +624,7 @@ proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setTopicInterest*(node: EthereumNode, topics: seq[Topic]):
proc setTopicInterest*(node: EthereumNode, topics: seq[whisper_types.Topic]):
Future[bool] {.async.} =
if topics.len > topicInterestMax:
return false

View File

@ -35,7 +35,7 @@ type
nodekey* {.
desc: "P2P node private key as 64 char hex string.",
defaultValue: crypto.PrivateKey.random(Secp256k1, keys.newRng()[]).tryGet()
defaultValue: crypto.PrivateKey.random(Secp256k1, crypto.newRng()[]).tryGet()
name: "nodekey" }: crypto.PrivateKey
listenAddress* {.

View File

@ -108,7 +108,7 @@ proc new*(T: type WakuDiscoveryV5,
privateKey: keys.PrivateKey,
flags: WakuEnrBitfield,
enrFields: openArray[(string, seq[byte])],
rng: ref BrHmacDrbgContext,
rng: ref HmacDrbgContext,
discv5Config: protocol.DiscoveryConfig = protocol.defaultDiscoveryConfig): T =
## TODO: consider loading from a configurable bootstrap file
@ -140,7 +140,7 @@ proc new*(T: type WakuDiscoveryV5,
privateKey: keys.PrivateKey,
flags: WakuEnrBitfield,
enrFields: openArray[(string, seq[byte])],
rng: ref BrHmacDrbgContext,
rng: ref HmacDrbgContext,
discv5Config: protocol.DiscoveryConfig = protocol.defaultDiscoveryConfig): T =
var bootstrapEnrs: seq[enr.Record]

View File

@ -54,7 +54,7 @@ proc toWakuMessage*(relayMessage: WakuRelayMessage, version: uint32): WakuMessag
version: version,
timestamp: t)
proc toWakuMessage*(relayMessage: WakuRelayMessage, version: uint32, rng: ref BrHmacDrbgContext, symkey: Option[SymKey], pubKey: Option[keys.PublicKey]): WakuMessage =
proc toWakuMessage*(relayMessage: WakuRelayMessage, version: uint32, rng: ref HmacDrbgContext, symkey: Option[SymKey], pubKey: Option[keys.PublicKey]): WakuMessage =
# @TODO global definition for default content topic
const defaultCT = ContentTopic("/waku/2/default-content/proto")

View File

@ -3,6 +3,7 @@
import
std/[tables,sequtils],
chronicles,
eth/keys,
json_rpc/rpcserver,
nimcrypto/sysrand,
../wakunode2,
@ -17,8 +18,7 @@ logScope:
const futTimeout* = 5.seconds # Max time to wait for futures
proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHmacDrbgContext, topicCache: TopicCache) =
proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, topicCache: TopicCache) =
## Private API version 1 definitions
## Definitions for symmetric cryptography
@ -38,7 +38,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm
debug "post_waku_v2_private_v1_symmetric_message"
let msg = message.toWakuMessage(version = 1,
rng = rng,
rng = node.rng,
pubKey = none(waku_payload.PublicKey),
symkey = some(symkey.toSymKey()))
@ -73,7 +73,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm
## Generates and returns a public/private key pair for asymmetric message encryption and decryption.
debug "get_waku_v2_private_v1_asymmetric_keypair"
let privKey = waku_payload.PrivateKey.random(rng[])
let privKey = waku_payload.PrivateKey.random(node.rng[])
return WakuKeyPair(seckey: privKey, pubkey: privKey.toPublicKey())
@ -82,7 +82,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm
debug "post_waku_v2_private_v1_asymmetric_message"
let msg = message.toWakuMessage(version = 1,
rng = rng,
rng = node.rng,
symkey = none(SymKey),
pubKey = some(publicKey.toPublicKey()))

View File

@ -36,7 +36,7 @@ proc installRelayPostSubscriptionsV1Handler*(router: var RestRouter, node: WakuN
if contentBody.isNone():
return RestApiResponse.badRequest()
let reqBodyContentType = MediaType.init(contentBody.get().contentType)
let reqBodyContentType = MediaType.init($contentBody.get().contentType)
if reqBodyContentType != MIMETYPE_JSON:
return RestApiResponse.badRequest()
@ -67,7 +67,7 @@ proc installRelayDeleteSubscriptionsV1Handler*(router: var RestRouter, node: Wak
if contentBody.isNone():
return RestApiResponse.badRequest()
let reqBodyContentType = MediaType.init(contentBody.get().contentType)
let reqBodyContentType = MediaType.init($contentBody.get().contentType)
if reqBodyContentType != MIMETYPE_JSON:
return RestApiResponse.badRequest()
@ -124,7 +124,7 @@ proc installRelayPostMessagesV1Handler*(router: var RestRouter, node: WakuNode)
if contentBody.isNone():
return RestApiResponse.badRequest()
let reqBodyContentType = MediaType.init(contentBody.get().contentType)
let reqBodyContentType = MediaType.init($contentBody.get().contentType)
if reqBodyContentType != MIMETYPE_JSON:
return RestApiResponse.badRequest()

View File

@ -65,7 +65,7 @@ proc decodePayload*(message: WakuMessage, keyInfo: KeyInfo):
# TODO: same story as for `decodedPayload`, but then regarding the `Payload`
# object.
proc encode*(payload: Payload, version: uint32, rng: var BrHmacDrbgContext):
proc encode*(payload: Payload, version: uint32, rng: var HmacDrbgContext):
WakuResult[seq[byte]] =
case version
of 0:
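
Caller-side note (not part of the commit): the renamed context is still taken by `var`, so a `ref HmacDrbgContext` is dereferenced at the call site. A minimal sketch, assuming `keys.newRng()` from the updated nim-eth and an already-built `somePayload: Payload`:

let rng = keys.newRng()                      # ref HmacDrbgContext after the update
let encoded = somePayload.encode(version = 1'u32, rng = rng[])
if encoded.isErr():
  echo "payload encoding failed: ", encoded.error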

View File

@ -419,7 +419,7 @@ proc info*(node: WakuNode): WakuInfo =
let wakuInfo = WakuInfo(listenAddresses: listenStr, enrUri: enrUri)
return wakuInfo
proc mountFilter*(node: WakuNode, filterTimeout: Duration = WakuFilterTimeout) {.raises: [Defect, LPError]} =
proc mountFilter*(node: WakuNode, filterTimeout: Duration = WakuFilterTimeout) {.async, raises: [Defect, LPError]} =
info "mounting filter"
proc filterHandler(requestId: string, msg: MessagePush) {.async, gcsafe.} =
@ -434,18 +434,26 @@ proc mountFilter*(node: WakuNode, filterTimeout: Duration = WakuFilterTimeout) {
waku_node_messages.inc(labelValues = ["filter"])
node.wakuFilter = WakuFilter.init(node.peerManager, node.rng, filterHandler, filterTimeout)
if node.started:
# Node has started already. Let's start filter too.
await node.wakuFilter.start()
node.switch.mount(node.wakuFilter, protocolMatcher(WakuFilterCodec))
# NOTE: If using the swap protocol, it must be mounted before store. This is
# because store is using a reference to the swap protocol.
proc mountSwap*(node: WakuNode, swapConfig: SwapConfig = SwapConfig.init()) {.raises: [Defect, LPError].} =
proc mountSwap*(node: WakuNode, swapConfig: SwapConfig = SwapConfig.init()) {.async, raises: [Defect, LPError].} =
info "mounting swap", mode = $swapConfig.mode
node.wakuSwap = WakuSwap.init(node.peerManager, node.rng, swapConfig)
node.switch.mount(node.wakuSwap, protocolMatcher(WakuSwapCodec))
# NYI - Do we need this?
#node.subscriptions.subscribe(WakuSwapCodec, node.wakuSwap.subscription())
proc mountStore*(node: WakuNode, store: MessageStore = nil, persistMessages: bool = false, capacity = StoreDefaultCapacity, isSqliteOnly = false) {.raises: [Defect, LPError].} =
node.wakuSwap = WakuSwap.init(node.peerManager, node.rng, swapConfig)
if node.started:
# Node has started already. Let's start swap too.
await node.wakuSwap.start()
node.switch.mount(node.wakuSwap, protocolMatcher(WakuSwapCodec))
proc mountStore*(node: WakuNode, store: MessageStore = nil, persistMessages: bool = false, capacity = StoreDefaultCapacity, isSqliteOnly = false) {.async, raises: [Defect, LPError].} =
info "mounting store"
if node.wakuSwap.isNil:
@ -454,6 +462,10 @@ proc mountStore*(node: WakuNode, store: MessageStore = nil, persistMessages: boo
else:
debug "mounting store with swap"
node.wakuStore = WakuStore.init(node.peerManager, node.rng, store, node.wakuSwap, persistMessages=persistMessages, capacity=capacity, isSqliteOnly=isSqliteOnly)
if node.started:
# Node has started already. Let's start store too.
await node.wakuStore.start()
node.switch.mount(node.wakuStore, protocolMatcher(WakuStoreCodec))
@ -488,11 +500,10 @@ proc startRelay*(node: WakuNode) {.async.} =
proc mountRelay*(node: WakuNode,
topics: seq[string] = newSeq[string](),
relayMessages = true,
triggerSelf = true,
peerExchangeHandler = none(RoutingRecordsHandler))
# @TODO: Better error handling: CatchableError is raised by `waitFor`
{.gcsafe, raises: [Defect, InitializationError, LPError, CatchableError].} =
{.async, gcsafe, raises: [Defect, InitializationError, LPError, CatchableError].} =
proc msgIdProvider(m: messages.Message): Result[MessageID, ValidationResult] =
let mh = MultiHash.digest("sha2-256", m.data)
@ -510,7 +521,7 @@ proc mountRelay*(node: WakuNode,
maxMessageSize = MaxWakuMessageSize
)
info "mounting relay", relayMessages=relayMessages
info "mounting relay"
## The default relay topics are the union of
## all configured topics plus the hard-coded defaultTopic(s)
@ -521,24 +532,16 @@ proc mountRelay*(node: WakuNode,
wakuRelay.parameters.enablePX = true # Feature flag for peer exchange in nim-libp2p
wakuRelay.routingRecordsHandler.add(peerExchangeHandler.get())
node.switch.mount(wakuRelay, protocolMatcher(WakuRelayCodec))
node.wakuRelay = wakuRelay
if node.started:
# Node has started already. Let's start relay too.
await node.startRelay()
if relayMessages:
## Some nodes may choose not to have the capability to relay messages (e.g. "light" nodes).
## All nodes, however, currently require WakuRelay, regardless of desired capabilities.
## This is to allow protocol stream negotiation with relay-capable nodes to succeed.
## Here we mount relay on the switch only, but do not proceed to subscribe to any pubsub
## topics. We also never start the relay protocol. node.wakuRelay remains nil.
## @TODO: in future, this WakuRelay dependency will be removed completely
node.wakuRelay = wakuRelay
node.switch.mount(wakuRelay, protocolMatcher(WakuRelayCodec))
info "relay mounted successfully"
if node.started:
# Node has started already. Let's start relay too.
waitFor node.startRelay()
proc mountLightPush*(node: WakuNode) {.raises: [Defect, LPError].} =
proc mountLightPush*(node: WakuNode) {.async, raises: [Defect, LPError].} =
info "mounting light push"
if node.wakuRelay.isNil:
@ -548,9 +551,13 @@ proc mountLightPush*(node: WakuNode) {.raises: [Defect, LPError].} =
debug "mounting lightpush with relay"
node.wakuLightPush = WakuLightPush.init(node.peerManager, node.rng, nil, node.wakuRelay)
if node.started:
# Node has started already. Let's start lightpush too.
await node.wakuLightPush.start()
node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))
proc mountLibp2pPing*(node: WakuNode) {.raises: [Defect, LPError].} =
proc mountLibp2pPing*(node: WakuNode) {.async, raises: [Defect, LPError].} =
info "mounting libp2p ping protocol"
try:
@ -559,7 +566,11 @@ proc mountLibp2pPing*(node: WakuNode) {.raises: [Defect, LPError].} =
# This is necessary as `Ping.new*` does not have explicit `raises` requirement
# @TODO: remove exception handling once explicit `raises` in ping module
raise newException(LPError, "Failed to initialize ping protocol")
if node.started:
# Node has started already. Let's start ping too.
await node.libp2pPing.start()
node.switch.mount(node.libp2pPing)
proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
@ -745,8 +756,6 @@ proc start*(node: WakuNode) {.async.} =
##
## Status: Implemented.
await node.switch.start()
# TODO Get this from WakuNode obj
let peerInfo = node.switch.peerInfo
info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
@ -762,11 +771,24 @@ proc start*(node: WakuNode) {.async.} =
## Update switch peer info with announced addrs
node.updateSwitchPeerInfo()
# Start mounted protocols. For now we start each one explicitly
if not node.wakuRelay.isNil:
await node.startRelay()
if not node.wakuStore.isNil:
await node.wakuStore.start()
if not node.wakuFilter.isNil:
await node.wakuFilter.start()
if not node.wakuLightPush.isNil:
await node.wakuLightPush.start()
if not node.wakuSwap.isNil:
await node.wakuSwap.start()
if not node.libp2pPing.isNil:
await node.libp2pPing.start()
await node.switch.start()
node.started = true
info "Node started successfully"
node.started = true
proc stop*(node: WakuNode) {.async.} =
if not node.wakuRelay.isNil:
@ -1013,13 +1035,13 @@ when isMainModule:
peerExchangeHandler = some(handlePeerExchange)
mountRelay(node,
conf.topics.split(" "),
relayMessages = conf.relay, # Indicates if node is capable to relay messages
peerExchangeHandler = peerExchangeHandler)
if conf.relay:
waitFor mountRelay(node,
conf.topics.split(" "),
peerExchangeHandler = peerExchangeHandler)
# Keepalive mounted on all nodes
mountLibp2pPing(node)
waitFor mountLibp2pPing(node)
when defined(rln):
if conf.rlnRelay:
@ -1028,26 +1050,26 @@ when isMainModule:
debug "could not mount WakuRlnRelay"
if conf.swap:
mountSwap(node)
waitFor mountSwap(node)
# TODO Set swap peer, for now should be same as store peer
# Store setup
if (conf.storenode != "") or (conf.store):
mountStore(node, mStorage, conf.persistMessages, conf.storeCapacity, conf.sqliteStore)
waitFor mountStore(node, mStorage, conf.persistMessages, conf.storeCapacity, conf.sqliteStore)
if conf.storenode != "":
setStorePeer(node, conf.storenode)
# NOTE Must be mounted after relay
if (conf.lightpushnode != "") or (conf.lightpush):
mountLightPush(node)
waitFor mountLightPush(node)
if conf.lightpushnode != "":
setLightPushPeer(node, conf.lightpushnode)
# Filter setup. NOTE Must be mounted after relay
if (conf.filternode != "") or (conf.filter):
mountFilter(node, filterTimeout = chronos.seconds(conf.filterTimeout))
waitFor mountFilter(node, filterTimeout = chronos.seconds(conf.filterTimeout))
if conf.filternode != "":
setFilterPeer(node, conf.filternode)
@ -1210,7 +1232,7 @@ when isMainModule:
waitFor node.stop()
quit(QuitSuccess)
c_signal(SIGTERM, handleSigterm)
c_signal(ansi_c.SIGTERM, handleSigterm)
debug "Node setup complete"

View File

@ -36,7 +36,7 @@ proc startRpcServer*(node: WakuNode, rpcIp: ValidIpAddress, rpcPort: Port, conf:
if conf.rpcPrivate:
# Private API access allows WakuRelay functionality that
# is backwards compatible with Waku v1.
installPrivateApiHandlers(node, rpcServer, node.rng, topicCache)
installPrivateApiHandlers(node, rpcServer, topicCache)
if conf.filter:
let messageCache = newTable[ContentTopic, seq[WakuMessage]]()

View File

@ -1,4 +1,5 @@
import
bearssl/rand,
eth/p2p/discoveryv5/enr,
libp2p/crypto/crypto,
libp2p/protocols/ping,
@ -41,7 +42,7 @@ type
enr*: enr.Record
libp2pPing*: Ping
filters*: Filters
rng*: ref BrHmacDrbgContext
rng*: ref rand.HmacDrbgContext
wakuDiscv5*: WakuDiscoveryV5
announcedAddresses* : seq[MultiAddress]
started*: bool # Indicates that node has started listening

View File

@ -4,7 +4,7 @@ import
chronicles,
chronos,
metrics,
bearssl,
bearssl/rand,
libp2p/protocols/protocol,
libp2p/crypto/crypto
import
@ -74,7 +74,7 @@ type
WakuFilterResult*[T] = Result[T, string]
WakuFilter* = ref object of LPProtocol
rng*: ref BrHmacDrbgContext
rng*: ref rand.HmacDrbgContext
peerManager*: PeerManager
pushHandler*: MessagePushHandler
subscriptions*: seq[Subscription]
@ -134,7 +134,7 @@ proc init(wf: WakuFilter) =
proc init*(T: type WakuFilter,
peerManager: PeerManager,
rng: ref BrHmacDrbgContext,
rng: ref rand.HmacDrbgContext,
handler: MessagePushHandler,
timeout: Duration = WakuFilterTimeout): T =
let wf = WakuFilter(rng: rng,

View File

@ -6,7 +6,7 @@ import
chronicles,
chronos,
metrics,
bearssl,
bearssl/rand,
libp2p/crypto/crypto
import
@ -46,7 +46,7 @@ type
WakuLightPushResult*[T] = Result[T, string]
WakuLightPush* = ref object of LPProtocol
rng*: ref BrHmacDrbgContext
rng*: ref rand.HmacDrbgContext
peerManager*: PeerManager
requestHandler*: PushRequestHandler
relayReference*: WakuRelay
@ -98,7 +98,7 @@ proc init*(wl: WakuLightPush) =
wl.handler = handle
wl.codec = WakuLightPushCodec
proc init*(T: type WakuLightPush, peerManager: PeerManager, rng: ref BrHmacDrbgContext, handler: PushRequestHandler, relay: WakuRelay = nil): T =
proc init*(T: type WakuLightPush, peerManager: PeerManager, rng: ref rand.HmacDrbgContext, handler: PushRequestHandler, relay: WakuRelay = nil): T =
debug "init"
let rng = crypto.newRng()
let wl = WakuLightPush(rng: rng,

View File

@ -11,7 +11,7 @@
import std/[oids, options, strutils, tables]
import chronos
import chronicles
import bearssl
import bearssl/rand
import stew/[results, byteutils, endians2]
import nimcrypto/[utils, sha2, hmac]
@ -159,9 +159,9 @@ proc setCipherStateKey*(cs: var CipherState, key: ChaChaPolyKey) =
cs.k = key
# Generates a random Symmetric Cipher State for test purposes
proc randomCipherState*(rng: var BrHmacDrbgContext, nonce: uint64 = 0): CipherState =
proc randomCipherState*(rng: var HmacDrbgContext, nonce: uint64 = 0): CipherState =
var randomCipherState: CipherState
brHmacDrbgGenerate(rng, randomCipherState.k)
hmacDrbgGenerate(rng, randomCipherState.k)
setNonce(randomCipherState, nonce)
return randomCipherState

View File

@ -7,7 +7,7 @@
import std/[oids, options, strutils, tables]
import chronos
import chronicles
import bearssl
import bearssl/rand
import stew/[results, endians2]
import nimcrypto/[utils, sha2, hmac]
@ -229,7 +229,7 @@ proc processMessagePatternPayload(hs: var HandshakeState, transportMessage: seq[
return payload
# We process an input handshake message according to the current handshake state and return the next handshake step's handshake message
proc processMessagePatternTokens(rng: var BrHmacDrbgContext, hs: var HandshakeState, inputHandshakeMessage: seq[NoisePublicKey] = @[]): Result[seq[NoisePublicKey], cstring]
proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var HandshakeState, inputHandshakeMessage: seq[NoisePublicKey] = @[]): Result[seq[NoisePublicKey], cstring]
{.raises: [Defect, NoiseHandshakeError, NoiseMalformedHandshake, NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError].} =
# We retrieve current message pattern (direction + tokens) to process
@ -463,7 +463,7 @@ proc initialize*(hsPattern: HandshakePattern, ephemeralKey: KeyPair = default(Ke
# Each user in a handshake alternates writing and reading of handshake messages.
# If the user is writing the handshake message, the transport message (if not empty) has to be passed to transportMessage and readPayloadV2 can be left to its default value
# If the user is reading the handshake message, the read payload v2 has to be passed to readPayloadV2 and the transportMessage can be left to its default value.
proc stepHandshake*(rng: var BrHmacDrbgContext, hs: var HandshakeState, readPayloadV2: PayloadV2 = default(PayloadV2), transportMessage: seq[byte] = @[]): Result[HandshakeStepResult, cstring]
proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readPayloadV2: PayloadV2 = default(PayloadV2), transportMessage: seq[byte] = @[]): Result[HandshakeStepResult, cstring]
{.raises: [Defect, NoiseHandshakeError, NoiseMalformedHandshake, NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError].} =
var hsStepResult: HandshakeStepResult

View File

@ -8,7 +8,7 @@
import std/[oids, options, strutils, tables, sequtils]
import chronos
import chronicles
import bearssl
import bearssl/rand
import stew/[results, endians2, byteutils]
import nimcrypto/[utils, sha2, hmac]
@ -28,9 +28,9 @@ logScope:
#################################
# Generates random byte sequences of given size
proc randomSeqByte*(rng: var BrHmacDrbgContext, size: int): seq[byte] =
proc randomSeqByte*(rng: var HmacDrbgContext, size: int): seq[byte] =
var output = newSeq[byte](size.uint32)
brHmacDrbgGenerate(rng, output)
hmacDrbgGenerate(rng, output)
return output
# Pads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3
@ -69,7 +69,7 @@ proc isDefault*[T](value: T): bool =
#################################
# Generate random (public, private) Elliptic Curve key pairs
proc genKeyPair*(rng: var BrHmacDrbgContext): KeyPair =
proc genKeyPair*(rng: var HmacDrbgContext): KeyPair =
var keyPair: KeyPair
keyPair.privateKey = EllipticCurveKey.random(rng)
keyPair.publicKey = keyPair.privateKey.public()
@ -158,18 +158,18 @@ proc dh*(private: EllipticCurveKey, public: EllipticCurveKey): EllipticCurveKey
#################################
# Generates a random ChaChaPolyKey for testing encryption/decryption
proc randomChaChaPolyKey*(rng: var BrHmacDrbgContext): ChaChaPolyKey =
proc randomChaChaPolyKey*(rng: var HmacDrbgContext): ChaChaPolyKey =
var key: ChaChaPolyKey
brHmacDrbgGenerate(rng, key)
hmacDrbgGenerate(rng, key)
return key
# Generates a random ChaChaPoly Cipher State for testing encryption/decryption
proc randomChaChaPolyCipherState*(rng: var BrHmacDrbgContext): ChaChaPolyCipherState =
proc randomChaChaPolyCipherState*(rng: var HmacDrbgContext): ChaChaPolyCipherState =
var randomCipherState: ChaChaPolyCipherState
randomCipherState.k = randomChaChaPolyKey(rng)
brHmacDrbgGenerate(rng, randomCipherState.nonce)
hmacDrbgGenerate(rng, randomCipherState.nonce)
randomCipherState.ad = newSeq[byte](32)
brHmacDrbgGenerate(rng, randomCipherState.ad)
hmacDrbgGenerate(rng, randomCipherState.ad)
return randomCipherState
#################################################################
@ -190,7 +190,7 @@ proc toNoisePublicKey*(publicKey: EllipticCurveKey): NoisePublicKey =
return noisePublicKey
# Generates a random Noise public key
proc genNoisePublicKey*(rng: var BrHmacDrbgContext): NoisePublicKey =
proc genNoisePublicKey*(rng: var HmacDrbgContext): NoisePublicKey =
var noisePublicKey: NoisePublicKey
# We generate a random key pair
let keyPair: KeyPair = genKeyPair(rng)
@ -281,7 +281,7 @@ proc `==`*(p1, p2: PayloadV2): bool =
# Generates a random PayloadV2
proc randomPayloadV2*(rng: var BrHmacDrbgContext): PayloadV2 =
proc randomPayloadV2*(rng: var HmacDrbgContext): PayloadV2 =
var payload2: PayloadV2
# To generate a random protocol id, we generate a random 1-byte long sequence, and we convert the first element to uint8
payload2.protocolId = randomSeqByte(rng, 1)[0].uint8
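
For reference, a small usage sketch of the renamed helpers above (not part of the commit; it assumes `crypto.newRng()` still yields a `ref HmacDrbgContext`, as used elsewhere in this diff):

let rng = crypto.newRng()
let keyPair  = genKeyPair(rng[])          # random elliptic-curve key pair
let noisePk  = genNoisePublicKey(rng[])   # random, unencrypted NoisePublicKey
let padding  = randomSeqByte(rng[], 16)   # 16 random bytes via hmacDrbgGenerate
let payload2 = randomPayloadV2(rng[])     # random PayloadV2 for round-trip tests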

View File

@ -8,7 +8,7 @@ import
stew/results,
chronicles,
chronos,
bearssl,
bearssl/rand,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
@ -65,7 +65,7 @@ type
WakuStore* = ref object of LPProtocol
peerManager*: PeerManager
rng*: ref BrHmacDrbgContext
rng*: ref rand.HmacDrbgContext
messages*: StoreQueueRef # in-memory message store
store*: MessageStore # sqlite DB handle
wakuSwap*: WakuSwap
@ -186,7 +186,7 @@ proc init*(ws: WakuStore, capacity = StoreDefaultCapacity) =
debug "the number of messages in the memory", messageNum=ws.messages.len
waku_store_messages.set(ws.messages.len.int64, labelValues = ["stored"])
proc init*(T: type WakuStore, peerManager: PeerManager, rng: ref BrHmacDrbgContext,
proc init*(T: type WakuStore, peerManager: PeerManager, rng: ref rand.HmacDrbgContext,
store: MessageStore = nil, wakuSwap: WakuSwap = nil, persistMessages = true,
capacity = StoreDefaultCapacity, isSqliteOnly = false): T =
let ws = WakuStore(rng: rng, peerManager: peerManager, store: store, wakuSwap: wakuSwap, persistMessages: persistMessages, isSqliteOnly: isSqliteOnly)

View File

@ -34,7 +34,7 @@
import
std/[tables, options, json],
bearssl,
bearssl/rand,
chronos, chronicles, metrics, stew/results,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
@ -285,7 +285,7 @@ proc init*(wakuSwap: WakuSwap) =
wakuswap.applyPolicy = applyPolicy
# TODO Expression return?
proc init*(T: type WakuSwap, peerManager: PeerManager, rng: ref BrHmacDrbgContext, swapConfig: SwapConfig): T =
proc init*(T: type WakuSwap, peerManager: PeerManager, rng: ref rand.HmacDrbgContext, swapConfig: SwapConfig): T =
info "wakuSwap init 2"
let
accounting = initTable[PeerId, int]()

View File

@ -2,7 +2,7 @@
import
std/tables,
bearssl,
bearssl/rand,
libp2p/protocols/protocol,
../../node/peer_manager/peer_manager
@ -44,7 +44,7 @@ type
WakuSwap* = ref object of LPProtocol
peerManager*: PeerManager
rng*: ref BrHmacDrbgContext
rng*: ref rand.HmacDrbgContext
text*: string
accounting*: Table[PeerId, int]
credit*: CreditHandler

View File

@ -2,9 +2,9 @@
{.push raises: [Defect].}
import bearssl, stew/byteutils
import bearssl/rand, stew/byteutils
proc generateRequestId*(rng: ref BrHmacDrbgContext): string =
proc generateRequestId*(rng: ref HmacDrbgContext): string =
var bytes: array[10, byte]
brHmacDrbgGenerate(rng[], bytes)
hmacDrbgGenerate(rng[], bytes)
return toHex(bytes)
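
The rename is mechanical at call sites as well; a minimal sketch (here `rng` is assumed to be the node's `ref HmacDrbgContext`):

let requestId = generateRequestId(rng)   # 10 random bytes, hex-encoded
var salt: array[16, byte]
hmacDrbgGenerate(rng[], salt)            # fills the buffer in place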

View File

@ -342,7 +342,7 @@ proc queueMessage(node: EthereumNode, msg: Message): bool =
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
symKey = none[SymKey](), src = none[PrivateKey](),
ttl: uint32, topic: Topic, payload: seq[byte],
ttl: uint32, topic: whisper_types.Topic, payload: seq[byte],
padding = none[seq[byte]](), powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =

View File

@ -9,7 +9,7 @@
import
std/[algorithm, bitops, math, options, tables, times, hashes],
chronicles, stew/[byteutils, endians2], metrics, bearssl,
chronicles, stew/[byteutils, endians2], metrics, bearssl/rand,
nimcrypto/[bcmode, hash, keccak, rijndael],
eth/[keys, rlp, p2p], eth/p2p/ecies
@ -160,9 +160,9 @@ proc topicBloom*(topic: Topic): Bloom =
doAssert idx <= 511
result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16))
proc generateRandomID*(rng: var BrHmacDrbgContext): string =
proc generateRandomID*(rng: var HmacDrbgContext): string =
var bytes: array[256 div 8, byte]
brHmacDrbgGenerate(rng, bytes)
hmacDrbgGenerate(rng, bytes)
toHex(bytes)
proc `or`(a, b: Bloom): Bloom =
@ -231,7 +231,7 @@ proc decryptAesGcm(cipher: openarray[byte], key: SymKey): Option[seq[byte]] =
# simply because that makes it closer to EIP 627 - see also:
# https://github.com/paritytech/parity-ethereum/issues/9652
proc encode*(rng: var BrHmacDrbgContext, self: Payload): Option[seq[byte]] =
proc encode*(rng: var HmacDrbgContext, self: Payload): Option[seq[byte]] =
## Encode a payload according so as to make it suitable to put in an Envelope
## The format follows EIP 627 - https://eips.ethereum.org/EIPS/eip-627
@ -284,7 +284,7 @@ proc encode*(rng: var BrHmacDrbgContext, self: Payload): Option[seq[byte]] =
plain.add self.padding.get()
else:
var padding = newSeq[byte](padLen)
brHmacDrbgGenerate(rng, padding)
hmacDrbgGenerate(rng, padding)
plain.add padding
@ -303,7 +303,7 @@ proc encode*(rng: var BrHmacDrbgContext, self: Payload): Option[seq[byte]] =
if self.symKey.isSome(): # Symmetric key present - encryption requested
var iv: array[gcmIVLen, byte]
brHmacDrbgGenerate(rng, iv)
hmacDrbgGenerate(rng, iv)
return some(encryptAesGcm(plain, self.symKey.get(), iv))
@ -579,7 +579,7 @@ proc initFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](),
powReq: powReq, allowP2P: allowP2P, bloom: toBloom(topics))
proc subscribeFilter*(
rng: var BrHmacDrbgContext, filters: var Filters, filter: Filter,
rng: var HmacDrbgContext, filters: var Filters, filter: Filter,
handler: FilterMsgHandler = nil): string =
# NOTE: Should we allow a filter without a key? Encryption is mandatory in v6?
# Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence.

View File

@ -0,0 +1,67 @@
const
# Whisper nodes taken from:
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].whisper[] + "\","' -r
WhisperNodes* = [
"enode://b957e51f41e4abab8382e1ea7229e88c6e18f34672694c6eae389eac22dab8655622bbd4a08192c321416b9becffaab11c8e2b7a5d0813b922aa128b82990dab@47.75.222.178:443",
"enode://66ba15600cda86009689354c3a77bdf1a97f4f4fb3ab50ffe34dbc904fac561040496828397be18d9744c75881ffc6ac53729ddbd2cdbdadc5f45c400e2622f7@178.128.141.87:443",
"enode://182ed5d658d1a1a4382c9e9f7c9e5d8d9fec9db4c71ae346b9e23e1a589116aeffb3342299bdd00e0ab98dbf804f7b2d8ae564ed18da9f45650b444aed79d509@34.68.132.118:443",
"enode://8bebe73ddf7cf09e77602c7d04c93a73f455b51f24ae0d572917a4792f1dec0bb4c562759b8830cc3615a658d38c1a4a38597a1d7ae3ba35111479fc42d65dec@47.75.85.212:443",
"enode://4ea35352702027984a13274f241a56a47854a7fd4b3ba674a596cff917d3c825506431cf149f9f2312a293bb7c2b1cca55db742027090916d01529fe0729643b@134.209.136.79:443",
"enode://fbeddac99d396b91d59f2c63a3cb5fc7e0f8a9f7ce6fe5f2eed5e787a0154161b7173a6a73124a4275ef338b8966dc70a611e9ae2192f0f2340395661fad81c0@34.67.230.193:443",
"enode://ac3948b2c0786ada7d17b80cf869cf59b1909ea3accd45944aae35bf864cc069126da8b82dfef4ddf23f1d6d6b44b1565c4cf81c8b98022253c6aea1a89d3ce2@47.75.88.12:443",
"enode://ce559a37a9c344d7109bd4907802dd690008381d51f658c43056ec36ac043338bd92f1ac6043e645b64953b06f27202d679756a9c7cf62fdefa01b2e6ac5098e@134.209.136.123:443",
"enode://c07aa0deea3b7056c5d45a85bca42f0d8d3b1404eeb9577610f386e0a4744a0e7b2845ae328efc4aa4b28075af838b59b5b3985bffddeec0090b3b7669abc1f3@35.226.92.155:443",
"enode://385579fc5b14e04d5b04af7eee835d426d3d40ccf11f99dbd95340405f37cf3bbbf830b3eb8f70924be0c2909790120682c9c3e791646e2d5413e7801545d353@47.244.221.249:443",
"enode://4e0a8db9b73403c9339a2077e911851750fc955db1fc1e09f81a4a56725946884dd5e4d11258eac961f9078a393c45bcab78dd0e3bc74e37ce773b3471d2e29c@134.209.136.101:443",
"enode://0624b4a90063923c5cc27d12624b6a49a86dfb3623fcb106801217fdbab95f7617b83fa2468b9ae3de593ff6c1cf556ccf9bc705bfae9cb4625999765127b423@35.222.158.246:443",
"enode://b77bffc29e2592f30180311dd81204ab845e5f78953b5ba0587c6631be9c0862963dea5eb64c90617cf0efd75308e22a42e30bc4eb3cd1bbddbd1da38ff6483e@47.75.10.177:443",
"enode://a8bddfa24e1e92a82609b390766faa56cf7a5eef85b22a2b51e79b333c8aaeec84f7b4267e432edd1cf45b63a3ad0fc7d6c3a16f046aa6bc07ebe50e80b63b8c@178.128.141.249:443",
"enode://a5fe9c82ad1ffb16ae60cb5d4ffe746b9de4c5fbf20911992b7dd651b1c08ba17dd2c0b27ee6b03162c52d92f219961cc3eb14286aca8a90b75cf425826c3bd8@104.154.230.58:443",
"enode://cf5f7a7e64e3b306d1bc16073fba45be3344cb6695b0b616ccc2da66ea35b9f35b3b231c6cf335fdfaba523519659a440752fc2e061d1e5bc4ef33864aac2f19@47.75.221.196:443",
"enode://887cbd92d95afc2c5f1e227356314a53d3d18855880ac0509e0c0870362aee03939d4074e6ad31365915af41d34320b5094bfcc12a67c381788cd7298d06c875@178.128.141.0:443",
"enode://282e009967f9f132a5c2dd366a76319f0d22d60d0c51f7e99795a1e40f213c2705a2c10e4cc6f3890319f59da1a535b8835ed9b9c4b57c3aad342bf312fd7379@35.223.240.17:443",
"enode://13d63a1f85ccdcbd2fb6861b9bd9d03f94bdba973608951f7c36e5df5114c91de2b8194d71288f24bfd17908c48468e89dd8f0fb8ccc2b2dedae84acdf65f62a@47.244.210.80:443",
"enode://2b01955d7e11e29dce07343b456e4e96c081760022d1652b1c4b641eaf320e3747871870fa682e9e9cfb85b819ce94ed2fee1ac458904d54fd0b97d33ba2c4a4@134.209.136.112:443",
"enode://b706a60572634760f18a27dd407b2b3582f7e065110dae10e3998498f1ae3f29ba04db198460d83ed6d2bfb254bb06b29aab3c91415d75d3b869cd0037f3853c@35.239.5.162:443",
"enode://32915c8841faaef21a6b75ab6ed7c2b6f0790eb177ad0f4ea6d731bacc19b938624d220d937ebd95e0f6596b7232bbb672905ee12601747a12ee71a15bfdf31c@47.75.59.11:443",
"enode://0d9d65fcd5592df33ed4507ce862b9c748b6dbd1ea3a1deb94e3750052760b4850aa527265bbaf357021d64d5cc53c02b410458e732fafc5b53f257944247760@178.128.141.42:443",
"enode://e87f1d8093d304c3a9d6f1165b85d6b374f1c0cc907d39c0879eb67f0a39d779be7a85cbd52920b6f53a94da43099c58837034afa6a7be4b099bfcd79ad13999@35.238.106.101:443",
]
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].whisper[] + "\","' -r
WhisperNodesStaging* = [
"enode://00395686f5954662a3796e170b9e87bbaf68a050d57e9987b78a2292502dae44aae2b8803280a017ec9af9be0b3121db9d6b3693ab3a0451a866bcbedd58fdac@47.52.226.137:443",
"enode://914c0b30f27bab30c1dfd31dad7652a46fda9370542aee1b062498b1345ee0913614b8b9e3e84622e84a7203c5858ae1d9819f63aece13ee668e4f6668063989@167.99.19.148:443",
"enode://2d897c6e846949f9dcf10279f00e9b8325c18fe7fa52d658520ad7be9607c83008b42b06aefd97cfe1fdab571f33a2a9383ff97c5909ed51f63300834913237e@35.192.0.86:443",
]
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].whisper[] + "\","' -r
WhisperNodesTest* = [
"enode://ad38f94030a846cc7005b7a1f3b6b01bf4ef59d34e8d3d6f4d12df23d14ba8656702a435d34cf4df3b412c0c1923df5adcce8461321a0d8ffb9435b26e572c2a@47.52.255.194:443",
"enode://1d193635e015918fb85bbaf774863d12f65d70c6977506187ef04420d74ec06c9e8f0dcb57ea042f85df87433dab17a1260ed8dde1bdf9d6d5d2de4b7bf8e993@206.189.243.163:443",
"enode://f593a27731bc0f8eb088e2d39222c2d59dfb9bf0b3950d7a828d51e8ab9e08fffbd9916a82fd993c1a080c57c2bd70ed6c36f489a969de697aff93088dbee1a9@35.194.31.108:443",
]
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].boot[] + "\","' -r
StatusBootNodes* = [
"enode://6e6554fb3034b211398fcd0f0082cbb6bd13619e1a7e76ba66e1809aaa0c5f1ac53c9ae79cf2fd4a7bacb10d12010899b370c75fed19b991d9c0cdd02891abad@47.75.99.169:443",
"enode://436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a@178.128.138.128:443",
"enode://32ff6d88760b0947a3dee54ceff4d8d7f0b4c023c6dad34568615fcae89e26cc2753f28f12485a4116c977be937a72665116596265aa0736b53d46b27446296a@34.70.75.208:443",
"enode://23d0740b11919358625d79d4cac7d50a34d79e9c69e16831c5c70573757a1f5d7d884510bc595d7ee4da3c1508adf87bbc9e9260d804ef03f8c1e37f2fb2fc69@47.52.106.107:443",
"enode://5395aab7833f1ecb671b59bf0521cf20224fe8162fc3d2675de4ee4d5636a75ec32d13268fc184df8d1ddfa803943906882da62a4df42d4fccf6d17808156a87@178.128.140.188:443",
"enode://5405c509df683c962e7c9470b251bb679dd6978f82d5b469f1f6c64d11d50fbd5dd9f7801c6ad51f3b20a5f6c7ffe248cc9ab223f8bcbaeaf14bb1c0ef295fd0@35.223.215.156:443",
]
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].boot[] + "\","' -r
StatusBootNodesStaging* = [
"enode://630b0342ca4e9552f50714b6c8e28d6955bc0fd14e7950f93bc3b2b8cc8c1f3b6d103df66f51a13d773b5db0f130661fb5c7b8fa21c48890c64c79b41a56a490@47.91.229.44:443",
"enode://f79fb3919f72ca560ad0434dcc387abfe41e0666201ebdada8ede0462454a13deb05cda15f287d2c4bd85da81f0eb25d0a486bbbc8df427b971ac51533bd00fe@174.138.107.239:443",
"enode://10a78c17929a7019ef4aa2249d7302f76ae8a06f40b2dc88b7b31ebff4a623fbb44b4a627acba296c1ced3775d91fbe18463c15097a6a36fdb2c804ff3fc5b35@35.238.97.234:443",
]
# curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].boot[] + "\","' -r
StatusBootNodesTest* = [
"enode://daae2e72820e86e942fa2a8aa7d6e9954d4043a753483d8bd338e16be82cf962392d5c0e1ae57c3d793c3d3dddd8fd58339262e4234dc966f953cd73b535f5fa@47.52.188.149:443",
"enode://9e0988575eb7717c25dea72fd11c7b37767dc09c1a7686f7c2ec577d308d24b377ceb675de4317474a1a870e47882732967f4fa785b02ba95d669b31d464dec0@206.189.243.164:443",
"enode://c1e5018887c863d64e431b69bf617561087825430e4401733f5ba77c70db14236df381fefb0ebe1ac42294b9e261bbe233dbdb83e32c586c66ae26c8de70cb4c@35.188.168.137:443",
]