# nwaku/waku/v2/node/wakunode2.nim


{.push raises: [Defect].}
import
std/[hashes, options, tables, strutils, sequtils, os],
chronos, chronicles, metrics,
stew/shims/net as stewNet,
stew/byteutils,
eth/keys,
nimcrypto,
eth/p2p/discoveryv5/enr,
libp2p/crypto/crypto,
libp2p/protocols/ping,
libp2p/protocols/pubsub/[gossipsub, rpc/messages],
libp2p/nameresolving/nameresolver,
libp2p/[builders, multihash],
libp2p/transports/[transport, tcptransport, wstransport]
import
../node/storage/message/waku_store_queue,
../protocol/[waku_relay, waku_message],
../protocol/waku_store,
../protocol/waku_swap/waku_swap,
../protocol/waku_filter,
../protocol/waku_lightpush,
../protocol/waku_rln_relay/[waku_rln_relay_types],
../utils/[peers, requests, wakuswitch, wakuenr],
./peer_manager/peer_manager,
./storage/message/message_store,
./dnsdisc/waku_dnsdisc,
./discv5/waku_discv5,
./wakunode2_types
export
builders,
waku_relay, waku_message,
waku_swap,
waku_rln_relay_types,
wakunode2_types
when defined(rln):
import ../protocol/waku_rln_relay/waku_rln_relay_utils
declarePublicCounter waku_node_messages, "number of messages received", ["type"]
declarePublicGauge waku_node_filters, "number of content filter subscriptions"
declarePublicGauge waku_node_errors, "number of wakunode errors", ["type"]
declarePublicCounter waku_node_conns_initiated, "number of connections initiated by this node", ["source"]
logScope:
topics = "wakunode"
# Default clientId
const clientId* = "Nimbus Waku v2 node"
# Default topic
const defaultTopic* = "/waku/2/default-waku/proto"
# Default Waku Filter Timeout
const WakuFilterTimeout: Duration = 1.days
proc protocolMatcher(codec: string): Matcher =
## Returns a protocol matcher function for the provided codec
proc match(proto: string): bool {.gcsafe.} =
## Matches a proto with any postfix to the provided codec.
## E.g. if the codec is `/vac/waku/filter/2.0.0` it matches the protos:
## `/vac/waku/filter/2.0.0`, `/vac/waku/filter/2.0.0-beta3`, `/vac/waku/filter/2.0.0-actualnonsense`
return proto.startsWith(codec)
return match
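# Illustrative sketch, not part of the original module: how a matcher returned by
# `protocolMatcher` behaves for a versioned codec (the codec string here is just an
# example value).
#
#   let matchFilter = protocolMatcher("/vac/waku/filter/2.0.0")
#   doAssert matchFilter("/vac/waku/filter/2.0.0")        # exact codec matches
#   doAssert matchFilter("/vac/waku/filter/2.0.0-beta3")  # any postfix matches too
#   doAssert not matchFilter("/vac/waku/store/2.0.0")     # other codecs do not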
proc updateSwitchPeerInfo(node: WakuNode) =
## TODO: remove this when supported upstream
##
## nim-libp2p does not yet support announcing addrs
## different from bound addrs.
##
## This is a temporary workaround to replace
## peer info addrs in switch to announced
## addresses.
##
## WARNING: this should only be called once the switch
## has already been started.
if node.announcedAddresses.len > 0:
node.switch.peerInfo.addrs = node.announcedAddresses
template ip4TcpEndPoint(address, port): MultiAddress =
MultiAddress.init(address, tcpProtocol, port)
template dns4Ma(dns4DomainName: string): MultiAddress =
MultiAddress.init("/dns4/" & dns4DomainName).tryGet()
template tcpPortMa(port: Port): MultiAddress =
MultiAddress.init("/tcp/" & $port).tryGet()
template dns4TcpEndPoint(dns4DomainName: string, port: Port): MultiAddress =
dns4Ma(dns4DomainName) & tcpPortMa(port)
template wsFlag(wssEnabled: bool): MultiAddress =
if wssEnabled: MultiAddress.init("/wss").tryGet()
else: MultiAddress.init("/ws").tryGet()
proc new*(T: type WakuNode, nodeKey: crypto.PrivateKey,
bindIp: ValidIpAddress, bindPort: Port,
extIp = none(ValidIpAddress), extPort = none(Port),
peerStorage: PeerStorage = nil,
maxConnections = builders.MaxConnections,
wsBindPort: Port = (Port)8000,
wsEnabled: bool = false,
wssEnabled: bool = false,
secureKey: string = "",
secureCert: string = "",
wakuFlags = none(WakuEnrBitfield),
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
dns4DomainName = none(string),
discv5UdpPort = none(Port)
): T
{.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
## Creates a Waku Node.
##
## Status: Implemented.
##
## Initialize addresses
let
# Bind addresses
hostAddress = ip4TcpEndPoint(bindIp, bindPort)
wsHostAddress = if wsEnabled or wssEnabled: some(ip4TcpEndPoint(bindIp, wsbindPort) & wsFlag(wssEnabled))
else: none(MultiAddress)
# Setup external addresses, if available
var
hostExtAddress, wsExtAddress = none(MultiAddress)
if (dns4DomainName.isSome()):
# Use dns4 for externally announced addresses
hostExtAddress = some(dns4TcpEndPoint(dns4DomainName.get(), extPort.get()))
if (wsHostAddress.isSome()):
wsExtAddress = some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled))
else:
# No public domain name, use ext IP if available
if extIp.isSome() and extPort.isSome():
hostExtAddress = some(ip4TcpEndPoint(extIp.get(), extPort.get()))
if (wsHostAddress.isSome()):
wsExtAddress = some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled))
var announcedAddresses: seq[MultiAddress]
if hostExtAddress.isSome:
announcedAddresses.add(hostExtAddress.get())
else:
announcedAddresses.add(hostAddress) # We always have at least a bind address for the host
if wsExtAddress.isSome:
announcedAddresses.add(wsExtAddress.get())
elif wsHostAddress.isSome:
announcedAddresses.add(wsHostAddress.get())
## Initialize peer
let
rng = crypto.newRng()
enrIp = if extIp.isSome(): extIp
else: some(bindIp)
enrTcpPort = if extPort.isSome(): extPort
else: some(bindPort)
enrMultiaddrs = if wsExtAddress.isSome: @[wsExtAddress.get()] # Only add ws/wss to `multiaddrs` field
elif wsHostAddress.isSome: @[wsHostAddress.get()]
else: @[]
enr = initEnr(nodeKey,
enrIp,
enrTcpPort,
discv5UdpPort,
wakuFlags,
enrMultiaddrs)
info "Initializing networking", addrs=announcedAddresses
var switch = newWakuSwitch(some(nodekey),
hostAddress,
wsHostAddress,
transportFlags = {ServerFlags.ReuseAddr},
rng = rng,
maxConnections = maxConnections,
wssEnabled = wssEnabled,
secureKeyPath = secureKey,
secureCertPath = secureCert,
nameResolver = nameResolver,
sendSignedPeerRecord = sendSignedPeerRecord)
let wakuNode = WakuNode(
peerManager: PeerManager.new(switch, peerStorage),
switch: switch,
rng: rng,
enr: enr,
filters: Filters.init(),
announcedAddresses: announcedAddresses
)
return wakuNode
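# Illustrative sketch, not part of the original module: constructing a node with the
# defaults above. The key generation call and the bind address/port are assumptions
# made for this example.
#
#   let
#     rng = crypto.newRng()
#     nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet()
#     node = WakuNode.new(nodeKey,
#                         ValidIpAddress.init("0.0.0.0"), Port(60000))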
proc subscribe(node: WakuNode, topic: Topic, handler: Option[TopicHandler]) =
if node.wakuRelay.isNil:
error "Invalid API call to `subscribe`. WakuRelay not mounted."
# @TODO improved error handling
return
info "subscribe", topic=topic
proc defaultHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
# A default handler should be registered for all topics
trace "Hit default handler", topic=topic, data=data
let msg = WakuMessage.init(data)
if msg.isOk():
# Notify mounted protocols of new message
if (not node.wakuFilter.isNil):
await node.wakuFilter.handleMessage(topic, msg.value())
if (not node.wakuStore.isNil):
await node.wakuStore.handleMessage(topic, msg.value())
waku_node_messages.inc(labelValues = ["relay"])
let wakuRelay = node.wakuRelay
if topic notin PubSub(wakuRelay).topics:
# Add default handler only for new topics
debug "Registering default handler", topic=topic
wakuRelay.subscribe(topic, defaultHandler)
if handler.isSome:
debug "Registering handler", topic=topic
wakuRelay.subscribe(topic, handler.get())
proc subscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) =
## Subscribes to a PubSub topic. Triggers handler when receiving messages on
## this topic. TopicHandler is a method that takes a topic and some data.
##
## NOTE The data field SHOULD be decoded as a WakuMessage.
## Status: Implemented.
node.subscribe(topic, some(handler))
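# Illustrative sketch, not part of the original module: subscribing a handler to the
# default pubsub topic; assumes relay has been mounted on `node`.
#
#   proc exampleHandler(topic: Topic, data: seq[byte]) {.async, gcsafe.} =
#     let msg = WakuMessage.init(data)
#     if msg.isOk():
#       info "message received", contentTopic=msg.value().contentTopic
#
#   node.subscribe(defaultTopic, exampleHandler)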
proc subscribe*(node: WakuNode, request: FilterRequest, handler: ContentFilterHandler) {.async, gcsafe.} =
## Registers for messages that match a specific filter. Triggers the handler whenever a message is received.
## ContentFilterHandler is a method that takes a MessagePush.
##
## Status: Implemented.
# Sanity check for well-formed subscribe FilterRequest
doAssert(request.subscribe, "invalid subscribe request")
info "subscribe content", filter=request
var id = generateRequestId(node.rng)
if node.wakuFilter.isNil == false:
let
pubsubTopic = request.pubsubTopic
contentTopics = request.contentFilters.mapIt(it.contentTopic)
let resSubscription = await node.wakuFilter.subscribe(pubsubTopic, contentTopics)
if resSubscription.isOk():
id = resSubscription.get()
else:
# Failed to subscribe
error "remote subscription to filter failed", filter = request
waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
# Register handler for filter, whether remote subscription succeeded or not
node.filters.addContentFilters(id, request.pubSubTopic, request.contentFilters, handler)
waku_node_filters.set(node.filters.len.int64)
proc unsubscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) =
## Unsubscribes a handler from a PubSub topic.
##
## Status: Implemented.
if node.wakuRelay.isNil:
error "Invalid API call to `unsubscribe`. WakuRelay not mounted."
# @TODO improved error handling
return
info "unsubscribe", topic=topic
let wakuRelay = node.wakuRelay
wakuRelay.unsubscribe(@[(topic, handler)])
proc unsubscribeAll*(node: WakuNode, topic: Topic) =
## Unsubscribes all handlers registered on a specific PubSub topic.
##
## Status: Implemented.
if node.wakuRelay.isNil:
error "Invalid API call to `unsubscribeAll`. WakuRelay not mounted."
# @TODO improved error handling
return
info "unsubscribeAll", topic=topic
let wakuRelay = node.wakuRelay
wakuRelay.unsubscribeAll(topic)
proc unsubscribe*(node: WakuNode, request: FilterRequest) {.async, gcsafe.} =
## Unsubscribe from a content filter.
##
## Status: Implemented.
# Sanity check for well-formed unsubscribe FilterRequest
doAssert(request.subscribe == false, "invalid unsubscribe request")
info "unsubscribe content", filter=request
let
pubsubTopic = request.pubsubTopic
contentTopics = request.contentFilters.mapIt(it.contentTopic)
discard await node.wakuFilter.unsubscribe(pubsubTopic, contentTopics)
node.filters.removeContentFilters(request.contentFilters)
waku_node_filters.set(node.filters.len.int64)
proc publish*(node: WakuNode, topic: Topic, message: WakuMessage) {.async, gcsafe.} =
## Publish a `WakuMessage` to a PubSub topic. `WakuMessage` should contain a
## `contentTopic` field for light node functionality. This field may also
## be omitted.
##
## Status: Implemented.
if node.wakuRelay.isNil:
error "Invalid API call to `publish`. WakuRelay not mounted. Try `lightpush` instead."
# @TODO improved error handling
return
let wakuRelay = node.wakuRelay
trace "publish", topic=topic, contentTopic=message.contentTopic
var publishingMessage = message
let data = message.encode().buffer
discard await wakuRelay.publish(topic, data)
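# Illustrative sketch, not part of the original module: publishing on the default
# topic; the payload and content topic values are assumptions made for this example.
#
#   let msg = WakuMessage(payload: toBytes("hello waku"),
#                         contentTopic: ContentTopic("/example/1/demo/proto"))
#   await node.publish(defaultTopic, msg)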
proc lightpush*(node: WakuNode, topic: Topic, message: WakuMessage, handler: PushResponseHandler) {.async, gcsafe.} =
## Pushes a `WakuMessage` to a node which relays it further on the PubSub topic.
## Returns whether relaying was successful or not in `handler`.
## `WakuMessage` should contain a `contentTopic` field for light node
## functionality. This field may also be omitted.
##
## Status: Implemented.
debug "Publishing with lightpush", topic=topic, contentTopic=message.contentTopic
let rpc = PushRequest(pubSubTopic: topic, message: message)
await node.wakuLightPush.request(rpc, handler)
proc query*(node: WakuNode, query: HistoryQuery, handler: QueryHandlerFunc) {.async, gcsafe.} =
## Queries known nodes for historical messages. Triggers the handler whenever a response is received.
## QueryHandlerFunc is a method that takes a HistoryResponse.
##
## Status: Implemented.
# TODO Once waku swap is less experimental, this can simplified
if node.wakuSwap.isNil:
debug "Using default query"
await node.wakuStore.query(query, handler)
else:
debug "Using SWAPAccounting query"
# TODO wakuSwap now part of wakuStore object
await node.wakuStore.queryWithAccounting(query, handler)
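# Illustrative sketch, not part of the original module: querying a store peer for
# historical messages. The HistoryQuery/HistoryResponse field names below are
# assumptions made for the example, not guaranteed by this module.
#
#   proc storeHandler(response: HistoryResponse) {.gcsafe.} =
#     info "history received", numMessages=response.messages.len
#
#   let exampleQuery = HistoryQuery(
#     contentFilters: @[HistoryContentFilter(contentTopic: ContentTopic("/example/1/demo/proto"))])
#   await node.query(exampleQuery, storeHandler)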
proc resume*(node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo])) {.async, gcsafe.} =
## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku node has been online
## for resume to work properly the waku node must have the store protocol mounted in the full mode (i.e., persisting messages)
## messages are stored in the wakuStore's messages field and in the message db
## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
## an offset of 20 seconds is added to the time window to account for nodes' asynchrony
## peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed).
## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from.
## The history gets fetched successfully if the dialed peer has been online during the queried time window.
if not node.wakuStore.isNil:
let retrievedMessages = await node.wakuStore.resume(peerList)
if retrievedMessages.isOk:
info "the number of retrieved messages since the last online time: ", number=retrievedMessages.value
# TODO Extend with more relevant info: topics, peers, memory usage, online time, etc
proc info*(node: WakuNode): WakuInfo =
## Returns information about the Node, such as what multiaddress it can be reached at.
##
## Status: Implemented.
##
let peerInfo = node.switch.peerInfo
var listenStr : seq[string]
for address in node.announcedAddresses:
var fulladdr = $address & "/p2p/" & $peerInfo.peerId
listenStr &= fulladdr
let enrUri = if node.wakuDiscV5 != nil: node.wakuDiscV5.protocol.localNode.record.toUri()
else: node.enr.toUri()
let wakuInfo = WakuInfo(listenAddresses: listenStr, enrUri: enrUri)
return wakuInfo
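# Illustrative sketch, not part of the original module: reading back the node's
# advertised addresses and ENR once the switch is running.
#
#   let nodeInfo = node.info()
#   info "node info", listenAddresses=nodeInfo.listenAddresses, enr=nodeInfo.enrUri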
proc mountFilter*(node: WakuNode, filterTimeout: Duration = WakuFilterTimeout) {.raises: [Defect, LPError]} =
info "mounting filter"
proc filterHandler(requestId: string, msg: MessagePush) {.async, gcsafe.} =
info "push received"
for message in msg.messages:
node.filters.notify(message, requestId) # Trigger filter handlers on a light node
if not node.wakuStore.isNil and (requestId in node.filters):
let pubSubTopic = node.filters[requestId].pubSubTopic
await node.wakuStore.handleMessage(pubSubTopic, message)
waku_node_messages.inc(labelValues = ["filter"])
node.wakuFilter = WakuFilter.init(node.peerManager, node.rng, filterHandler, filterTimeout)
node.switch.mount(node.wakuFilter, protocolMatcher(WakuFilterCodec))
# NOTE: If using the swap protocol, it must be mounted before store. This is
# because store is using a reference to the swap protocol.
proc mountSwap*(node: WakuNode, swapConfig: SwapConfig = SwapConfig.init()) {.raises: [Defect, LPError].} =
info "mounting swap", mode = $swapConfig.mode
node.wakuSwap = WakuSwap.init(node.peerManager, node.rng, swapConfig)
node.switch.mount(node.wakuSwap, protocolMatcher(WakuSwapCodec))
# NYI - Do we need this?
#node.subscriptions.subscribe(WakuSwapCodec, node.wakuSwap.subscription())
proc mountStore*(node: WakuNode, store: MessageStore = nil, persistMessages: bool = false, capacity = StoreDefaultCapacity, isSqliteOnly = false) {.raises: [Defect, LPError].} =
info "mounting store"
if node.wakuSwap.isNil:
debug "mounting store without swap"
node.wakuStore = WakuStore.init(node.peerManager, node.rng, store, persistMessages=persistMessages, capacity=capacity, isSqliteOnly=isSqliteOnly)
else:
debug "mounting store with swap"
node.wakuStore = WakuStore.init(node.peerManager, node.rng, store, node.wakuSwap, persistMessages=persistMessages, capacity=capacity, isSqliteOnly=isSqliteOnly)
node.switch.mount(node.wakuStore, protocolMatcher(WakuStoreCodec))
proc startRelay*(node: WakuNode) {.async.} =
if node.wakuRelay.isNil:
trace "Failed to start relay. Not mounted."
return
## Setup and start relay protocol
info "starting relay"
# Topic subscriptions
for topic in node.wakuRelay.defaultTopics:
node.subscribe(topic, none(TopicHandler))
# Resume previous relay connections
if node.peerManager.hasPeers(protocolMatcher(WakuRelayCodec)):
info "Found previous WakuRelay peers. Reconnecting."
# Reconnect to previous relay peers. This will respect a backoff period, if necessary
let backoffPeriod = node.wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime)
await node.peerManager.reconnectPeers(WakuRelayCodec,
protocolMatcher(WakuRelayCodec),
backoffPeriod)
# Start the WakuRelay protocol
await node.wakuRelay.start()
info "relay started successfully"
proc mountRelay*(node: WakuNode,
topics: seq[string] = newSeq[string](),
relayMessages = true,
triggerSelf = true,
peerExchangeHandler = none(RoutingRecordsHandler))
# @TODO: Better error handling: CatchableError is raised by `waitFor`
{.gcsafe, raises: [Defect, InitializationError, LPError, CatchableError].} =
proc msgIdProvider(m: messages.Message): Result[MessageID, ValidationResult] =
let mh = MultiHash.digest("sha2-256", m.data)
if mh.isOk():
return ok(mh[].data.buffer)
else:
return ok(($m.data.hash).toBytes())
let wakuRelay = WakuRelay.init(
switch = node.switch,
msgIdProvider = msgIdProvider,
triggerSelf = triggerSelf,
sign = false,
verifySignature = false,
maxMessageSize = MaxWakuMessageSize
)
info "mounting relay", relayMessages=relayMessages
## The default relay topics are the union of
## all configured topics plus the hard-coded defaultTopic(s)
wakuRelay.defaultTopics = concat(@[defaultTopic], topics)
## Add peer exchange handler
if peerExchangeHandler.isSome():
wakuRelay.parameters.enablePX = true # Feature flag for peer exchange in nim-libp2p
wakuRelay.routingRecordsHandler.add(peerExchangeHandler.get())
node.switch.mount(wakuRelay, protocolMatcher(WakuRelayCodec))
if relayMessages:
## Some nodes may choose not to have the capability to relay messages (e.g. "light" nodes).
## All nodes, however, currently require WakuRelay, regardless of desired capabilities.
## This is to allow protocol stream negotiation with relay-capable nodes to succeed.
## Here we mount relay on the switch only, but do not proceed to subscribe to any pubsub
## topics. We also never start the relay protocol. node.wakuRelay remains nil.
## @TODO: in future, this WakuRelay dependency will be removed completely
node.wakuRelay = wakuRelay
info "relay mounted successfully"
if node.started:
# Node has started already. Let's start relay too.
waitFor node.startRelay()
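# Illustrative sketch, not part of the original module: a typical mount order, relay
# first so that relay-dependent protocols can use it (the topic value is an example).
#
#   node.mountRelay(@["/waku/2/default-waku/proto"])
#   node.mountLibp2pPing()
#   node.mountLightPush()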
proc mountLightPush*(node: WakuNode) {.raises: [Defect, LPError].} =
info "mounting light push"
if node.wakuRelay.isNil:
debug "mounting lightpush without relay"
node.wakuLightPush = WakuLightPush.init(node.peerManager, node.rng, nil)
else:
debug "mounting lightpush with relay"
node.wakuLightPush = WakuLightPush.init(node.peerManager, node.rng, nil, node.wakuRelay)
node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))
proc mountLibp2pPing*(node: WakuNode) {.raises: [Defect, LPError].} =
info "mounting libp2p ping protocol"
try:
node.libp2pPing = Ping.new(rng = node.rng)
except Exception as e:
# This is necessary as `Ping.new*` does not have explicit `raises` requirement
# @TODO: remove exception handling once explicit `raises` in ping module
raise newException(LPError, "Failed to initialize ping protocol")
node.switch.mount(node.libp2pPing)
proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
while node.started:
# Keep all connected peers alive while running
trace "Running keepalive"
# First get a list of connected peer infos
let peers = node.peerManager.peers()
.filterIt(node.peerManager.connectedness(it.peerId) == Connected)
.mapIt(it.toRemotePeerInfo())
# Attempt to retrieve and ping the active outgoing connection for each peer
for peer in peers:
let connOpt = await node.peerManager.dialPeer(peer, PingCodec)
if connOpt.isNone:
# @TODO more sophisticated error handling here
debug "failed to connect to remote peer", peer=peer
waku_node_errors.inc(labelValues = ["keep_alive_failure"])
return
discard await node.libp2pPing.ping(connOpt.get()) # Ping connection
await sleepAsync(keepalive)
proc startKeepalive*(node: WakuNode) =
let defaultKeepalive = 2.minutes # 20% of the default chronosstream timeout duration
info "starting keepalive", keepalive=defaultKeepalive
asyncSpawn node.keepaliveLoop(defaultKeepalive)
## Helpers
proc connectToNode(n: WakuNode, remotePeer: RemotePeerInfo, source = "api") {.async.} =
## `source` indicates source of node addrs (static config, api call, discovery, etc)
info "Connecting to node", remotePeer = remotePeer, source = source
# NOTE This is dialing on WakuRelay protocol specifically
info "Attempting dial", wireAddr = remotePeer.addrs[0], peerId = remotePeer.peerId
let connOpt = await n.peerManager.dialPeer(remotePeer, WakuRelayCodec)
if connOpt.isSome():
info "Successfully connected to peer", wireAddr = remotePeer.addrs[0], peerId = remotePeer.peerId
waku_node_conns_initiated.inc(labelValues = [source])
else:
error "Failed to connect to peer", wireAddr = remotePeer.addrs[0], peerId = remotePeer.peerId
waku_node_errors.inc(labelValues = ["conn_init_failure"])
proc setStorePeer*(n: WakuNode, address: string) {.raises: [Defect, ValueError, LPError].} =
info "Set store peer", address = address
let remotePeer = parseRemotePeerInfo(address)
n.wakuStore.setPeer(remotePeer)
proc setFilterPeer*(n: WakuNode, address: string) {.raises: [Defect, ValueError, LPError].} =
info "Set filter peer", address = address
let remotePeer = parseRemotePeerInfo(address)
n.wakuFilter.setPeer(remotePeer)
proc setLightPushPeer*(n: WakuNode, address: string) {.raises: [Defect, ValueError, LPError].} =
info "Set lightpush peer", address = address
let remotePeer = parseRemotePeerInfo(address)
n.wakuLightPush.setPeer(remotePeer)
proc connectToNodes*(n: WakuNode, nodes: seq[string], source = "api") {.async.} =
## `source` indicates source of node addrs (static config, api call, discovery, etc)
info "connectToNodes", len = nodes.len
for nodeId in nodes:
await connectToNode(n, parseRemotePeerInfo(nodeId), source)
# The issue seems to be around peers not being fully connected when
# trying to subscribe. So what we do is sleep to guarantee nodes are
# fully connected.
#
# This issue was known to Dmitry on nim-libp2p and may be resolvable
# later.
await sleepAsync(5.seconds)
proc connectToNodes*(n: WakuNode, nodes: seq[RemotePeerInfo], source = "api") {.async.} =
## `source` indicates source of node addrs (static config, api call, discovery, etc)
info "connectToNodes", len = nodes.len
for remotePeerInfo in nodes:
await connectToNode(n, remotePeerInfo, source)
# The issue seems to be around peers not being fully connected when
# trying to subscribe. So what we do is sleep to guarantee nodes are
# fully connected.
#
# This issue was known to Dmitry on nim-libp2p and may be resolvable
# later.
await sleepAsync(5.seconds)
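# Illustrative sketch, not part of the original module: dialling a static peer and
# then using it as the store peer. The multiaddress below is a made-up placeholder.
#
#   let storeAddr = "/ip4/127.0.0.1/tcp/60002/p2p/<peer-id>"  # placeholder peer
#   await node.connectToNodes(@[storeAddr], source="static")
#   node.setStorePeer(storeAddr)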
proc runDiscv5Loop(node: WakuNode) {.async.} =
## Continuously add newly discovered nodes
## using Node Discovery v5
if (node.wakuDiscv5.isNil):
warn "Trying to run discovery v5 while it's disabled"
return
info "Starting discovery loop"
while node.wakuDiscv5.listening:
trace "Running discovery loop"
## Query for a random target and collect all discovered nodes
## @TODO: we could filter nodes here
let discoveredPeers = await node.wakuDiscv5.findRandomPeers()
if discoveredPeers.isOk:
## Let's attempt to connect to peers we
## have not encountered before
trace "Discovered peers", count=discoveredPeers.get().len()
let newPeers = discoveredPeers.get().filterIt(
not node.switch.peerStore[AddressBook].contains(it.peerId))
if newPeers.len > 0:
debug "Connecting to newly discovered peers", count=newPeers.len()
await connectToNodes(node, newPeers, "discv5")
# Discovery `queryRandom` can have a synchronous fast path for example
# when no peers are in the routing table. Don't run it in continuous loop.
#
# Also, give some time to dial the discovered nodes and update stats etc
await sleepAsync(5.seconds)
proc startDiscv5*(node: WakuNode): Future[bool] {.async.} =
## Start Discovery v5 service
info "Starting discovery v5 service"
if not node.wakuDiscv5.isNil:
## First start listening on configured port
try:
trace "Start listening on discv5 port"
node.wakuDiscv5.open()
except CatchableError:
error "Failed to start discovery service. UDP port may be already in use"
return false
## Start Discovery v5
trace "Start discv5 service"
node.wakuDiscv5.start()
trace "Start discovering new peers using discv5"
asyncSpawn node.runDiscv5Loop()
debug "Successfully started discovery v5 service"
info "Discv5: discoverable ENR ", enr = node.wakuDiscV5.protocol.localNode.record.toUri()
return true
return false
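# Illustrative sketch, not part of the original module: starting discovery v5 once the
# node is up; assumes `node.wakuDiscv5` was configured when the node was initialized.
#
#   if not (await node.startDiscv5()):
#     warn "discv5 could not be started"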
proc stopDiscv5*(node: WakuNode): Future[bool] {.async.} =
## Stop Discovery v5 service
if not node.wakuDiscv5.isNil:
info "Stopping discovery v5 service"
## Stop Discovery v5 process and close listening port
if node.wakuDiscv5.listening:
trace "Stop listening on discv5 port"
await node.wakuDiscv5.closeWait()
debug "Successfully stopped discovery v5 service"
proc start*(node: WakuNode) {.async.} =
## Starts a created Waku Node and
## all its mounted protocols.
##
## Status: Implemented.
await node.switch.start()
# TODO Get this from WakuNode obj
let peerInfo = node.switch.peerInfo
info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
var listenStr = ""
for address in node.announcedAddresses:
var fulladdr = "[" & $address & "/p2p/" & $peerInfo.peerId & "]"
listenStr &= fulladdr
## XXX: this should be /ip4..., / stripped?
info "Listening on", full = listenStr
info "DNS: discoverable ENR ", enr = node.enr.toUri()
## Update switch peer info with announced addrs
node.updateSwitchPeerInfo()
if not node.wakuRelay.isNil:
await node.startRelay()
info "Node started successfully"
node.started = true
proc stop*(node: WakuNode) {.async.} =
if not node.wakuRelay.isNil:
await node.wakuRelay.stop()
if not node.wakuDiscv5.isNil:
discard await node.stopDiscv5()
await node.switch.stop()
node.started = false
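# Illustrative sketch, not part of the original module: a minimal node lifecycle with
# only relay mounted.
#
#   node.mountRelay()
#   await node.start()
#   # ... run until shutdown is requested ...
#   await node.stop()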
{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
## Node setup happens in 7 phases:
## 1. Set up storage
## 2. Retrieve dynamic bootstrap nodes
## 3. Initialize node
## 4. Mount and initialize configured protocols
## 5. Start node and mounted protocols
## 6. Start monitoring tools and external interfaces
## 7. Set up graceful shutdown hooks
import
confutils, toml_serialization,
system/ansi_c,
libp2p/nameresolving/dnsresolver,
../../common/utils/nat,
./config,
./wakunode2_setup,
./wakunode2_setup_rest,
./wakunode2_setup_metrics,
./wakunode2_setup_rpc,
./wakunode2_setup_sql_migrations,
./storage/message/waku_message_store,
./storage/peer/waku_peer_storage
logScope:
topics = "wakunode.setup"
###################
# Setup functions #
###################
# 1/7 Setup storage
proc setupStorage(conf: WakuNodeConf):
SetupResult[tuple[pStorage: WakuPeerStorage, mStorage: WakuMessageStore]] =
## Setup a SQLite Database for a wakunode based on a supplied
## configuration file and perform all necessary migration.
##
## If config allows, return peer storage and message store
## for use elsewhere.
var
sqliteDatabase: SqliteDatabase
storeTuple: tuple[pStorage: WakuPeerStorage, mStorage: WakuMessageStore]
# Setup DB
if conf.dbPath != "":
let dbRes = SqliteDatabase.init(conf.dbPath)
if dbRes.isErr:
warn "failed to init database", err = dbRes.error
waku_node_errors.inc(labelValues = ["init_db_failure"])
return err("failed to init database")
else:
sqliteDatabase = dbRes.value
if not sqliteDatabase.isNil and (conf.persistPeers or conf.persistMessages):
# Database initialized. Let's set it up
sqliteDatabase.runMigrations(conf) # First migrate what we have
if conf.persistPeers:
# Peer persistence enabled. Set up Peer table in storage
let res = WakuPeerStorage.new(sqliteDatabase)
if res.isErr:
warn "failed to init new WakuPeerStorage", err = res.error
waku_node_errors.inc(labelValues = ["init_store_failure"])
else:
storeTuple.pStorage = res.value
if conf.persistMessages:
# Historical message persistence enabled. Set up Message table in storage
let res = WakuMessageStore.init(sqliteDatabase, conf.storeCapacity, conf.sqliteStore, conf.sqliteRetentionTime)
if res.isErr:
warn "failed to init WakuMessageStore", err = res.error
waku_node_errors.inc(labelValues = ["init_store_failure"])
else:
storeTuple.mStorage = res.value
ok(storeTuple)
# 2/7 Retrieve dynamic bootstrap nodes
proc retrieveDynamicBootstrapNodes(conf: WakuNodeConf): SetupResult[seq[RemotePeerInfo]] =
if conf.dnsDiscovery and conf.dnsDiscoveryUrl != "":
# DNS discovery
debug "Discovering nodes using Waku DNS discovery", url=conf.dnsDiscoveryUrl
var nameServers: seq[TransportAddress]
for ip in conf.dnsDiscoveryNameServers:
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
let dnsResolver = DnsResolver.new(nameServers)
proc resolver(domain: string): Future[string] {.async, gcsafe.} =
trace "resolving", domain=domain
let resolved = await dnsResolver.resolveTxt(domain)
return resolved[0] # Use only first answer
var wakuDnsDiscovery = WakuDnsDiscovery.init(conf.dnsDiscoveryUrl,
resolver)
if wakuDnsDiscovery.isOk:
return wakuDnsDiscovery.get().findPeers()
.mapErr(proc (e: cstring): string = $e)
else:
warn "Failed to init Waku DNS discovery"
debug "No method for retrieving dynamic bootstrap nodes specified."
ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
# 3/7 Initialize node
proc initNode(conf: WakuNodeConf,
pStorage: WakuPeerStorage = nil,
dynamicBootstrapNodes: openArray[RemotePeerInfo] = @[]): SetupResult[WakuNode] =
## Setup a basic Waku v2 node based on a supplied configuration
## file. Optionally include persistent peer storage.
## No protocols are mounted yet.
var dnsResolver: DnsResolver
if conf.dnsAddrs:
# Support for DNS multiaddrs
var nameServers: seq[TransportAddress]
for ip in conf.dnsAddrsNameServers:
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
dnsResolver = DnsResolver.new(nameServers)
let
## `udpPort` is only supplied to satisfy underlying APIs but is not
## actually a supported transport for libp2p traffic.
udpPort = conf.tcpPort
(extIp, extTcpPort, extUdpPort) = setupNat(conf.nat,
clientId,
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(udpPort) + conf.portsShift))
dns4DomainName = if conf.dns4DomainName != "": some(conf.dns4DomainName)
else: none(string)
discv5UdpPort = if conf.discv5Discovery: some(Port(uint16(conf.discv5UdpPort) + conf.portsShift))
else: none(Port)
## @TODO: the NAT setup assumes a manual port mapping configuration if extIp config is set. This probably
## implies adding manual config item for extPort as well. The following heuristic assumes that, in absence of manual
## config, the external port is the same as the bind port.
extPort = if (extIp.isSome() or dns4DomainName.isSome()) and extTcpPort.isNone():
some(Port(uint16(conf.tcpPort) + conf.portsShift))
else:
extTcpPort
wakuFlags = initWakuFlags(conf.lightpush,
conf.filter,
conf.store,
conf.relay)
node = WakuNode.new(conf.nodekey,
conf.listenAddress, Port(uint16(conf.tcpPort) + conf.portsShift),
extIp, extPort,
pStorage,
conf.maxConnections.int,
Port(uint16(conf.websocketPort) + conf.portsShift),
conf.websocketSupport,
conf.websocketSecureSupport,
conf.websocketSecureKeyPath,
conf.websocketSecureCertPath,
some(wakuFlags),
dnsResolver,
conf.relayPeerExchange, # We send our own signed peer record when peer exchange enabled
dns4DomainName,
discv5UdpPort
)
if conf.discv5Discovery:
let
discoveryConfig = DiscoveryConfig.init(
conf.discv5TableIpLimit, conf.discv5BucketIpLimit, conf.discv5BitsPerHop)
# select dynamic bootstrap nodes that have an ENR containing a udp port.
# Discv5 only supports UDP (https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md)
var discv5BootstrapEnrs: seq[enr.Record]
for n in dynamicBootstrapNodes:
if n.enr.isSome():
let
enr = n.enr.get()
tenrRes = enr.toTypedRecord()
if tenrRes.isOk() and (tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()):
discv5BootstrapEnrs.add(enr)
# parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
for enrUri in conf.discv5BootstrapNodes:
addBootstrapNode(enrUri, discv5BootstrapEnrs)
node.wakuDiscv5 = WakuDiscoveryV5.new(
extIP, extPort, discv5UdpPort,
conf.listenAddress,
discv5UdpPort.get(),
discv5BootstrapEnrs,
conf.discv5EnrAutoUpdate,
keys.PrivateKey(conf.nodekey.skkey),
wakuFlags,
[], # Empty enr fields, for now
node.rng,
discoveryConfig
)
ok(node)
# 4/7 Mount and initialize configured protocols
proc setupProtocols(node: WakuNode,
conf: WakuNodeConf,
mStorage: WakuMessageStore = nil): SetupResult[bool] =
## Set up configured protocols on an existing Waku v2 node.
## Optionally include persistent message storage.
## No protocols are started yet.
# Mount relay on all nodes
var peerExchangeHandler = none(RoutingRecordsHandler)
if conf.relayPeerExchange:
proc handlePeerExchange(peer: PeerId, topic: string,
peers: seq[RoutingRecordsPair]) {.gcsafe, raises: [Defect].} =
## Handle peers received via gossipsub peer exchange
# TODO: Only consider peers on pubsub topics we subscribe to
let exchangedPeers = peers.filterIt(it.record.isSome()) # only peers with populated records
.mapIt(toRemotePeerInfo(it.record.get()))
debug "connecting to exchanged peers", src=peer, topic=topic, numPeers=exchangedPeers.len
# asyncSpawn, as we don't want to block here
asyncSpawn node.connectToNodes(exchangedPeers, "peer exchange")
peerExchangeHandler = some(handlePeerExchange)
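# conf.topics is a space-separated list of pubsub topics for the relay to subscribe to;
# illustrative example (the default Waku v2 pubsub topic):
#   --topics="/waku/2/default-waku/proto"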
mountRelay(node,
conf.topics.split(" "),
relayMessages = conf.relay, # Indicates if the node is capable of relaying messages
peerExchangeHandler = peerExchangeHandler)
# Keepalive mounted on all nodes
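# (libp2p ping, protocol id /ipfs/ping/1.0.0, both answers inbound pings and is reused
# by the keepalive loop started in startNode)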
mountLibp2pPing(node)
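# RLN relay (spam protection based on rate-limiting nullifiers) is compile-time gated:
# it is only available when the binary is built with -d:rln, and only mounted when
# conf.rlnRelay is enabled.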
when defined(rln):
if conf.rlnRelay:
let res = node.mountRlnRelay(conf)
if res.isErr():
debug "could not mount WakuRlnRelay"
if conf.swap:
mountSwap(node)
# TODO Set swap peer, for now should be same as store peer
# Store setup
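# If a store service peer is configured, conf.storenode is expected to be a full libp2p
# multiaddr; illustrative placeholder only:
#   /ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAm...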
if (conf.storenode != "") or (conf.store):
mountStore(node, mStorage, conf.persistMessages, conf.storeCapacity, conf.sqliteStore)
if conf.storenode != "":
setStorePeer(node, conf.storenode)
# NOTE Must be mounted after relay
if (conf.lightpushnode != "") or (conf.lightpush):
mountLightPush(node)
if conf.lightpushnode != "":
setLightPushPeer(node, conf.lightpushnode)
# Filter setup. NOTE Must be mounted after relay
if (conf.filternode != "") or (conf.filter):
mountFilter(node, filterTimeout = chronos.seconds(conf.filterTimeout))
if conf.filternode != "":
setFilterPeer(node, conf.filternode)
ok(true) # Success
# 5/7 Start node and mounted protocols
proc startNode(node: WakuNode, conf: WakuNodeConf,
dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]): SetupResult[bool] =
## Start a configured node and all mounted protocols.
## Resume history, connect to static nodes and start
## keep-alive, if configured.
# Start Waku v2 node
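# (waitFor blocks on the chronos event loop until the asynchronous start() future
# completes, i.e. until the underlying libp2p switch is listening)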
waitFor node.start()
# Start discv5 and connect to discovered nodes
if conf.discv5Discovery:
if not waitFor node.startDiscv5():
error "could not start Discovery v5"
# Resume historical messages; this must be called after the node has started
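# (resume queries the configured store peer(s) for messages published while this node
# was offline, so that local history has no gaps)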
if conf.store and conf.persistMessages:
waitFor node.resume()
# Connect to configured static nodes
if conf.staticnodes.len > 0:
waitFor connectToNodes(node, conf.staticnodes, "static")
if dynamicBootstrapNodes.len > 0:
info "Connecting to dynamic bootstrap peers"
waitFor connectToNodes(node, dynamicBootstrapNodes, "dynamic bootstrap")
# Start keepalive, if enabled
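# (the keepalive loop periodically pings connected peers over the libp2p ping protocol
# mounted earlier, so that idle connections are not dropped)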
if conf.keepAlive:
node.startKeepalive()
ok(true) # Success
# 6/7 Start monitoring tools and external interfaces
proc startExternal(node: WakuNode, conf: WakuNodeConf): SetupResult[bool] =
## Start configured external interfaces and monitoring tools
## on a Waku v2 node, including the RPC API, REST API and metrics
## monitoring ports.
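# Note: portsShift is applied uniformly to the RPC, REST and metrics ports below,
# mirroring its effect on the libp2p TCP, websocket and discv5 ports.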
if conf.rpc:
startRpcServer(node, conf.rpcAddress, Port(conf.rpcPort + conf.portsShift), conf)
if conf.rest:
startRestServer(node, conf.restAddress, Port(conf.restPort + conf.portsShift), conf)
if conf.metricsLogging:
startMetricsLog()
if conf.metricsServer:
startMetricsServer(conf.metricsServerAddress,
Port(conf.metricsServerPort + conf.portsShift))
ok(true) # Success
{.push warning[ProveInit]: off.}
let conf = try:
WakuNodeConf.load(
secondarySources = proc (conf: WakuNodeConf, sources: auto) =
if conf.configFile.isSome:
sources.addConfigFile(Toml, conf.configFile.get)
)
except CatchableError as err:
error "Failure while loading the configuration: \n", err_msg=err.msg
quit 1 # if we don't leave here, the initialization of conf does not work in the success case
{.pop.}
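# Illustrative usage only (flag and TOML key names are derived from the WakuNodeConf
# fields by confutils; verify against the current config definition):
#   ./wakunode2 --config-file=/path/to/config.toml --ports-shift=1
# where the TOML file can set the same options, e.g.:
#   relay = true
#   store = true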
# if called with --version, print the version and quit
if conf.version:
const git_version {.strdefine.} = "n/a"
echo "version / git commit hash: ", git_version
quit(QuitSuccess)
var
node: WakuNode # This is the node we're going to set up using the conf
##############
# Node setup #
##############
debug "1/7 Setting up storage"
var
pStorage: WakuPeerStorage
mStorage: WakuMessageStore
let setupStorageRes = setupStorage(conf)
if setupStorageRes.isErr:
error "1/7 Setting up storage failed. Continuing without storage."
else:
(pStorage, mStorage) = setupStorageRes.get()
debug "2/7 Retrieve dynamic bootstrap nodes"
var dynamicBootstrapNodes: seq[RemotePeerInfo]
let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(conf)
if dynamicBootstrapNodesRes.isErr:
warn "2/7 Retrieving dynamic bootstrap nodes failed. Continuing without dynamic bootstrap nodes."
else:
dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
debug "3/7 Initializing node"
let initNodeRes = initNode(conf, pStorage, dynamicBootstrapNodes)
if initNodeRes.isErr:
error "3/7 Initializing node failed. Quitting."
quit(QuitFailure)
else:
node = initNodeRes.get()
debug "4/7 Mounting protocols"
let setupProtocolsRes = setupProtocols(node, conf, mStorage)
if setupProtocolsRes.isErr:
error "4/7 Mounting protocols failed. Continuing in current state."
debug "5/7 Starting node and mounted protocols"
let startNodeRes = startNode(node, conf, dynamicBootstrapNodes)
if startNodeRes.isErr:
error "5/7 Starting node and mounted protocols failed. Continuing in current state."
debug "6/7 Starting monitoring and external interfaces"
let startExternalRes = startExternal(node, conf)
if startExternalRes.isErr:
error "6/7 Starting monitoring and external interfaces failed. Continuing in current state."
debug "7/7 Setting up shutdown hooks"
# 7/7 Set up graceful shutdown hooks
## Set up shutdown hooks for this process.
## Stop node gracefully on shutdown.
# Handle Ctrl-C SIGINT
proc handleCtrlC() {.noconv.} =
when defined(windows):
# workaround for https://github.com/nim-lang/Nim/issues/4057
setupForeignThreadGc()
info "Shutting down after receiving SIGINT"
waitFor node.stop()
quit(QuitSuccess)
setControlCHook(handleCtrlC)
# Handle SIGTERM
when defined(posix):
proc handleSigterm(signal: cint) {.noconv.} =
info "Shutting down after receiving SIGTERM"
waitFor node.stop()
quit(QuitSuccess)
c_signal(SIGTERM, handleSigterm)
debug "Node setup complete"
runForever()