mirror of https://github.com/waku-org/nwaku.git
parent 7be949ed67
commit 87b98a2460
@@ -15,6 +15,7 @@ This release contains the following:
 The field numbers of the `HistoryResponse` are shifted up by one to match the [13/WAKU2-STORE](https://rfc.vac.dev/spec/13/) spec.
 - Adds optional `timestamp` to `WakuRelayMessage`.
 #### General refactoring
+- `wakunode2` setup refactored into 6 distinct phases with improved logging and error handling
 #### Docs
 - Adds the database migration tutorial.
 #### Schema
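For context on the `timestamp` entry above, here is a minimal sketch of a relay message type carrying the new optional field. The field set other than `timestamp` is an assumption for illustration and is not taken from this diff:

```nim
import std/options

type
  ContentTopic = string # placeholder for the nwaku ContentTopic alias

  WakuRelayMessage = object
    payload: seq[byte]              # assumed existing field
    contentTopic: Option[ContentTopic] # assumed existing field
    timestamp: Option[float64]      # optional sender timestamp, new in this release
```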
@@ -2,8 +2,8 @@

 import
   std/[tables,sequtils],
+  chronicles,
   json_rpc/rpcserver,
-  eth/[common, rlp, keys, p2p],
   ../wakunode2,
   ./jsonrpc_types
@@ -2,11 +2,13 @@

 import
   std/[tables,sequtils],
+  chronicles,
   json_rpc/rpcserver,
   nimcrypto/sysrand,
-  eth/[common, rlp, keys, p2p],
-  ../wakunode2, ../waku_payload,
-  ./jsonrpc_types, ./jsonrpc_utils
+  ../wakunode2,
+  ../waku_payload,
+  ./jsonrpc_types,
+  ./jsonrpc_utils

 export waku_payload, jsonrpc_types
@@ -37,7 +39,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm

       let msg = message.toWakuMessage(version = 1,
                                       rng = rng,
-                                      pubKey = none(keys.PublicKey),
+                                      pubKey = none(waku_payload.PublicKey),
                                       symkey = some(symkey.toSymKey()))

       if (await node.publish(topic, msg).withTimeout(futTimeout)):
@@ -60,7 +62,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm
       # Clear cache before next call
       topicCache[topic] = @[]
       return msgs.mapIt(it.toWakuRelayMessage(symkey = some(symkey.toSymKey()),
-                                              privateKey = none(keys.PrivateKey)))
+                                              privateKey = none(waku_payload.PrivateKey)))
     else:
       # Not subscribed to this topic
       raise newException(ValueError, "Not subscribed to topic: " & topic)
@@ -71,7 +73,7 @@ proc installPrivateApiHandlers*(node: WakuNode, rpcsrv: RpcServer, rng: ref BrHm
     ## Generates and returns a public/private key pair for asymmetric message encryption and decryption.
     debug "get_waku_v2_private_v1_asymmetric_keypair"

-    let privKey = keys.PrivateKey.random(rng[])
+    let privKey = waku_payload.PrivateKey.random(rng[])

     return WakuKeyPair(seckey: privKey, pubkey: privKey.toPublicKey())
@@ -2,11 +2,12 @@

 import
   std/[tables,sequtils],
+  chronicles,
   json_rpc/rpcserver,
   libp2p/protocols/pubsub/pubsub,
-  eth/[common, rlp, keys, p2p],
   ../wakunode2,
-  ./jsonrpc_types, ./jsonrpc_utils
+  ./jsonrpc_types,
+  ./jsonrpc_utils

 export jsonrpc_types
@@ -0,0 +1,115 @@
+{.push raises: [Defect].}
+
+## Collection of utilities commonly used
+## during the setup phase of a Waku v2 node
+
+import
+  std/tables,
+  chronos,
+  chronicles,
+  json_rpc/rpcserver,
+  metrics,
+  metrics/chronos_httpserver,
+  stew/results,
+  stew/shims/net,
+  ./storage/sqlite,
+  ./storage/migration/migration_types,
+  ./jsonrpc/[admin_api,
+             debug_api,
+             filter_api,
+             relay_api,
+             store_api,
+             private_api,
+             debug_api],
+  ./config,
+  ./wakunode2
+
+logScope:
+  topics = "wakunode.setup"
+
+type
+  SetupResult*[T] = Result[T, string]
+
+##########################
+# Setup helper functions #
+##########################
+
+proc startRpc*(node: WakuNode, rpcIp: ValidIpAddress, rpcPort: Port, conf: WakuNodeConf)
+  {.raises: [Defect, RpcBindError, CatchableError].} =
+  # @TODO: API handlers still raise CatchableError
+
+  let
+    ta = initTAddress(rpcIp, rpcPort)
+    rpcServer = newRpcHttpServer([ta])
+  installDebugApiHandlers(node, rpcServer)
+
+  # Install enabled API handlers:
+  if conf.relay:
+    let topicCache = newTable[string, seq[WakuMessage]]()
+    installRelayApiHandlers(node, rpcServer, topicCache)
+    if conf.rpcPrivate:
+      # Private API access allows WakuRelay functionality that
+      # is backwards compatible with Waku v1.
+      installPrivateApiHandlers(node, rpcServer, node.rng, topicCache)
+
+  if conf.filter:
+    let messageCache = newTable[ContentTopic, seq[WakuMessage]]()
+    installFilterApiHandlers(node, rpcServer, messageCache)
+
+  if conf.store:
+    installStoreApiHandlers(node, rpcServer)
+
+  if conf.rpcAdmin:
+    installAdminApiHandlers(node, rpcServer)
+
+  rpcServer.start()
+  info "RPC Server started", ta
+
+proc startMetricsServer*(serverIp: ValidIpAddress, serverPort: Port) =
+  info "Starting metrics HTTP server", serverIp, serverPort
+
+  try:
+    startMetricsHttpServer($serverIp, serverPort)
+  except Exception as e:
+    raiseAssert("Exception while starting metrics HTTP server: " & e.msg)
+
+  info "Metrics HTTP server started", serverIp, serverPort
+
+proc startMetricsLog*() =
+  # https://github.com/nim-lang/Nim/issues/17369
+  var logMetrics: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  logMetrics = proc(udata: pointer) =
+    {.gcsafe.}:
+      # TODO: libp2p_pubsub_peers is not public, so we need to make this either
+      # public in libp2p or do our own peer counting after all.
+      var
+        totalMessages = 0.float64
+
+      for key in waku_node_messages.metrics.keys():
+        try:
+          totalMessages = totalMessages + waku_node_messages.value(key)
+        except KeyError:
+          discard
+
+    info "Node metrics", totalMessages
+    discard setTimer(Moment.fromNow(2.seconds), logMetrics)
+  discard setTimer(Moment.fromNow(2.seconds), logMetrics)
+
+proc runMigrations*(sqliteDatabase: SqliteDatabase, conf: WakuNodeConf) =
+  # Run migration scripts on persistent storage
+
+  var migrationPath: string
+  if conf.persistPeers and conf.persistMessages:
+    migrationPath = migration_types.ALL_STORE_MIGRATION_PATH
+  elif conf.persistPeers:
+    migrationPath = migration_types.PEER_STORE_MIGRATION_PATH
+  elif conf.persistMessages:
+    migrationPath = migration_types.MESSAGE_STORE_MIGRATION_PATH
+
+  # run migration
+  info "running migration ... "
+  let migrationResult = sqliteDatabase.migrate(migrationPath)
+  if migrationResult.isErr:
+    warn "migration failed"
+  else:
+    info "migration is done"
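The new file wraps every setup phase in `SetupResult*[T] = Result[T, string]` from `stew/results`, the same `ok`/`err`/`isErr` API the diff itself uses. A minimal, self-contained sketch of the intended calling pattern; `parsePort` is a hypothetical helper, not part of the diff:

```nim
import std/strutils
import stew/results

type SetupResult[T] = Result[T, string]

proc parsePort(raw: string): SetupResult[int] =
  # Hypothetical phase: fails with a string error, mirroring the diff's err(...) usage.
  try:
    ok(parseInt(raw))
  except ValueError:
    err("invalid port: " & raw)

let res = parsePort("8545")
if res.isErr:
  echo "setup failed: ", res.error # the phases below log the error and continue
else:
  echo "port: ", res.get()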
@@ -3,7 +3,6 @@
 import
   std/[options, tables, strutils, sequtils, os],
   chronos, chronicles, metrics,
-  metrics/chronos_httpserver,
   stew/shims/net as stewNet,
   eth/keys,
   libp2p/crypto/crypto,
@@ -77,8 +76,6 @@ type
     peerInfo*: PeerInfo
     libp2pPing*: Ping
     libp2pTransportLoops*: seq[Future[void]]
-    # TODO Revist messages field indexing as well as if this should be Message or WakuMessage
-    messages*: seq[(Topic, WakuMessage)]
     filters*: Filters
     rng*: ref BrHmacDrbgContext
     started*: bool # Indicates that node has started listening
@@ -163,33 +160,6 @@ proc new*(T: type WakuNode, nodeKey: crypto.PrivateKey,

   return wakuNode

-proc start*(node: WakuNode) {.async.} =
-  ## Starts a created Waku Node.
-  ##
-  ## Status: Implemented.
-  ##
-  node.libp2pTransportLoops = await node.switch.start()
-
-  # TODO Get this from WakuNode obj
-  let peerInfo = node.peerInfo
-  info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
-  let listenStr = $peerInfo.addrs[^1] & "/p2p/" & $peerInfo.peerId
-  ## XXX: this should be /ip4..., / stripped?
-  info "Listening on", full = listenStr
-
-  if not node.wakuRelay.isNil:
-    await node.wakuRelay.start()
-
-  node.started = true
-
-proc stop*(node: WakuNode) {.async.} =
-  if not node.wakuRelay.isNil:
-    await node.wakuRelay.stop()
-
-  await node.switch.stop()
-
-  node.started = false
-
 proc subscribe(node: WakuNode, topic: Topic, handler: Option[TopicHandler]) =
   if node.wakuRelay.isNil:
     error "Invalid API call to `subscribe`. WakuRelay not mounted."
@@ -379,7 +349,6 @@ proc resume*(node: WakuNode, peerList: Option[seq[PeerInfo]] = none(seq[PeerInfo
     if retrievedMessages.isOk:
       info "the number of retrieved messages since the last online time: ", number=retrievedMessages.value
-

 # TODO Extend with more relevant info: topics, peers, memory usage, online time, etc
 proc info*(node: WakuNode): WakuInfo =
   ## Returns information about the Node, such as what multiaddress it can be reached at.
@@ -464,7 +433,6 @@ when defined(rln):

     node.wakuRlnRelay = rlnPeer

-when defined(rln):

   proc addRLNRelayValidator*(node: WakuNode, pubsubTopic: string) =
     ## this procedure is a thin wrapper for the pubsub addValidator method
@@ -480,6 +448,39 @@ when defined(rln):
     let pb = PubSub(node.wakuRelay)
     pb.addValidator(pubsubTopic, validator)

+proc startRelay*(node: WakuNode) {.async.} =
+  if node.wakuRelay.isNil:
+    trace "Failed to start relay. Not mounted."
+    return
+
+  ## Setup and start relay protocol
+  info "starting relay"
+
+  # Topic subscriptions
+  for topic in node.wakuRelay.defaultTopics:
+    node.subscribe(topic, none(TopicHandler))
+
+  # Resume previous relay connections
+  if node.peerManager.hasPeers(WakuRelayCodec):
+    info "Found previous WakuRelay peers. Reconnecting."
+
+    # Reconnect to previous relay peers. This will respect a backoff period, if necessary
+    let backoffPeriod = node.wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime)
+
+    await node.peerManager.reconnectPeers(WakuRelayCodec,
+                                          backoffPeriod)
+
+  when defined(rln):
+    if node.wakuRelay.rlnRelayEnabled:
+      # TODO currently the message validator is set for the defaultTopic, this can be configurable to accept other pubsub topics as well
+      addRLNRelayValidator(node, defaultTopic)
+      info "WakuRLNRelay is mounted successfully"
+
+  # Start the WakuRelay protocol
+  await node.wakuRelay.start()
+
+  info "relay started successfully"
+
 proc mountRelay*(node: WakuNode,
                  topics: seq[string] = newSeq[string](),
                  rlnRelayEnabled = false,
@@ -499,44 +500,34 @@ proc mountRelay*(node: WakuNode,

   info "mounting relay", rlnRelayEnabled=rlnRelayEnabled, relayMessages=relayMessages

+  ## The default relay topics are the union of
+  ## all configured topics plus the hard-coded defaultTopic(s)
+  wakuRelay.defaultTopics = concat(@[defaultTopic], topics)
+  wakuRelay.rlnRelayEnabled = rlnRelayEnabled
+
   node.switch.mount(wakuRelay, protocolMatcher(WakuRelayCodec))

-  if not relayMessages:
+  if relayMessages:
     ## Some nodes may choose not to have the capability to relay messages (e.g. "light" nodes).
     ## All nodes, however, currently require WakuRelay, regardless of desired capabilities.
     ## This is to allow protocol stream negotiation with relay-capable nodes to succeed.
     ## Here we mount relay on the switch only, but do not proceed to subscribe to any pubsub
     ## topics. We also never start the relay protocol. node.wakuRelay remains nil.
     ## @TODO: in future, this WakuRelay dependency will be removed completely
-    return
-
-  node.wakuRelay = wakuRelay
-
-  node.subscribe(defaultTopic, none(TopicHandler))
-
-  for topic in topics:
-    node.subscribe(topic, none(TopicHandler))
-
-  if node.peerManager.hasPeers(WakuRelayCodec):
-    trace "Found previous WakuRelay peers. Reconnecting."
-    # Reconnect to previous relay peers. This will respect a backoff period, if necessary
-    waitFor node.peerManager.reconnectPeers(WakuRelayCodec,
-                                            wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime))
+    node.wakuRelay = wakuRelay

   when defined(rln):
     if rlnRelayEnabled:
       # TODO pass rln relay inputs to this proc, right now it uses default values that are set in the mountRlnRelay proc
       info "WakuRLNRelay is enabled"
       waitFor mountRlnRelay(node)
-      # TODO currently the message validator is set for the defaultTopic, this can be configurable to accept other pubsub topics as well
-      addRLNRelayValidator(node, defaultTopic)
       info "WakuRLNRelay is mounted successfully"

+  info "relay mounted successfully"
+
   if node.started:
-    # Node has already started. Start the WakuRelay protocol
-    waitFor node.wakuRelay.start()
-
-    info "relay mounted and started successfully"
+    # Node has started already. Let's start relay too.
+    waitFor node.startRelay()

 proc mountLightPush*(node: WakuNode) {.raises: [Defect, LPError].} =
   info "mounting light push"
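The hunk above splits relay setup in two: `mountRelay` now only records configuration and mounts the protocol on the switch, while the new `startRelay` performs subscriptions and peer reconnection at start time. A runnable toy model of that split; none of these types are nwaku's, and the default topic value is assumed for illustration:

```nim
type
  ToyRelay = ref object
    defaultTopics: seq[string] # mirrors WakuRelay.defaultTopics in the diff

  ToyNode = ref object
    relay: ToyRelay
    started: bool

proc mountRelay(node: ToyNode, topics: seq[string]) =
  # As in the diff: record the topic set on the relay object, do not subscribe yet.
  node.relay = ToyRelay(defaultTopics: @["/waku/2/default-waku/proto"] & topics)

proc startRelay(node: ToyNode) =
  # As in the diff's startRelay: subscribe to every default topic at start time.
  for topic in node.relay.defaultTopics:
    echo "subscribing to ", topic

let node = ToyNode()
mountRelay(node, @["/waku/2/my-app/proto"])
startRelay(node)
node.started = true
```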
@@ -558,7 +549,7 @@ proc mountLibp2pPing*(node: WakuNode) {.raises: [Defect, LPError].} =
   except Exception as e:
     # This is necessary as `Ping.new*` does not have explicit `raises` requirement
     # @TODO: remove exception handling once explicit `raises` in ping module
-    raise newException(LPError, "Failed to initialise ping protocol")
+    raise newException(LPError, "Failed to initialize ping protocol")

   node.switch.mount(node.libp2pPing)
@@ -652,195 +643,283 @@ proc connectToNodes*(n: WakuNode, nodes: seq[PeerInfo]) {.async.} =
   # later.
   await sleepAsync(5.seconds)

+proc start*(node: WakuNode) {.async.} =
+  ## Starts a created Waku Node and
+  ## all its mounted protocols.
+  ##
+  ## Status: Implemented.
+
+  node.libp2pTransportLoops = await node.switch.start()
+
+  # TODO Get this from WakuNode obj
+  let peerInfo = node.peerInfo
+  info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
+  let listenStr = $peerInfo.addrs[^1] & "/p2p/" & $peerInfo.peerId
+  ## XXX: this should be /ip4..., / stripped?
+  info "Listening on", full = listenStr
+
+  if not node.wakuRelay.isNil:
+    await node.startRelay()
+
+  info "Node started successfully"
+  node.started = true
+
+proc stop*(node: WakuNode) {.async.} =
+  if not node.wakuRelay.isNil:
+    await node.wakuRelay.stop()
+
+  await node.switch.stop()
+
+  node.started = false
+
 {.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
 when isMainModule:
+  ## Node setup happens in 6 phases:
+  ## 1. Set up storage
+  ## 2. Initialize node
+  ## 3. Mount and initialize configured protocols
+  ## 4. Start node and mounted protocols
+  ## 5. Start monitoring tools and external interfaces
+  ## 6. Setup graceful shutdown hooks
+
   import
+    confutils,
     system/ansi_c,
-    confutils, json_rpc/rpcserver, metrics,
+    ../../common/utils/nat,
     ./config,
-    ./jsonrpc/[admin_api,
-               debug_api,
-               filter_api,
-               private_api,
-               relay_api,
-               store_api],
+    ./waku_setup,
     ./storage/message/waku_message_store,
-    ./storage/peer/waku_peer_storage,
-    ../../common/utils/nat
+    ./storage/peer/waku_peer_storage
+
+  logScope:
+    topics = "wakunode.setup"
+
+  ###################
+  # Setup functions #
+  ###################

-  proc startRpc(node: WakuNode, rpcIp: ValidIpAddress, rpcPort: Port, conf: WakuNodeConf) {.raises: [Defect, RpcBindError, CatchableError].} =
-    let
-      ta = initTAddress(rpcIp, rpcPort)
-      rpcServer = newRpcHttpServer([ta])
-    installDebugApiHandlers(node, rpcServer)
-
-    # Install enabled API handlers:
-    if conf.relay:
-      let topicCache = newTable[string, seq[WakuMessage]]()
-      installRelayApiHandlers(node, rpcServer, topicCache)
-      if conf.rpcPrivate:
-        # Private API access allows WakuRelay functionality that
-        # is backwards compatible with Waku v1.
-        installPrivateApiHandlers(node, rpcServer, node.rng, topicCache)
-
-    if conf.filter:
-      let messageCache = newTable[ContentTopic, seq[WakuMessage]]()
-      installFilterApiHandlers(node, rpcServer, messageCache)
-
-    if conf.store:
-      installStoreApiHandlers(node, rpcServer)
-
-    if conf.rpcAdmin:
-      installAdminApiHandlers(node, rpcServer)
-
-    rpcServer.start()
-    info "RPC Server started", ta
-
-  proc startMetricsServer(serverIp: ValidIpAddress, serverPort: Port) {.raises: [Defect, Exception].} =
-    info "Starting metrics HTTP server", serverIp, serverPort
-
-    startMetricsHttpServer($serverIp, serverPort)
-
-    info "Metrics HTTP server started", serverIp, serverPort
-
-  proc startMetricsLog() =
-    # https://github.com/nim-lang/Nim/issues/17369
-    var logMetrics: proc(udata: pointer) {.gcsafe, raises: [Defect].}
-    logMetrics = proc(udata: pointer) =
-      {.gcsafe.}:
-        # TODO: libp2p_pubsub_peers is not public, so we need to make this either
-        # public in libp2p or do our own peer counting after all.
-        var
-          totalMessages = 0.float64
-
-        for key in waku_node_messages.metrics.keys():
-          try:
-            totalMessages = totalMessages + waku_node_messages.value(key)
-          except KeyError:
-            discard
-
-      info "Node metrics", totalMessages
-      discard setTimer(Moment.fromNow(2.seconds), logMetrics)
-    discard setTimer(Moment.fromNow(2.seconds), logMetrics)
+  # 1/6 Setup storage
+  proc setupStorage(conf: WakuNodeConf):
+    SetupResult[tuple[pStorage: WakuPeerStorage, mStorage: WakuMessageStore]] =
+
+    ## Setup a SQLite Database for a wakunode based on a supplied
+    ## configuration file and perform all necessary migration.
+    ##
+    ## If config allows, return peer storage and message store
+    ## for use elsewhere.
+
+    var
+      sqliteDatabase: SqliteDatabase
+      storeTuple: tuple[pStorage: WakuPeerStorage, mStorage: WakuMessageStore]
+
+    # Setup DB
+    if conf.dbPath != "":
+      let dbRes = SqliteDatabase.init(conf.dbPath)
+      if dbRes.isErr:
+        warn "failed to init database", err = dbRes.error
+        waku_node_errors.inc(labelValues = ["init_db_failure"])
+        return err("failed to init database")
+      else:
+        sqliteDatabase = dbRes.value
+
+    if not sqliteDatabase.isNil:
+      # Database initialized. Let's set it up
+      sqliteDatabase.runMigrations(conf) # First migrate what we have
+
+      if conf.persistPeers:
+        # Peer persistence enabled. Set up Peer table in storage
+        let res = WakuPeerStorage.new(sqliteDatabase)
+
+        if res.isErr:
+          warn "failed to init new WakuPeerStorage", err = res.error
+          waku_node_errors.inc(labelValues = ["init_store_failure"])
+        else:
+          storeTuple.pStorage = res.value
+
+      if conf.persistMessages:
+        # Historical message persistence enabled. Set up Message table in storage
+        let res = WakuMessageStore.init(sqliteDatabase)
+
+        if res.isErr:
+          warn "failed to init WakuMessageStore", err = res.error
+          waku_node_errors.inc(labelValues = ["init_store_failure"])
+        else:
+          storeTuple.mStorage = res.value
+
+    ok(storeTuple)
+
+  # 2/6 Initialize node
+  proc initNode(conf: WakuNodeConf,
+                pStorage: WakuPeerStorage = nil): SetupResult[WakuNode] =
+
+    ## Setup a basic Waku v2 node based on a supplied configuration
+    ## file. Optionally include persistent peer storage.
+    ## No protocols are mounted yet.
+
+    let
+      (extIp, extTcpPort, extUdpPort) = setupNat(conf.nat,
+                                                 clientId,
+                                                 Port(uint16(conf.tcpPort) + conf.portsShift),
+                                                 Port(uint16(conf.udpPort) + conf.portsShift))
+      ## @TODO: the NAT setup assumes a manual port mapping configuration if extIp config is set. This probably
+      ## implies adding manual config item for extPort as well. The following heuristic assumes that, in absence of manual
+      ## config, the external port is the same as the bind port.
+      extPort = if extIp.isSome() and extTcpPort.isNone():
+                  some(Port(uint16(conf.tcpPort) + conf.portsShift))
+                else:
+                  extTcpPort
+      node = WakuNode.new(conf.nodekey,
+                          conf.listenAddress, Port(uint16(conf.tcpPort) + conf.portsShift),
+                          extIp, extPort,
+                          pStorage)
+
+    ok(node)
+
+  # 3/6 Mount and initialize configured protocols
+  proc setupProtocols(node: var WakuNode,
+                      conf: WakuNodeConf,
+                      mStorage: WakuMessageStore = nil): SetupResult[bool] =
+
+    ## Setup configured protocols on an existing Waku v2 node.
+    ## Optionally include persistent message storage.
+    ## No protocols are started yet.
+
+    # Mount relay on all nodes
+    mountRelay(node,
+               conf.topics.split(" "),
+               rlnRelayEnabled = conf.rlnRelay,
+               relayMessages = conf.relay) # Indicates if node is capable to relay messages
+
+    # Keepalive mounted on all nodes
+    mountLibp2pPing(node)
+
+    if conf.swap:
+      mountSwap(node)
+      # TODO Set swap peer, for now should be same as store peer
+
+    # Store setup
+    if (conf.storenode != "") or (conf.store):
+      mountStore(node, mStorage, conf.persistMessages)
+
+      if conf.storenode != "":
+        setStorePeer(node, conf.storenode)
+
+    # NOTE Must be mounted after relay
+    if (conf.lightpushnode != "") or (conf.lightpush):
+      mountLightPush(node)
+
+      if conf.lightpushnode != "":
+        setLightPushPeer(node, conf.lightpushnode)
+
+    # Filter setup. NOTE Must be mounted after relay
+    if (conf.filternode != "") or (conf.filter):
+      mountFilter(node)
+
+      if conf.filternode != "":
+        setFilterPeer(node, conf.filternode)
+
+    ok(true) # Success
+
+  # 4/6 Start node and mounted protocols
+  proc startNode(node: WakuNode, conf: WakuNodeConf): SetupResult[bool] =
+    ## Start a configured node and all mounted protocols.
+    ## Resume history, connect to static nodes and start
+    ## keep-alive, if configured.
+
+    # Start Waku v2 node
+    waitFor node.start()
+
+    # Resume historical messages, this has to be called after the node has been started
+    if conf.store and conf.persistMessages:
+      waitFor node.resume()
+
+    # Connect to configured static nodes
+    if conf.staticnodes.len > 0:
+      waitFor connectToNodes(node, conf.staticnodes)
+
+    # Start keepalive, if enabled
+    if conf.keepAlive:
+      node.startKeepalive()
+
+    ok(true) # Success
+
+  # 5/6 Start monitoring tools and external interfaces
+  proc startExternal(node: WakuNode, conf: WakuNodeConf): SetupResult[bool] =
+    ## Start configured external interfaces and monitoring tools
+    ## on a Waku v2 node, including the RPC API and metrics
+    ## monitoring ports.
+
+    if conf.rpc:
+      startRpc(node, conf.rpcAddress, Port(conf.rpcPort + conf.portsShift), conf)
+
+    if conf.metricsLogging:
+      startMetricsLog()
+
+    if conf.metricsServer:
+      startMetricsServer(conf.metricsServerAddress,
+                         Port(conf.metricsServerPort + conf.portsShift))
+
+    ok(true) # Success

   let
     conf = WakuNodeConf.load()

-  # Storage setup
-  var sqliteDatabase: SqliteDatabase
-
-  if conf.dbPath != "":
-    let dbRes = SqliteDatabase.init(conf.dbPath)
-    if dbRes.isErr:
-      warn "failed to init database", err = dbRes.error
-      waku_node_errors.inc(labelValues = ["init_db_failure"])
-    else:
-      sqliteDatabase = dbRes.value
-
-  if not sqliteDatabase.isNil:
-    var migrationPath = ""
-    if conf.persistPeers and conf.persistMessages: migrationPath = migration_types.ALL_STORE_MIGRATION_PATH
-    elif conf.persistPeers: migrationPath = migration_types.PEER_STORE_MIGRATION_PATH
-    elif conf.persistMessages: migrationPath = migration_types.MESSAGE_STORE_MIGRATION_PATH
-
-    # run migration
-    info "running migration ... "
-    let migrationResult = sqliteDatabase.migrate(migrationPath)
-    if migrationResult.isErr:
-      warn "migration failed"
-    else:
-      info "migration is done"
-
-  var pStorage: WakuPeerStorage
-
-  if conf.persistPeers and not sqliteDatabase.isNil:
-    let res = WakuPeerStorage.new(sqliteDatabase)
-    if res.isErr:
-      warn "failed to init new WakuPeerStorage", err = res.error
-      waku_node_errors.inc(labelValues = ["init_store_failure"])
-    else:
-      pStorage = res.value
-
-  let
-    (extIp, extTcpPort, extUdpPort) = setupNat(conf.nat, clientId,
-      Port(uint16(conf.tcpPort) + conf.portsShift),
-      Port(uint16(conf.udpPort) + conf.portsShift))
-    ## @TODO: the NAT setup assumes a manual port mapping configuration if extIp config is set. This probably
-    ## implies adding manual config item for extPort as well. The following heuristic assumes that, in absence of manual
-    ## config, the external port is the same as the bind port.
-    extPort = if extIp.isSome() and extTcpPort.isNone(): some(Port(uint16(conf.tcpPort) + conf.portsShift))
-              else: extTcpPort
-    node = WakuNode.new(conf.nodekey,
-      conf.listenAddress, Port(uint16(conf.tcpPort) + conf.portsShift),
-      extIp, extPort,
-      pStorage)
-
-  waitFor node.start()
-
-  if conf.swap:
-    mountSwap(node)
-
-  # TODO Set swap peer, for now should be same as store peer
-
-  # Store setup
-  if (conf.storenode != "") or (conf.store):
-    var store: WakuMessageStore
-    if (not sqliteDatabase.isNil) and conf.persistMessages:
-
-      let res = WakuMessageStore.init(sqliteDatabase)
-      if res.isErr:
-        warn "failed to init WakuMessageStore", err = res.error
-        waku_node_errors.inc(labelValues = ["init_store_failure"])
-      else:
-        store = res.value
-
-    mountStore(node, store, conf.persistMessages)
-
-    if conf.storenode != "":
-      setStorePeer(node, conf.storenode)
-
-  # Relay setup
-  mountRelay(node,
-             conf.topics.split(" "),
-             rlnRelayEnabled = conf.rlnRelay,
-             relayMessages = conf.relay) # Indicates if node is capable to relay messages
-
-  # Keepalive mounted on all nodes
-  mountLibp2pPing(node)
-
-  # Resume historical messages, this has to be called after the relay setup
-  if conf.store and conf.persistMessages:
-    waitFor node.resume()
-
-  if conf.staticnodes.len > 0:
-    waitFor connectToNodes(node, conf.staticnodes)
-
-  # NOTE Must be mounted after relay
-  if (conf.lightpushnode != "") or (conf.lightpush):
-    mountLightPush(node)
-
-    if conf.lightpushnode != "":
-      setLightPushPeer(node, conf.lightpushnode)
-
-  # Filter setup. NOTE Must be mounted after relay
-  if (conf.filternode != "") or (conf.filter):
-    mountFilter(node)
-
-    if conf.filternode != "":
-      setFilterPeer(node, conf.filternode)
-
-  if conf.rpc:
-    startRpc(node, conf.rpcAddress, Port(conf.rpcPort + conf.portsShift), conf)
-
-  if conf.metricsLogging:
-    startMetricsLog()
-
-  if conf.metricsServer:
-    startMetricsServer(conf.metricsServerAddress,
-      Port(conf.metricsServerPort + conf.portsShift))
-
-  # Setup graceful shutdown
+  var
+    node: WakuNode # This is the node we're going to setup using the conf
+
+  ##############
+  # Node setup #
+  ##############
+
+  debug "1/6 Setting up storage"
+
+  var
+    pStorage: WakuPeerStorage
+    mStorage: WakuMessageStore
+
+  let setupStorageRes = setupStorage(conf)
+
+  if setupStorageRes.isErr:
+    error "1/6 Setting up storage failed. Continuing without storage."
+  else:
+    (pStorage, mStorage) = setupStorageRes.get()
+
+  debug "2/6 Initializing node"
+
+  let initNodeRes = initNode(conf, pStorage)
+
+  if initNodeRes.isErr:
+    error "2/6 Initializing node failed. Quitting."
+    quit(QuitFailure)
+  else:
+    node = initNodeRes.get()
+
+  debug "3/6 Mounting protocols"
+
+  let setupProtocolsRes = setupProtocols(node, conf, mStorage)
+
+  if setupProtocolsRes.isErr:
+    error "3/6 Mounting protocols failed. Continuing in current state."
+
+  debug "4/6 Starting node and mounted protocols"
+
+  let startNodeRes = startNode(node, conf)
+
+  if startNodeRes.isErr:
+    error "4/6 Starting node and mounted protocols failed. Continuing in current state."
+
+  debug "5/6 Starting monitoring and external interfaces"
+
+  let startExternalRes = startExternal(node, conf)
+
+  if startExternalRes.isErr:
+    error "5/6 Starting monitoring and external interfaces failed. Continuing in current state."
+
+  debug "6/6 Setting up shutdown hooks"
+
+  # 6/6 Setup graceful shutdown hooks
+  ## Setup shutdown hooks for this process.
+  ## Stop node gracefully on shutdown.

   # Handle Ctrl-C SIGINT
   proc handleCtrlC() {.noconv.} =
@@ -862,8 +941,6 @@ when isMainModule:

   c_signal(SIGTERM, handleSigterm)

-  # Start keepalive, if enabled
-  if conf.keepAlive:
-    node.startKeepalive()
+  debug "Node setup complete"

   runForever()
@@ -19,9 +19,12 @@ const

 type
   WakuRelay* = ref object of GossipSub
+    defaultTopics*: seq[string] # Default configured PubSub topics
+    rlnRelayEnabled*: bool # Flag indicating if RLN relay is enabled

 method init*(w: WakuRelay) =
-  debug "init"
+  debug "init WakuRelay"

   proc handler(conn: Connection, proto: string) {.async.} =
     ## main protocol handler that gets triggered on every
     ## connection for a protocol string
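Since `mountRelay` now records `defaultTopics` on the `WakuRelay` object, the union built there is plain `std/sequtils` concatenation. A runnable illustration; the default topic value is assumed for illustration, only the `concat` expression is taken from the diff:

```nim
import std/sequtils

const defaultTopic = "/waku/2/default-waku/proto" # assumed default pubsub topic
let configuredTopics = @["/waku/2/my-app/proto"]

# As in mountRelay: hard-coded default first, then configured topics.
let defaultTopics = concat(@[defaultTopic], configuredTopics)
echo defaultTopics
```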