when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  std/[options, strutils, sequtils],
  stew/results,
  chronicles,
  chronos,
  libp2p/wire,
  libp2p/multicodec,
  libp2p/crypto/crypto,
  libp2p/nameresolving/dnsresolver,
  libp2p/protocols/pubsub/gossipsub,
  libp2p/peerid,
  eth/keys,
  presto,
  metrics,
  metrics/chronos_httpserver
import
  ../../waku/common/utils/nat,
  ../../waku/common/utils/parse_size_units,
  ../../waku/common/databases/db_sqlite,
  ../../waku/waku_archive/driver/builder,
  ../../waku/waku_archive/retention_policy/builder,
  ../../waku/waku_core,
  ../../waku/waku_node,
  ../../waku/node/waku_metrics,
  ../../waku/node/peer_manager,
  ../../waku/node/peer_manager/peer_store/waku_peer_storage,
  ../../waku/node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
  ../../waku/waku_api/message_cache,
  ../../waku/waku_api/handlers,
  ../../waku/waku_api/rest/server,
  ../../waku/waku_api/rest/debug/handlers as rest_debug_api,
  ../../waku/waku_api/rest/relay/handlers as rest_relay_api,
  ../../waku/waku_api/rest/filter/legacy_handlers as rest_legacy_filter_api,
  ../../waku/waku_api/rest/filter/handlers as rest_filter_api,
  ../../waku/waku_api/rest/lightpush/handlers as rest_lightpush_api,
  ../../waku/waku_api/rest/store/handlers as rest_store_api,
  ../../waku/waku_api/rest/health/handlers as rest_health_api,
  ../../waku/waku_api/rest/admin/handlers as rest_admin_api,
  ../../waku/waku_archive,
  ../../waku/waku_dnsdisc,
  ../../waku/waku_enr/sharding,
  ../../waku/waku_discv5,
  ../../waku/waku_peer_exchange,
  ../../waku/waku_rln_relay,
  ../../waku/waku_store,
  ../../waku/waku_lightpush/common,
  ../../waku/waku_filter,
  ../../waku/waku_filter_v2,
  ./wakunode2_validator_signed,
  ./internal_config,
  ./external_config

logScope:
  topics = "wakunode app"


# Git version in git describe format (defined at compile time)
const git_version* {.strdefine.} = "n/a"

type
  App* = object
    version: string
    conf: WakuNodeConf
    netConf: NetConfig
    rng: ref HmacDrbgContext
    key: crypto.PrivateKey
    record: Record

    wakuDiscv5: Option[WakuDiscoveryV5]
    peerStore: Option[WakuPeerStorage]
    dynamicBootstrapNodes: seq[RemotePeerInfo]

    node: WakuNode

    restServer: Option[WakuRestServerRef]
    metricsServer: Option[MetricsHttpServerRef]

  AppResult*[T] = Result[T, string]


func node*(app: App): WakuNode =
  app.node

func version*(app: App): string =
  app.version


## Initialisation
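
# The exported procs below are intended to be driven by a binary entry point
# (e.g. wakunode2) in roughly the following order. Sketch only: result
# checking and the exact sequencing live in the caller, not in this module.
#
#   var app = App.init(rng, conf)
#   app.setupPeerPersistence()           # optional on-disk peer store
#   app.setupDyamicBootstrapNodes()      # DNS discovery bootstrap peers
#   app.setupWakuApp()                   # build the WakuNode (+ discv5 instance)
#   waitFor app.setupAndMountProtocols()
#   app.startApp()                       # start node, protocols and discv5
#   app.setupMonitoringAndExternalInterfaces()
#   ...
#   waitFor app.stop()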

proc init*(T: type App, rng: ref HmacDrbgContext, conf: WakuNodeConf): T =
  let key =
    if conf.nodeKey.isSome():
      conf.nodeKey.get()
    else:
      let keyRes = crypto.PrivateKey.random(Secp256k1, rng[])

      if keyRes.isErr():
        error "failed to generate key", error=keyRes.error
        quit(QuitFailure)

      keyRes.get()

  let netConfigRes = networkConfiguration(conf, clientId)

  let netConfig =
    if netConfigRes.isErr():
      error "failed to create internal config", error=netConfigRes.error
      quit(QuitFailure)
    else: netConfigRes.get()

  let recordRes = enrConfiguration(conf, netConfig, key)

  let record =
    if recordRes.isErr():
      error "failed to create record", error=recordRes.error
      quit(QuitFailure)
    else: recordRes.get()

  if isClusterMismatched(record, conf.clusterId):
    error "cluster id mismatch with configured shards"
    quit(QuitFailure)

  App(
    version: git_version,
    conf: conf,
    netConf: netConfig,
    rng: rng,
    key: key,
    record: record,
    node: nil
  )

## Peer persistence

const PeerPersistenceDbUrl = "peers.db"

proc setupPeerStorage(): AppResult[Option[WakuPeerStorage]] =
  let db = ? SqliteDatabase.new(PeerPersistenceDbUrl)

  ? peer_store_sqlite_migrations.migrate(db)

  let res = WakuPeerStorage.new(db)
  if res.isErr():
    return err("failed to init peer store: " & res.error)

  ok(some(res.value))

proc setupPeerPersistence*(app: var App): AppResult[void] =
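  ## When peer persistence is enabled in the configuration, open the on-disk
  ## peer store and attach it to the app; otherwise do nothing.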
  if not app.conf.peerPersistence:
    return ok()

  let peerStoreRes = setupPeerStorage()
  if peerStoreRes.isErr():
    return err("failed to setup peer store: " & peerStoreRes.error)

  app.peerStore = peerStoreRes.get()

  ok()

## Retrieve dynamic bootstrap nodes (DNS discovery)

proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool,
                                    dnsDiscoveryUrl: string,
                                    dnsDiscoveryNameServers: seq[IpAddress]):
                                    AppResult[seq[RemotePeerInfo]] =
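  ## Retrieve dynamic bootstrap peers via Waku DNS discovery when it is
  ## enabled and a discovery URL is configured; otherwise return an empty seq.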

  if dnsDiscovery and dnsDiscoveryUrl != "":
    # DNS discovery
    debug "Discovering nodes using Waku DNS discovery", url=dnsDiscoveryUrl

    var nameServers: seq[TransportAddress]
    for ip in dnsDiscoveryNameServers:
      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53

    let dnsResolver = DnsResolver.new(nameServers)

    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain=domain
      let resolved = await dnsResolver.resolveTxt(domain)
      return resolved[0] # Use only first answer

    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
    if wakuDnsDiscovery.isOk():
      return wakuDnsDiscovery.get().findPeers()
        .mapErr(proc (e: cstring): string = $e)
    else:
      warn "Failed to init Waku DNS discovery"

  debug "No method for retrieving dynamic bootstrap nodes specified."
  ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default

proc setupDyamicBootstrapNodes*(app: var App): AppResult[void] =
  let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(app.conf.dnsDiscovery,
                                                               app.conf.dnsDiscoveryUrl,
                                                               app.conf.dnsDiscoveryNameServers)
  if dynamicBootstrapNodesRes.isOk():
    app.dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
  else:
    warn "2/7 Retrieving dynamic bootstrap nodes failed. Continuing without dynamic bootstrap nodes.", error=dynamicBootstrapNodesRes.error

  ok()

## Setup DiscoveryV5

proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 =
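  ## Build a WakuDiscoveryV5 instance from the configured bootstrap ENRs plus
  ## the dynamically retrieved bootstrap nodes. The instance is only
  ## constructed here; it is started later, in startApp.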
  let dynamicBootstrapEnrs = app.dynamicBootstrapNodes
                              .filterIt(it.hasUdpPort())
                              .mapIt(it.enr.get())

  var discv5BootstrapEnrs: seq[enr.Record]

  # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
  for enrUri in app.conf.discv5BootstrapNodes:
    addBootstrapNode(enrUri, discv5BootstrapEnrs)

  discv5BootstrapEnrs.add(dynamicBootstrapEnrs)

  let discv5Config = DiscoveryConfig.init(app.conf.discv5TableIpLimit,
                                          app.conf.discv5BucketIpLimit,
                                          app.conf.discv5BitsPerHop)

  let discv5UdpPort = Port(uint16(app.conf.discv5UdpPort) + app.conf.portsShift)

  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: some(discv5Config),
    address: app.conf.listenAddress,
    port: discv5UdpPort,
    privateKey: keys.PrivateKey(app.key.skkey),
    bootstrapRecords: discv5BootstrapEnrs,
    autoupdateRecord: app.conf.discv5EnrAutoUpdate,
  )

  WakuDiscoveryV5.new(
    app.rng,
    discv5Conf,
    some(app.record),
    some(app.node.peerManager),
    app.node.topicSubscriptionQueue,
  )

## Init waku node instance

proc initNode(conf: WakuNodeConf,
              netConfig: NetConfig,
              rng: ref HmacDrbgContext,
              nodeKey: crypto.PrivateKey,
              record: enr.Record,
              peerStore: Option[WakuPeerStorage],
              dynamicBootstrapNodes: openArray[RemotePeerInfo] = @[]): AppResult[WakuNode] =
  ## Setup a basic Waku v2 node based on a supplied configuration
  ## file. Optionally include persistent peer storage.
  ## No protocols are mounted yet.

  var dnsResolver: DnsResolver
  if conf.dnsAddrs:
    # Support for DNS multiaddrs
    var nameServers: seq[TransportAddress]
    for ip in conf.dnsAddrsNameServers:
      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53

    dnsResolver = DnsResolver.new(nameServers)

  var node: WakuNode

  let pStorage = if peerStore.isNone(): nil
                 else: peerStore.get()

  # Build waku node instance
  var builder = WakuNodeBuilder.init()
  builder.withRng(rng)
  builder.withNodeKey(nodeKey)
  builder.withRecord(record)
  builder.withNetworkConfiguration(netConfig)
  builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity)
  builder.withSwitchConfiguration(
    maxConnections = some(conf.maxConnections.int),
    secureKey = some(conf.websocketSecureKeyPath),
    secureCert = some(conf.websocketSecureCertPath),
    nameResolver = dnsResolver,
    sendSignedPeerRecord = conf.relayPeerExchange, # We send our own signed peer record when peer exchange enabled
    agentString = some(conf.agentString)
  )
  builder.withColocationLimit(conf.colocationLimit)
  builder.withPeerManagerConfig(
    maxRelayPeers = conf.maxRelayPeers,
    shardAware = conf.relayShardedPeerManagement)

  node = ? builder.build().mapErr(proc (err: string): string = "failed to create waku node instance: " & err)

  ok(node)

proc setupWakuApp*(app: var App): AppResult[void] =
  ## Waku node
  let initNodeRes = initNode(app.conf, app.netConf, app.rng, app.key, app.record, app.peerStore, app.dynamicBootstrapNodes)
  if initNodeRes.isErr():
    return err("failed to init node: " & initNodeRes.error)

  app.node = initNodeRes.get()

  ## Discv5
  if app.conf.discv5Discovery:
    app.wakuDiscV5 = some(app.setupDiscoveryV5())

  ok()

proc getPorts(listenAddrs: seq[MultiAddress]):
              AppResult[tuple[tcpPort, websocketPort: Option[Port]]] =
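  ## Extract the bound TCP and WebSocket ports, if any, from the node's
  ## listen addresses.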

  var tcpPort, websocketPort = none(Port)

  for a in listenAddrs:
    if a.isWsAddress():
      if websocketPort.isNone():
        let wsAddress = initTAddress(a).valueOr:
          return err("getPorts wsAddr error:" & $error)
        websocketPort = some(wsAddress.port)
    elif tcpPort.isNone():
      let tcpAddress = initTAddress(a).valueOr:
        return err("getPorts tcpAddr error:" & $error)
      tcpPort = some(tcpAddress.port)

  return ok((tcpPort: tcpPort, websocketPort: websocketPort))

proc updateNetConfig(app: var App): AppResult[void] =

  var conf = app.conf
  let (tcpPort, websocketPort) = getPorts(app.node.switch.peerInfo.listenAddrs).valueOr:
    return err("Could not retrieve ports " & error)

  if tcpPort.isSome():
    conf.tcpPort = tcpPort.get()

  if websocketPort.isSome():
    conf.websocketPort = websocketPort.get()

  # Rebuild NetConfig with bound port values
  let netConf = networkConfiguration(conf, clientId).valueOr:
    return err("Could not update NetConfig: " & error)

  app.netConf = netConf

  return ok()

proc updateEnr(app: var App): AppResult[void] =

  let record = enrConfiguration(app.conf, app.netConf, app.key).valueOr:
    return err("ENR setup failed: " & error)

  if isClusterMismatched(record, app.conf.clusterId):
    return err("cluster id mismatch with configured shards")

  app.record = record
  app.node.enr = record

  if app.conf.discv5Discovery:
    app.wakuDiscV5 = some(app.setupDiscoveryV5())

  return ok()

proc updateApp(app: var App): AppResult[void] =
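  ## Rebuild the network config and ENR with the ports that were actually
  ## bound and re-announce them; only needed when port 0 (a random port) was
  ## requested for TCP or WebSocket.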
  if app.conf.tcpPort == Port(0) or app.conf.websocketPort == Port(0):

    updateNetConfig(app).isOkOr:
      return err("error calling updateNetConfig: " & $error)

    updateEnr(app).isOkOr:
      return err("error calling updateEnr: " & $error)

    app.node.announcedAddresses = app.netConf.announcedAddresses

    printNodeNetworkInfo(app.node)

  return ok()

## Mount protocols

proc setupProtocols(node: WakuNode,
                    conf: WakuNodeConf,
                    nodeKey: crypto.PrivateKey):
                    Future[AppResult[void]] {.async.} =
  ## Setup configured protocols on an existing Waku v2 node.
  ## Optionally include persistent message storage.
  ## No protocols are started yet.

  node.mountMetadata(conf.clusterId).isOkOr:
    return err("failed to mount waku metadata protocol: " & error)

  # Mount relay on all nodes
  var peerExchangeHandler = none(RoutingRecordsHandler)
  if conf.relayPeerExchange:
    proc handlePeerExchange(peer: PeerId, topic: string,
                            peers: seq[RoutingRecordsPair]) {.gcsafe.} =
      ## Handle peers received via gossipsub peer exchange
      # TODO: Only consider peers on pubsub topics we subscribe to
      let exchangedPeers = peers.filterIt(it.record.isSome()) # only peers with populated records
                                .mapIt(toRemotePeerInfo(it.record.get()))

      debug "connecting to exchanged peers", src=peer, topic=topic, numPeers=exchangedPeers.len

      # asyncSpawn, as we don't want to block here
      asyncSpawn node.connectToNodes(exchangedPeers, "peer exchange")

    peerExchangeHandler = some(handlePeerExchange)

  if conf.relay:
    let pubsubTopics =
      if conf.pubsubTopics.len > 0 or conf.contentTopics.len > 0:
        # TODO autoshard content topics only once.
        # Already checked for errors in app.init
        let shards = conf.contentTopics.mapIt(getShard(it).expect("Valid Shard"))
        conf.pubsubTopics & shards
      else:
        conf.topics

    let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
      return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)

    debug "Setting max message size", num_bytes=parsedMaxMsgSize

    try:
      await mountRelay(node, pubsubTopics, peerExchangeHandler = peerExchangeHandler,
                       int(parsedMaxMsgSize))
    except CatchableError:
      return err("failed to mount waku relay protocol: " & getCurrentExceptionMsg())

    # Add validation keys to protected topics
    var subscribedProtectedTopics: seq[ProtectedTopic]
    for topicKey in conf.protectedTopics:
      if topicKey.topic notin pubsubTopics:
        warn "protected topic not in subscribed pubsub topics, skipping adding validator",
          protectedTopic=topicKey.topic, subscribedTopics=pubsubTopics
        continue
      subscribedProtectedTopics.add(topicKey)
      notice "routing only signed traffic", protectedTopic=topicKey.topic, publicKey=topicKey.key
    node.wakuRelay.addSignedTopicsValidator(subscribedProtectedTopics)

    # Enable Rendezvous Discovery protocol when Relay is enabled
    try:
      await mountRendezvous(node)
    except CatchableError:
      return err("failed to mount waku rendezvous protocol: " & getCurrentExceptionMsg())

  # Keepalive mounted on all nodes
  try:
    await mountLibp2pPing(node)
  except CatchableError:
    return err("failed to mount libp2p ping protocol: " & getCurrentExceptionMsg())

  var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
    ## Action to be taken when an internal error occurs during the node run.
    ## e.g. the connection with the database is lost and not recovered.
    error "Unrecoverable error occurred", error = msg
    quit(QuitFailure)

  if conf.rlnRelay:
    when defined(rln_v2):
      let rlnConf = WakuRlnConfig(
        rlnRelayDynamic: conf.rlnRelayDynamic,
        rlnRelayCredIndex: conf.rlnRelayCredIndex,
        rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
        rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
        rlnRelayCredPath: conf.rlnRelayCredPath,
        rlnRelayCredPassword: conf.rlnRelayCredPassword,
        rlnRelayTreePath: conf.rlnRelayTreePath,
        rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
        onFatalErrorAction: onFatalErrorAction,
      )
    else:
      let rlnConf = WakuRlnConfig(
        rlnRelayDynamic: conf.rlnRelayDynamic,
        rlnRelayCredIndex: conf.rlnRelayCredIndex,
        rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
        rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
        rlnRelayCredPath: conf.rlnRelayCredPath,
        rlnRelayCredPassword: conf.rlnRelayCredPassword,
        rlnRelayTreePath: conf.rlnRelayTreePath,
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
        onFatalErrorAction: onFatalErrorAction,
      )

    try:
      waitFor node.mountRlnRelay(rlnConf)
    except CatchableError:
      return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg())

  if conf.store:
    # Archive setup
    let archiveDriverRes = waitFor ArchiveDriver.new(conf.storeMessageDbUrl,
                                                     conf.storeMessageDbVacuum,
                                                     conf.storeMessageDbMigration,
                                                     conf.storeMaxNumDbConnections,
                                                     onFatalErrorAction)
    if archiveDriverRes.isErr():
      return err("failed to setup archive driver: " & archiveDriverRes.error)

    let retPolicyRes = RetentionPolicy.new(conf.storeMessageRetentionPolicy)
    if retPolicyRes.isErr():
      return err("failed to create retention policy: " & retPolicyRes.error)

    let mountArcRes = node.mountArchive(archiveDriverRes.get(),
                                        retPolicyRes.get())
    if mountArcRes.isErr():
      return err("failed to mount waku archive protocol: " & mountArcRes.error)

    # Store setup
    try:
      await mountStore(node)
    except CatchableError:
      return err("failed to mount waku store protocol: " & getCurrentExceptionMsg())

  mountStoreClient(node)
  if conf.storenode != "":
    let storeNode = parsePeerInfo(conf.storenode)
    if storeNode.isOk():
      node.peerManager.addServicePeer(storeNode.value, WakuStoreCodec)
    else:
      return err("failed to set node waku store peer: " & storeNode.error)

  # NOTE Must be mounted after relay
  if conf.lightpush:
    try:
      await mountLightPush(node)
    except CatchableError:
      return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg())

  if conf.lightpushnode != "":
    let lightPushNode = parsePeerInfo(conf.lightpushnode)
    if lightPushNode.isOk():
      mountLightPushClient(node)
      node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec)
    else:
      return err("failed to set node waku lightpush peer: " & lightPushNode.error)

  # Filter setup. NOTE Must be mounted after relay
  if conf.filter:
    try:
      await mountLegacyFilter(node, filterTimeout = chronos.seconds(conf.filterTimeout))
    except CatchableError:
      return err("failed to mount waku legacy filter protocol: " & getCurrentExceptionMsg())

    try:
      await mountFilter(node,
                        subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout),
                        maxFilterPeers = conf.filterMaxPeersToServe,
                        maxFilterCriteriaPerPeer = conf.filterMaxCriteria)
    except CatchableError:
      return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg())

  if conf.filternode != "":
    let filterNode = parsePeerInfo(conf.filternode)
    if filterNode.isOk():
      try:
        await node.mountFilterClient()
        node.peerManager.addServicePeer(filterNode.value, WakuLegacyFilterCodec)
        node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec)
      except CatchableError:
        return err("failed to mount waku filter client protocol: " & getCurrentExceptionMsg())
    else:
      return err("failed to set node waku filter peer: " & filterNode.error)

  # waku peer exchange setup
  if conf.peerExchangeNode != "" or conf.peerExchange:
    try:
      await mountPeerExchange(node)
    except CatchableError:
      return err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg())

    if conf.peerExchangeNode != "":
      let peerExchangeNode = parsePeerInfo(conf.peerExchangeNode)
      if peerExchangeNode.isOk():
        node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec)
      else:
        return err("failed to set node waku peer-exchange peer: " & peerExchangeNode.error)

  return ok()

proc setupAndMountProtocols*(app: App): Future[AppResult[void]] {.async.} =
  return await setupProtocols(
    app.node,
    app.conf,
    app.key
  )

## Start node

proc startNode(node: WakuNode, conf: WakuNodeConf,
               dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]): Future[AppResult[void]] {.async.} =
  ## Start a configured node and all mounted protocols.
  ## Connect to static nodes and start
  ## keep-alive, if configured.

  # Start Waku v2 node
  try:
    await node.start()
  except CatchableError:
    return err("failed to start waku node: " & getCurrentExceptionMsg())

  # Connect to configured static nodes
  if conf.staticnodes.len > 0:
    try:
      await connectToNodes(node, conf.staticnodes, "static")
    except CatchableError:
      return err("failed to connect to static nodes: " & getCurrentExceptionMsg())

  if dynamicBootstrapNodes.len > 0:
    info "Connecting to dynamic bootstrap peers"
    try:
      await connectToNodes(node, dynamicBootstrapNodes, "dynamic bootstrap")
    except CatchableError:
      return err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg())

  # Retrieve peer exchange (PX) peers and add them to the peer store
  if conf.peerExchangeNode != "":
    let desiredOutDegree = node.wakuRelay.parameters.d.uint64()
    await node.fetchPeerExchangePeers(desiredOutDegree)

  # Start keepalive, if enabled
  if conf.keepAlive:
    node.startKeepalive()

  # Maintain relay connections
  if conf.relay:
    node.peerManager.start()

  return ok()

proc startApp*(app: var App): AppResult[void] =
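  ## Start the node with all mounted protocols, refresh app data that is only
  ## known after startup (bound ports, ENR) and start Discovery v5 if it was
  ## set up.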
  let nodeRes = catch: (waitFor startNode(app.node, app.conf, app.dynamicBootstrapNodes))
  if nodeRes.isErr():
    return err("exception starting node: " & nodeRes.error.msg)

  nodeRes.get().isOkOr:
    return err("exception starting node: " & error)

  # Update app data that is set dynamically on node start
  app.updateApp().isOkOr:
    return err("Error in updateApp: " & $error)

  if app.wakuDiscv5.isSome():
    let wakuDiscv5 = app.wakuDiscv5.get()
    let catchRes = catch: (waitFor wakuDiscv5.start())
    let startRes = catchRes.valueOr:
      return err("failed to start waku discovery v5: " & catchRes.error.msg)

    startRes.isOkOr:
      return err("failed to start waku discovery v5: " & error)

  return ok()


## Monitoring and external interfaces

proc startRestServer(app: App,
                     address: IpAddress,
                     port: Port,
                     conf: WakuNodeConf):
                     AppResult[WakuRestServerRef] =
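  ## Create and start the REST HTTP server, installing the API handlers that
  ## match the node configuration (admin, debug, health, relay, filter, store
  ## and lightpush).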

  # Used to register api endpoints that are not currently installed as keys,
  # values are holding error messages to be returned to the client
  var notInstalledTab: Table[string, string] = initTable[string, string]()

  let requestErrorHandler: RestRequestErrorHandler = proc (error: RestRequestError,
                                                           request: HttpRequestRef):
                                                           Future[HttpResponseRef]
                                                           {.async: (raises: [CancelledError]).} =
    try:
      case error
      of RestRequestError.Invalid:
        return await request.respond(Http400, "Invalid request", HttpTable.init())
      of RestRequestError.NotFound:
        let paths = request.rawPath.split("/")
        let rootPath = if len(paths) > 1:
                         paths[1]
                       else:
                         ""
        notInstalledTab.withValue(rootPath, errMsg):
          return await request.respond(Http404, errMsg[], HttpTable.init())
        do:
          return await request.respond(Http400, "Bad request initiated. Invalid path or method used.", HttpTable.init())
      of RestRequestError.InvalidContentBody:
        return await request.respond(Http400, "Invalid content body", HttpTable.init())
      of RestRequestError.InvalidContentType:
        return await request.respond(Http400, "Invalid content type", HttpTable.init())
      of RestRequestError.Unexpected:
        return defaultResponse()
    except HttpWriteError:
      error "Failed to write response to client", error = getCurrentExceptionMsg()
      discard

    return defaultResponse()

  let allowedOrigin = if len(conf.restAllowOrigin) > 0:
                        some(conf.restAllowOrigin.join(","))
                      else:
                        none(string)

  let server = ? newRestHttpServer(address, port,
                                   allowedOrigin = allowedOrigin,
                                   requestErrorHandler = requestErrorHandler)

  ## Admin REST API
  if conf.restAdmin:
    installAdminApiHandlers(server.router, app.node)

  ## Debug REST API
  installDebugApiHandlers(server.router, app.node)

  ## Health REST API
  installHealthApiHandler(server.router, app.node)

  ## Relay REST API
  if conf.relay:
    let cache = MessageCache.init(int(conf.restRelayCacheCapacity))

    let handler = messageCacheHandler(cache)

    for pubsubTopic in conf.pubsubTopics:
      cache.pubsubSubscribe(pubsubTopic)
      app.node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler))

    for contentTopic in conf.contentTopics:
      cache.contentSubscribe(contentTopic)
      app.node.subscribe((kind: ContentSub, topic: contentTopic), some(handler))

    installRelayApiHandlers(server.router, app.node, cache)
  else:
    notInstalledTab["relay"] = "/relay endpoints are not available. Please check your configuration: --relay"

  ## Filter REST API
  if conf.filternode != "" and
     app.node.wakuFilterClient != nil and
     app.node.wakuFilterClientLegacy != nil:

    let legacyFilterCache = MessageCache.init()
    rest_legacy_filter_api.installLegacyFilterRestApiHandlers(server.router, app.node, legacyFilterCache)

    let filterCache = MessageCache.init()

    let filterDiscoHandler =
      if app.wakuDiscv5.isSome():
        some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Filter))
      else: none(DiscoveryHandler)

    rest_filter_api.installFilterRestApiHandlers(
      server.router,
      app.node,
      filterCache,
      filterDiscoHandler,
    )
  else:
    notInstalledTab["filter"] = "/filter endpoints are not available. Please check your configuration: --filternode"

  ## Store REST API
  let storeDiscoHandler =
    if app.wakuDiscv5.isSome():
      some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Store))
    else: none(DiscoveryHandler)

  installStoreApiHandlers(server.router, app.node, storeDiscoHandler)

  ## Light push API
  if conf.lightpushnode != "" and
     app.node.wakuLightpushClient != nil:
    let lightDiscoHandler =
      if app.wakuDiscv5.isSome():
        some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Lightpush))
      else: none(DiscoveryHandler)

    rest_lightpush_api.installLightPushRequestHandler(server.router, app.node, lightDiscoHandler)
  else:
    notInstalledTab["lightpush"] = "/lightpush endpoints are not available. Please check your configuration: --lightpushnode"

  server.start()
  info "Starting REST HTTP server", url = "http://" & $address & ":" & $port & "/"

  ok(server)

proc startMetricsServer(serverIp: IpAddress, serverPort: Port): AppResult[MetricsHttpServerRef] =
  info "Starting metrics HTTP server", serverIp= $serverIp, serverPort= $serverPort

  let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
  if metricsServerRes.isErr():
    return err("metrics HTTP server start failed: " & $metricsServerRes.error)

  let server = metricsServerRes.value
  try:
    waitFor server.start()
  except CatchableError:
    return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())

  info "Metrics HTTP server started", serverIp= $serverIp, serverPort= $serverPort
  ok(server)

proc startMetricsLogging(): AppResult[void] =
  startMetricsLog()
  ok()

proc setupMonitoringAndExternalInterfaces*(app: var App): AppResult[void] =
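  ## Start the configured monitoring and external interfaces: REST API,
  ## metrics HTTP server and metrics logging. Failures are logged but do not
  ## abort startup.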
  if app.conf.rest:
    let startRestServerRes = startRestServer(app, app.conf.restAddress, Port(app.conf.restPort + app.conf.portsShift), app.conf)
    if startRestServerRes.isErr():
      error "6/7 Starting REST server failed. Continuing in current state.", error=startRestServerRes.error
    else:
      app.restServer = some(startRestServerRes.value)

  if app.conf.metricsServer:
    let startMetricsServerRes = startMetricsServer(app.conf.metricsServerAddress, Port(app.conf.metricsServerPort + app.conf.portsShift))
    if startMetricsServerRes.isErr():
      error "6/7 Starting metrics server failed. Continuing in current state.", error=startMetricsServerRes.error
    else:
      app.metricsServer = some(startMetricsServerRes.value)

  if app.conf.metricsLogging:
    let startMetricsLoggingRes = startMetricsLogging()
    if startMetricsLoggingRes.isErr():
      error "6/7 Starting metrics console logging failed. Continuing in current state.", error=startMetricsLoggingRes.error

  ok()


# App shutdown

proc stop*(app: App): Future[void] {.async: (raises: [Exception]).} =
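  ## Stop the running components in order: REST server, metrics server,
  ## Discovery v5 and finally the Waku node itself.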
  if app.restServer.isSome():
    await app.restServer.get().stop()

  if app.metricsServer.isSome():
    await app.metricsServer.get().stop()

  if app.wakuDiscv5.isSome():
    await app.wakuDiscv5.get().stop()

  if not app.node.isNil():
    await app.node.stop()