2024-06-28 10:34:57 +00:00
|
|
|
|
{.push raises: [].}
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
|
|
|
|
import
|
2024-10-28 08:17:46 +00:00
|
|
|
|
std/[options, sequtils],
|
2024-07-09 11:14:28 +00:00
|
|
|
|
results,
|
2023-04-25 13:34:57 +00:00
|
|
|
|
chronicles,
|
|
|
|
|
chronos,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
libp2p/protocols/connectivity/relay/relay,
|
|
|
|
|
libp2p/protocols/connectivity/relay/client,
|
2023-10-27 07:11:47 +00:00
|
|
|
|
libp2p/wire,
|
2023-04-25 13:34:57 +00:00
|
|
|
|
libp2p/crypto/crypto,
|
|
|
|
|
libp2p/protocols/pubsub/gossipsub,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
libp2p/services/autorelayservice,
|
|
|
|
|
libp2p/services/hpservice,
|
2023-04-25 13:34:57 +00:00
|
|
|
|
libp2p/peerid,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
libp2p/discovery/discoverymngr,
|
|
|
|
|
libp2p/discovery/rendezvousinterface,
|
2023-04-25 13:34:57 +00:00
|
|
|
|
eth/keys,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
eth/p2p/discoveryv5/enr,
|
2023-04-26 17:25:18 +00:00
|
|
|
|
presto,
|
|
|
|
|
metrics,
|
|
|
|
|
metrics/chronos_httpserver
|
2023-04-25 13:34:57 +00:00
|
|
|
|
import
|
2024-07-05 22:03:38 +00:00
|
|
|
|
../common/logging,
|
|
|
|
|
../waku_core,
|
|
|
|
|
../waku_node,
|
|
|
|
|
../node/peer_manager,
|
|
|
|
|
../node/health_monitor,
|
2024-12-20 11:25:49 +00:00
|
|
|
|
../node/waku_metrics,
|
2024-08-27 14:49:46 +00:00
|
|
|
|
../node/delivery_monitor/delivery_monitor,
|
2024-07-05 22:03:38 +00:00
|
|
|
|
../waku_api/message_cache,
|
|
|
|
|
../waku_api/rest/server,
|
|
|
|
|
../waku_archive,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
../waku_relay/protocol,
|
2024-07-05 22:03:38 +00:00
|
|
|
|
../discovery/waku_dnsdisc,
|
|
|
|
|
../discovery/waku_discv5,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
../discovery/autonat_service,
|
2024-07-05 22:03:38 +00:00
|
|
|
|
../waku_enr/sharding,
|
|
|
|
|
../waku_rln_relay,
|
|
|
|
|
../waku_store,
|
|
|
|
|
../waku_filter_v2,
|
|
|
|
|
../factory/networks_config,
|
|
|
|
|
../factory/node_factory,
|
|
|
|
|
../factory/internal_config,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
../factory/external_config,
|
2024-12-13 16:38:16 +00:00
|
|
|
|
../factory/app_callbacks,
|
2024-10-28 08:17:46 +00:00
|
|
|
|
../waku_enr/multiaddr
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
|
|
|
|
# Chronicles scoped metadata: every log line emitted from this module
# carries the "wakunode waku" topic.
logScope:
  topics = "wakunode waku"

# Git version in git describe format (defined at compile time via
# -d:git_version=<value>; defaults to "n/a" when not supplied)
const git_version* {.strdefine.} = "n/a"
|
|
|
|
|
|
2024-10-28 08:17:46 +00:00
|
|
|
|
type Waku* = ref object
  ## Top-level application object bundling the Waku node together with its
  ## configuration, discovery machinery and auxiliary servers.
  version: string # software version string (set to git_version at construction)
  conf: WakuNodeConf # effective (possibly overridden) node configuration
  rng: ref HmacDrbgContext # shared RNG used for key generation and libp2p services
  key: crypto.PrivateKey # node identity key (from conf.nodekey)

  wakuDiscv5*: WakuDiscoveryV5 # discv5 discovery; may be nil when disabled
  dynamicBootstrapNodes: seq[RemotePeerInfo] # peers obtained via DNS discovery
  dnsRetryLoopHandle: Future[void] # handle of the DNS-discovery retry loop, if started
  discoveryMngr: DiscoveryManager

  node*: WakuNode # the underlying Waku node

  deliveryMonitor: DeliveryMonitor # reliability monitor; nil unless reliability is enabled

  restServer*: WakuRestServerRef
  metricsServer*: MetricsHttpServerRef
  appCallbacks*: AppCallbacks # optional externally-supplied callbacks
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
2024-05-13 15:45:48 +00:00
|
|
|
|
proc logConfig(conf: WakuNodeConf) =
  ## Log a human-readable summary of the node configuration: enabled
  ## protocols, network/cluster parameters, shards, discv5 bootstrap nodes
  ## and (when dynamic RLN relay is on) the RLN validation settings.
  info "Configuration: Enabled protocols",
    relay = conf.relay,
    rlnRelay = conf.rlnRelay,
    store = conf.store,
    filter = conf.filter,
    lightpush = conf.lightpush,
    peerExchange = conf.peerExchange

  info "Configuration. Network", cluster = conf.clusterId, maxPeers = conf.maxRelayPeers

  # One log line per configured shard / bootstrap node for easy grepping.
  for shard in conf.shards:
    info "Configuration. Shards", shard = shard

  for i in conf.discv5BootstrapNodes:
    info "Configuration. Bootstrap nodes", node = i

  # RLN validation details only apply when rln-relay runs in dynamic
  # (on-chain) mode.
  if conf.rlnRelay and conf.rlnRelayDynamic:
    info "Configuration. Validation",
      mechanism = "onchain rln",
      contract = conf.rlnRelayEthContractAddress,
      maxMessageSize = conf.maxMessageSize,
      rlnEpochSizeSec = conf.rlnEpochSizeSec,
      rlnRelayUserMessageLimit = conf.rlnRelayUserMessageLimit,
      rlnRelayEthClientAddress = string(conf.rlnRelayEthClientAddress)
|
|
|
|
|
|
2024-05-03 12:07:15 +00:00
|
|
|
|
func version*(waku: Waku): string =
  ## The node's software version string (captured at build time).
  result = waku.version
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
2024-09-10 21:07:12 +00:00
|
|
|
|
proc validateShards(conf: WakuNodeConf): Result[void, string] =
  ## Verify that every configured shard index fits within the network's
  ## shard count; fails (and logs) on the first out-of-range shard.
  let shardCount = getNumShardsInNetwork(conf)

  for shardId in conf.shards:
    if shardId < shardCount:
      continue
    let msg =
      "validateShards invalid shard: " & $shardId & " when numShardsInNetwork: " &
      $shardCount # fmt doesn't work
    error "validateShards failed", error = msg
    return err(msg)

  return ok()
|
|
|
|
|
|
2024-10-28 08:17:46 +00:00
|
|
|
|
proc setupSwitchServices(
    waku: Waku, conf: WakuNodeConf, circuitRelay: Relay, rng: ref HmacDrbgContext
) =
  ## Install libp2p switch services on the node: autonat always, plus
  ## hole-punching (autonat + auto-relay) when the node is a relay client.
  ## `onReservation` keeps the node's announced addresses (and the discv5
  ## multiaddress, when discv5 is running) in sync with relay reservations.
  proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
    debug "circuit relay handler new reserve event",
      addrs_before = $(waku.node.announcedAddresses), addrs = $addresses

    # Replace (not append to) the announced address set with the
    # relay-provided addresses.
    waku.node.announcedAddresses.setLen(0) ## remove previous addresses
    waku.node.announcedAddresses.add(addresses)
    debug "waku node announced addresses updated",
      announcedAddresses = waku.node.announcedAddresses

    # Propagate the new addresses to discv5, if it has been set up.
    if not isNil(waku.wakuDiscv5):
      waku.wakuDiscv5.updateAnnouncedMultiAddress(addresses).isOkOr:
        error "failed to update announced multiaddress", error = $error

  let autonatService = getAutonatService(rng)
  if conf.isRelayClient:
    ## The node is considered to be behind a NAT or firewall and then it
    ## should struggle to be reachable and establish connections to other nodes
    const MaxNumRelayServers = 2
    # NOTE(review): assumes circuitRelay was created as a RelayClient when
    # conf.isRelayClient is set (see newCircuitRelay) — the cast relies on it.
    let autoRelayService = AutoRelayService.new(
      MaxNumRelayServers, RelayClient(circuitRelay), onReservation, rng
    )
    let holePunchService = HPService.new(autonatService, autoRelayService)
    waku.node.switch.services = @[Service(holePunchService)]
  else:
    waku.node.switch.services = @[Service(autonatService)]
|
|
|
|
|
|
2023-04-25 13:34:57 +00:00
|
|
|
|
## Initialisation
|
|
|
|
|
|
2024-10-28 08:17:46 +00:00
|
|
|
|
proc newCircuitRelay(isRelayClient: bool): Relay =
  ## Build the circuit-relay transport: a RelayClient when this node acts
  ## as a relay client, a plain Relay otherwise.
  if not isRelayClient:
    return Relay.new()
  return RelayClient.new()
|
|
|
|
|
|
2024-12-13 16:38:16 +00:00
|
|
|
|
proc setupAppCallbacks(
    node: WakuNode, conf: WakuNodeConf, appCallbacks: AppCallbacks
): Result[void, string] =
  ## Wire externally-provided application callbacks into the node.
  ## Currently only `relayHandler` is honoured: it is subscribed to every
  ## configured shard plus the auto-shards derived from the content topics.
  ## A nil `appCallbacks` is valid and means "nothing to set up".
  if appCallbacks.isNil():
    info "No external callbacks to be set"
    return ok()

  if not appCallbacks.relayHandler.isNil():
    # The relay handler only makes sense when the Relay protocol is mounted.
    if node.wakuRelay.isNil():
      return err("Cannot configure relayHandler callback without Relay mounted")

    let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
      return err("Could not get autoshards: " & error)

    # Statically configured shards, all on this node's cluster.
    let confShards =
      conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
    let shards = confShards & autoShards

    # subscribe() result intentionally ignored; the handler is attached
    # per-shard topic.
    for shard in shards:
      discard node.wakuRelay.subscribe($shard, appCallbacks.relayHandler)

  return ok()
|
|
|
|
|
|
|
|
|
|
proc new*(
    T: type Waku, confCopy: var WakuNodeConf, appCallbacks: AppCallbacks = nil
): Result[Waku, string] =
  ## Construct a Waku application object from the given configuration.
  ## Mutates `confCopy` in place: derives shards from legacy pubsub topics,
  ## applies The Waku Network preset for cluster-id 1, and generates a node
  ## key when none was provided. Does NOT start the node (see startWaku).
  let rng = crypto.newRng()

  logging.setupLog(confCopy.logLevel, confCopy.logFormat)

  # TODO: remove after pubsubtopic config gets removed
  # Legacy path: translate --pubsub-topic entries into shard ids, enforcing
  # that their cluster matches the node's cluster.
  var shards = newSeq[uint16]()
  if confCopy.pubsubTopics.len > 0:
    let shardsRes = topicsToRelayShards(confCopy.pubsubTopics)
    if shardsRes.isErr():
      error "failed to parse pubsub topic, please format according to static shard specification",
        error = shardsRes.error
      return err("failed to parse pubsub topic: " & $shardsRes.error)

    let shardsOpt = shardsRes.get()

    if shardsOpt.isSome():
      let relayShards = shardsOpt.get()
      if relayShards.clusterId != confCopy.clusterId:
        error "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22",
          nodeCluster = confCopy.clusterId, pubsubCluster = relayShards.clusterId
        return err(
          "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22"
        )

      for shard in relayShards.shardIds:
        shards.add(shard)
      confCopy.shards = shards

  case confCopy.clusterId

  # cluster-id=1 (aka The Waku Network)
  of 1:
    let twnClusterConf = ClusterConf.TheWakuNetworkConf()

    # Override configuration
    # The TWN preset takes precedence over user-supplied values for these
    # fields; bootstrap nodes are appended rather than replaced.
    confCopy.maxMessageSize = twnClusterConf.maxMessageSize
    confCopy.clusterId = twnClusterConf.clusterId
    confCopy.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
    confCopy.rlnRelayChainId = twnClusterConf.rlnRelayChainId
    confCopy.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
    confCopy.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
    confCopy.discv5Discovery = twnClusterConf.discv5Discovery
    confCopy.discv5BootstrapNodes =
      confCopy.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
    confCopy.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
    confCopy.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
    confCopy.numShardsInNetwork = twnClusterConf.numShardsInNetwork

    # Only set rlnRelay to true if relay is configured
    if confCopy.relay:
      confCopy.rlnRelay = twnClusterConf.rlnRelay
  else:
    discard

  info "Running nwaku node", version = git_version
  logConfig(confCopy)

  # Validate shards AFTER any preset overrides, so the check uses the
  # effective numShardsInNetwork.
  let validateShardsRes = validateShards(confCopy)
  if validateShardsRes.isErr():
    error "Failed validating shards", error = $validateShardsRes.error
    return err("Failed validating shards: " & $validateShardsRes.error)

  # Generate a fresh secp256k1 node key when none was configured.
  if not confCopy.nodekey.isSome():
    let keyRes = crypto.PrivateKey.random(Secp256k1, rng[])
    if keyRes.isErr():
      error "Failed to generate key", error = $keyRes.error
      return err("Failed to generate key: " & $keyRes.error)
    confCopy.nodekey = some(keyRes.get())

  var relay = newCircuitRelay(confCopy.isRelayClient)

  let nodeRes = setupNode(confCopy, rng, relay)
  if nodeRes.isErr():
    error "Failed setting up node", error = nodeRes.error
    return err("Failed setting up node: " & nodeRes.error)

  let node = nodeRes.get()

  node.setupAppCallbacks(confCopy, appCallbacks).isOkOr:
    error "Failed setting up app callbacks", error = error
    return err("Failed setting up app callbacks: " & $error)

  ## Delivery Monitor
  # Reliability mode requires an explicit store node to verify delivery
  # against.
  var deliveryMonitor: DeliveryMonitor
  if confCopy.reliabilityEnabled:
    if confCopy.storenode == "":
      return err("A storenode should be set when reliability mode is on")

    let deliveryMonitorRes = DeliveryMonitor.new(
      node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient,
      node.wakuFilterClient,
    )
    if deliveryMonitorRes.isErr():
      return err("could not create delivery monitor: " & $deliveryMonitorRes.error)
    deliveryMonitor = deliveryMonitorRes.get()

  var waku = Waku(
    version: git_version,
    conf: confCopy,
    rng: rng,
    key: confCopy.nodekey.get(),
    node: node,
    deliveryMonitor: deliveryMonitor,
    appCallbacks: appCallbacks,
  )

  waku.setupSwitchServices(confCopy, relay, rng)

  ok(waku)
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc getPorts(
    listenAddrs: seq[MultiAddress]
): Result[tuple[tcpPort, websocketPort: Option[Port]], string] =
  ## Scan the listen addresses and report the first TCP port and the first
  ## websocket port encountered; either may be absent.
  var boundTcp = none(Port)
  var boundWs = none(Port)

  for listenAddr in listenAddrs:
    if listenAddr.isWsAddress():
      if boundWs.isSome():
        continue
      let resolvedWs = initTAddress(listenAddr).valueOr:
        return err("getPorts wsAddr error:" & $error)
      boundWs = some(resolvedWs.port)
    else:
      if boundTcp.isSome():
        continue
      let resolvedTcp = initTAddress(listenAddr).valueOr:
        return err("getPorts tcpAddr error:" & $error)
      boundTcp = some(resolvedTcp.port)

  return ok((tcpPort: boundTcp, websocketPort: boundWs))
|
|
|
|
|
|
2024-05-03 12:07:15 +00:00
|
|
|
|
proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
  ## Rebuild the NetConfig using the ports the switch actually bound to
  ## (relevant when the configuration requested port 0 / OS-assigned).
  var conf = waku[].conf
  let (tcpPort, websocketPort) = getPorts(waku[].node.switch.peerInfo.listenAddrs).valueOr:
    return err("Could not retrieve ports " & error)

  if tcpPort.isSome():
    conf.tcpPort = tcpPort.get()

  if websocketPort.isSome():
    conf.websocketPort = websocketPort.get()

  # Rebuild NetConfig with bound port values
  # NOTE(review): `clientId` is a module/package-level identifier defined
  # elsewhere.
  let netConf = networkConfiguration(conf, clientId).valueOr:
    return err("Could not update NetConfig: " & error)

  return ok(netConf)
|
2023-10-27 07:11:47 +00:00
|
|
|
|
|
2024-10-28 08:17:46 +00:00
|
|
|
|
proc updateEnr(waku: ptr Waku): Result[void, string] =
  ## Regenerate the node's ENR from the currently-running network config
  ## (i.e. with the actually-bound ports) and store it on the node.
  let netConf: NetConfig = getRunningNetConfig(waku).valueOr:
    return err("error calling updateNetConfig: " & $error)

  let record = enrConfiguration(waku[].conf, netConf, waku[].key).valueOr:
    return err("ENR setup failed: " & error)

  # Reject an ENR whose shard/cluster info disagrees with the configured
  # cluster id.
  if isClusterMismatched(record, waku[].conf.clusterId):
    return err("cluster id mismatch configured shards")

  waku[].node.enr = record

  return ok()
|
|
|
|
|
|
2024-10-28 08:17:46 +00:00
|
|
|
|
proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
  ## Re-encode the node's announced multiaddresses into the ENR multiaddr
  ## field, re-signing the record with the node key, and mirror the updated
  ## record into discv5's local node (when discv5 is running).
  let addresses: seq[MultiAddress] = waku[].node.announcedAddresses
  let encodedAddrs = multiaddr.encodeMultiaddrs(addresses)

  ## First update the enr info contained in WakuNode
  # ENR update requires an eth/keys private key; convert the libp2p key via
  # its raw-byte/hex representation.
  let keyBytes = waku[].key.getRawBytes().valueOr:
    return err("failed to retrieve raw bytes from waku key: " & $error)

  let parsedPk = keys.PrivateKey.fromHex(keyBytes.toHex()).valueOr:
    return err("failed to parse the private key: " & $error)

  let enrFields = @[toFieldPair(MultiaddrEnrField, encodedAddrs)]
  waku[].node.enr.update(parsedPk, enrFields).isOkOr:
    return err("failed to update multiaddress in ENR updateAddressInENR: " & $error)

  debug "Waku node ENR updated successfully with new multiaddress",
    enr = waku[].node.enr.toUri(), record = $(waku[].node.enr)

  ## Now update the ENR info in discv5
  if not waku[].wakuDiscv5.isNil():
    waku[].wakuDiscv5.protocol.localNode.record = waku[].node.enr
    let enr = waku[].wakuDiscv5.protocol.localNode.record

    debug "Waku discv5 ENR updated successfully with new multiaddress",
      enr = enr.toUri(), record = $(enr)

  return ok()
|
|
|
|
|
|
2024-05-03 12:07:15 +00:00
|
|
|
|
proc updateWaku(waku: ptr Waku): Result[void, string] =
  ## Refresh node data that only becomes known at start time: rebuild the
  ## ENR when any port was OS-assigned (configured as 0), then sync the
  ## announced addresses and the ENR multiaddress field.
  let hadDynamicPorts =
    waku[].conf.tcpPort == Port(0) or waku[].conf.websocketPort == Port(0)

  if hadDynamicPorts:
    updateEnr(waku).isOkOr:
      return err("error calling updateEnr: " & $error)

  ?updateAnnouncedAddrWithPrimaryIpAddr(waku[].node)
  ?updateAddressInENR(waku)

  return ok()
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
2024-12-03 13:39:37 +00:00
|
|
|
|
proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
  ## Background retry loop for DNS discovery: every 30s, attempt to fetch
  ## dynamic bootstrap nodes; on success, feed them into discv5 (when
  ## running), connect to them, and terminate the loop.
  while true:
    await sleepAsync(30.seconds)
    let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
      waku.conf.dnsDiscovery, waku.conf.dnsDiscoveryUrl,
      waku.conf.dnsDiscoveryNameServers,
    )
    if dynamicBootstrapNodesRes.isErr():
      error "Retrieving dynamic bootstrap nodes failed",
        error = dynamicBootstrapNodesRes.error
      continue

    waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()

    if not waku[].wakuDiscv5.isNil():
      # Only nodes carrying a UDP port are usable as discv5 bootstrap
      # records.
      let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes
        .filterIt(it.hasUdpPort())
        .mapIt(it.enr.get().toUri())

      var discv5BootstrapEnrs: seq[enr.Record]
      # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
      for enrUri in dynamicBootstrapEnrs:
        addBootstrapNode(enrUri, discv5BootstrapEnrs)

      waku[].wakuDiscv5.updateBootstrapRecords(
        waku[].wakuDiscv5.protocol.bootstrapRecords & discv5BootstrapEnrs
      )

    info "Connecting to dynamic bootstrap peers"
    try:
      await connectToNodes(
        waku[].node, waku[].dynamicBootstrapNodes, "dynamic bootstrap"
      )
    except CatchableError:
      # Fix: pass the exception message as a structured chronicles property
      # instead of concatenating it into the (static) event name, matching
      # the logging convention used throughout this module.
      error "failed to connect to dynamic bootstrap nodes",
        error = getCurrentExceptionMsg()
    # Successful retrieval (whether or not connecting succeeded) ends the
    # retry loop.
    return
|
|
|
|
|
|
|
|
|
|
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
  ## Start the Waku application: retrieve DNS-discovery bootstrap nodes
  ## (falling back to a background retry loop on failure), start the node
  ## (unless discv5-only mode), refresh dynamically-bound data, start
  ## discv5, the metrics server, and the delivery monitor when present.
  debug "Retrieve dynamic bootstrap nodes"

  let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
    waku.conf.dnsDiscovery, waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers
  )

  # A DNS discovery failure is non-fatal: keep retrying in the background.
  if dynamicBootstrapNodesRes.isErr():
    error "Retrieving dynamic bootstrap nodes failed",
      error = dynamicBootstrapNodesRes.error
    # Start Dns Discovery retry loop
    waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop()
  else:
    waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()

  if not waku[].conf.discv5Only:
    (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
      return err("error while calling startNode: " & $error)

    # Update waku data that is set dynamically on node start
    updateWaku(waku).isOkOr:
      return err("Error in updateApp: " & $error)

  ## Discv5
  if waku[].conf.discv5Discovery or waku[].conf.discv5Only:
    waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5(
      waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, waku.conf,
      waku.dynamicBootstrapNodes, waku.rng, waku.key,
    )

    (await waku.wakuDiscV5.start()).isOkOr:
      return err("failed to start waku discovery v5: " & $error)

  waku.metricsServer = startMetricsServerAndLogging(waku[].conf).valueOr:
    return err("failed to start metrics server and logging: " & $error)

  ## Reliability
  if not waku[].deliveryMonitor.isNil():
    waku[].deliveryMonitor.startDeliveryMonitor()

  return ok()
|
|
|
|
|
|
2024-05-03 12:07:15 +00:00
|
|
|
|
# Waku shutdown
|
2023-04-25 13:34:57 +00:00
|
|
|
|
|
2024-05-03 12:07:15 +00:00
|
|
|
|
proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} =
  ## Shut down every running component, in order: REST server, metrics
  ## server, discv5, the node itself, and finally the DNS-discovery retry
  ## loop. Components that were never started (nil) are skipped.
  if waku.restServer != nil:
    await waku.restServer.stop()

  if waku.metricsServer != nil:
    await waku.metricsServer.stop()

  if waku.wakuDiscv5 != nil:
    await waku.wakuDiscv5.stop()

  if waku.node != nil:
    await waku.node.stop()

  if waku.dnsRetryLoopHandle != nil:
    await waku.dnsRetryLoopHandle.cancelAndWait()
|