2023-06-22 16:58:14 -04:00
|
|
|
|
import
|
2023-09-27 16:02:24 +03:00
|
|
|
|
chronicles,
|
|
|
|
|
|
chronos,
|
|
|
|
|
|
libp2p/crypto/crypto,
|
|
|
|
|
|
libp2p/multiaddress,
|
|
|
|
|
|
libp2p/nameresolving/dnsresolver,
|
2025-04-01 09:28:18 +11:00
|
|
|
|
std/[options, sequtils, strutils, net],
|
2024-07-09 13:14:28 +02:00
|
|
|
|
results
|
2023-06-22 16:58:14 -04:00
|
|
|
|
import
|
2024-03-03 02:59:53 +02:00
|
|
|
|
./external_config,
|
|
|
|
|
|
../common/utils/nat,
|
|
|
|
|
|
../node/config,
|
|
|
|
|
|
../waku_enr/capabilities,
|
|
|
|
|
|
../waku_enr,
|
2025-04-01 09:28:18 +11:00
|
|
|
|
../waku_core,
|
|
|
|
|
|
./networks_config
|
2023-06-22 16:58:14 -04:00
|
|
|
|
|
2024-03-16 00:08:47 +01:00
|
|
|
|
proc enrConfiguration*(
    conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey
): Result[enr.Record, string] =
  ## Assemble the node's ENR from the network configuration:
  ## IP address/ports, Waku capability flags, announced multiaddrs
  ## and relay shard information, signed with `key`.
  var builder = EnrBuilder.init(key)

  builder.withIpAddressAndPorts(
    netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort
  )

  # Capability bitfield is optional on NetConfig; only add it when present.
  if netConfig.wakuFlags.isSome():
    builder.withWakuCapabilities(netConfig.wakuFlags.get())

  builder.withMultiaddrs(netConfig.enrMultiaddrs)

  builder.withWakuRelaySharding(
    RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
  ).isOkOr:
    return err("could not initialize ENR with shards")

  let buildRes = builder.build()
  if buildRes.isErr():
    error "failed to create record", error = buildRes.error
    return err($buildRes.error)

  return ok(buildRes.get())
2024-03-16 00:08:47 +01:00
|
|
|
|
proc validateExtMultiAddrs*(vals: seq[string]): Result[seq[MultiAddress], string] =
  ## Parse every textual address in `vals` into a `MultiAddress`.
  ## Fails with the underlying parse error on the first invalid entry.
  var parsed = newSeqOfCap[MultiAddress](vals.len)
  for raw in vals:
    # `?` propagates the parse failure to the caller unchanged.
    parsed.add(?MultiAddress.init(raw))
  return ok(parsed)
|
2024-03-16 00:08:47 +01:00
|
|
|
|
proc dnsResolve*(
    domain: string, conf: WakuNodeConf
): Future[Result[string, string]] {.async.} =
  ## Resolve `domain` via the DNS servers configured in `conf` and
  ## return the first answer's host as a string.
  # Use conf's DNS servers; assume all servers use port 53.
  let nameServers = conf.dnsAddrsNameServers.mapIt(initTAddress(it, Port(53)))
  let dnsResolver = DnsResolver.new(nameServers)

  # AF_UNSPEC: accept both IPv4 and IPv6 answers.
  let answers = await dnsResolver.resolveIp(domain, 0.Port, Domain.AF_UNSPEC)
  if answers.len == 0:
    return err("Could not resolve IP from DNS: empty response")
  return ok(answers[0].host) # Use only first answer
|
2024-03-16 00:08:47 +01:00
|
|
|
|
proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResult =
  ## Derive the node's `NetConfig` (bind/external addresses, ports and
  ## capability flags) from the CLI configuration and NAT traversal setup.
  ## `udpPort` is only supplied to satisfy underlying APIs but is not
  ## actually a supported transport for libp2p traffic.
  let tcpBindPort = Port(uint16(conf.tcpPort) + conf.portsShift)

  let natRes = setupNat(conf.nat, clientId, tcpBindPort, tcpBindPort)
  if natRes.isErr():
    return err("failed to setup NAT: " & $natRes.error)

  # `extIp` is mutable: it may be overwritten below when a DNS4 domain resolves.
  var (extIp, extTcpPort, _) = natRes.get()

  let
    dns4DomainName =
      if conf.dns4DomainName != "":
        some(conf.dns4DomainName)
      else:
        none(string)

    discv5UdpPort =
      if conf.discv5Discovery:
        some(Port(uint16(conf.discv5UdpPort) + conf.portsShift))
      else:
        none(Port)

    ## TODO: the NAT setup assumes a manual port mapping configuration if extIp
    ## config is set. This probably implies adding manual config item for
    ## extPort as well. The following heuristic assumes that, in absence of
    ## manual config, the external port is the same as the bind port.
    extPort =
      if (extIp.isSome() or dns4DomainName.isSome()) and extTcpPort.isNone():
        some(tcpBindPort)
      else:
        extTcpPort

    extMultiAddrs =
      if conf.extMultiAddrs.len > 0:
        let validationRes = validateExtMultiAddrs(conf.extMultiAddrs)
        if validationRes.isErr():
          return err("invalid external multiaddress: " & $validationRes.error)
        else:
          validationRes.get()
      else:
        @[]

    wakuFlags = CapabilitiesBitfield.init(
      lightpush = conf.lightpush,
      filter = conf.filter,
      store = conf.store,
      relay = conf.relay,
      sync = conf.storeSync,
    )

  # Resolve and use DNS domain IP when NAT discovery produced no external IP.
  if dns4DomainName.isSome() and extIp.isNone():
    try:
      let dnsRes = waitFor dnsResolve(conf.dns4DomainName, conf)
      if dnsRes.isErr():
        return err($dnsRes.error) # Pass error down the stack
      extIp = some(parseIpAddress(dnsRes.get()))
    except CatchableError:
      return
        err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg())

  # Wrap in none because NetConfig does not have a default constructor
  # TODO: We could change bindIp in NetConfig to be something less restrictive
  # than IpAddress, which doesn't allow default construction
  return NetConfig.init(
    clusterId = conf.clusterId,
    bindIp = conf.listenAddress,
    bindPort = tcpBindPort,
    extIp = extIp,
    extPort = extPort,
    extMultiAddrs = extMultiAddrs,
    extMultiAddrsOnly = conf.extMultiAddrsOnly,
    wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
    wsEnabled = conf.websocketSupport,
    wssEnabled = conf.websocketSecureSupport,
    dns4DomainName = dns4DomainName,
    discv5UdpPort = discv5UdpPort,
    wakuFlags = some(wakuFlags),
    dnsNameServers = conf.dnsAddrsNameServers,
  )
2025-04-01 09:28:18 +11:00
|
|
|
|
|
|
|
|
|
|
proc applyPresetConfiguration*(srcConf: WakuNodeConf): Result[WakuNodeConf, string] =
  ## Return a copy of `srcConf` with preset-specific overrides applied.
  ## Currently the only preset is "twn" (The Waku Network);
  ## `--cluster-id=1` is treated as a deprecated alias for it.
  var resConf = srcConf

  if resConf.clusterId == 1:
    warn(
      "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
    )
    resConf.preset = "twn"

  case toLowerAscii(resConf.preset)
  of "twn":
    let twnClusterConf = ClusterConf.TheWakuNetworkConf()

    # Override configuration with The Waku Network cluster parameters.
    resConf.maxMessageSize = twnClusterConf.maxMessageSize
    resConf.clusterId = twnClusterConf.clusterId
    resConf.rlnRelay = twnClusterConf.rlnRelay
    resConf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
    resConf.rlnRelayChainId = twnClusterConf.rlnRelayChainId
    resConf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
    resConf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
    resConf.discv5Discovery = twnClusterConf.discv5Discovery
    # Bootstrap nodes are appended, not replaced, so user-supplied nodes survive.
    resConf.discv5BootstrapNodes =
      resConf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
    resConf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
    resConf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
    resConf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
    # NOTE(review): the original additionally re-assigned `rlnRelay` under
    # `if resConf.relay:` — a no-op given the unconditional assignment above,
    # so that dead conditional has been removed.
  else:
    discard

  return ok(resConf)
|
|
|
|
|
# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
proc getNumShardsInNetwork*(conf: WakuNodeConf): uint32 =
  ## Number of shards in the network: the configured value when non-zero,
  ## otherwise the maximum possible (1024) as per the static sharding spec:
  ## https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
  if conf.numShardsInNetwork == 0:
    return uint32(MaxShardIndex + 1)
  return conf.numShardsInNetwork
|
|
|
|
|
|
proc validateShards*(conf: WakuNodeConf): Result[void, string] =
  ## Verify that every configured shard index lies within the network's
  ## shard range; errors on (and names) the first offending shard.
  let numShardsInNetwork = getNumShardsInNetwork(conf)

  for shard in conf.shards:
    if shard < numShardsInNetwork:
      continue
    let msg =
      "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
      $numShardsInNetwork # fmt doesn't work
    error "validateShards failed", error = msg
    return err(msg)

  return ok()
|
|
|
|
|
|
proc getNodeKey*(
    conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng()
): Result[PrivateKey, string] =
  ## Return the node key from configuration when supplied, otherwise
  ## generate a fresh secp256k1 key using `rng`.
  if conf.nodekey.isSome():
    return ok(conf.nodekey.get())

  warn "missing node key, generating new set"
  let generated = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
    error "Failed to generate key", error = error
    return err("Failed to generate key: " & $error)
  return ok(generated)