{.push raises: [].}

import
  std/[sequtils, strutils, options, sets, net, json],
  results,
  chronos,
  chronicles,
  metrics,
  libp2p/multiaddress,
  eth/keys as eth_keys,
  eth/p2p/discoveryv5/node,
  eth/p2p/discoveryv5/protocol
import
  ../node/peer_manager/peer_manager,
  ../waku_core,
  ../waku_enr,
  ../factory/external_config

export protocol, waku_enr

declarePublicGauge waku_discv5_discovered, "number of nodes discovered"
declarePublicGauge waku_discv5_errors, "number of waku discv5 errors", ["type"]

logScope:
  topics = "waku discv5"

## Config

type WakuDiscoveryV5Config* = object
  discv5Config*: Option[DiscoveryConfig]
  address*: IpAddress
  port*: Port
  privateKey*: eth_keys.PrivateKey
  bootstrapRecords*: seq[waku_enr.Record]
  autoupdateRecord*: bool
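
# A minimal sketch of how a config could be assembled, assuming a previously
# generated `eth_keys.PrivateKey` named `discKey` and default discv5 tuning
# (illustrative only, not used by this module):
#
#   let exampleConf = WakuDiscoveryV5Config(
#     discv5Config: none(DiscoveryConfig),
#     address: parseIpAddress("0.0.0.0"),
#     port: Port(9000),
#     privateKey: discKey,
#     bootstrapRecords: @[],
#     autoupdateRecord: true,
#   )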

## Protocol

type WakuDiscv5Predicate* =
  proc(record: waku_enr.Record): bool {.closure, gcsafe, raises: [].}

type WakuDiscoveryV5* = ref object
  conf: WakuDiscoveryV5Config
  protocol*: protocol.Protocol
  listening*: bool
  predicate: Option[WakuDiscv5Predicate]
  peerManager: Option[PeerManager]
  topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent]

proc shardingPredicate*(
    record: Record, bootnodes: seq[Record] = @[]
): Option[WakuDiscv5Predicate] =
  ## Filter peers based on relay sharding information
  let typedRecord = record.toTyped().valueOr:
    debug "peer filtering failed", reason = error
    return none(WakuDiscv5Predicate)

  let nodeShard = typedRecord.relaySharding().valueOr:
    debug "no relay sharding information, peer filtering disabled"
    return none(WakuDiscv5Predicate)

  debug "peer filtering updated"

  let predicate = proc(record: waku_enr.Record): bool =
    bootnodes.contains(record) or # Temp. Bootnode exception
      (
        record.getCapabilities().len > 0 and # RFC 31 requirement
        nodeShard.shardIds.anyIt(record.containsShard(nodeShard.clusterId, it))
      ) # RFC 64 guideline

  return some(predicate)
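
# Illustrative usage sketch (assumes a local `record` and a seq of discovered
# `records`; the names are placeholders, not part of this module):
#
#   let predOpt = shardingPredicate(record, bootstrapRecords)
#   if predOpt.isSome():
#     let sameShardRecords = records.filter(predOpt.get())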

proc new*(
    T: type WakuDiscoveryV5,
    rng: ref HmacDrbgContext,
    conf: WakuDiscoveryV5Config,
    record: Option[waku_enr.Record],
    peerManager: Option[PeerManager] = none(PeerManager),
    queue: AsyncEventQueue[SubscriptionEvent] =
      newAsyncEventQueue[SubscriptionEvent](30),
): T =
  let protocol = newProtocol(
    rng = rng,
    config = conf.discv5Config.get(protocol.defaultDiscoveryConfig),
    bindPort = conf.port,
    bindIp = conf.address,
    privKey = conf.privateKey,
    bootstrapRecords = conf.bootstrapRecords,
    enrAutoUpdate = conf.autoupdateRecord,
    previousRecord = record,
    enrIp = none(IpAddress),
    enrTcpPort = none(Port),
    enrUdpPort = none(Port),
  )

  let shardPredOp =
    if record.isSome():
      shardingPredicate(record.get(), conf.bootstrapRecords)
    else:
      none(WakuDiscv5Predicate)

  WakuDiscoveryV5(
    conf: conf,
    protocol: protocol,
    listening: false,
    predicate: shardPredOp,
    peerManager: peerManager,
    topicSubscriptionQueue: queue,
  )

proc updateENRShards(
    wd: WakuDiscoveryV5, newTopics: seq[PubsubTopic], add: bool
): Result[void, string] =
  ## Add or remove shards from the Discv5 ENR
  let newShardOp = topicsToRelayShards(newTopics).valueOr:
    return err("ENR update failed: " & error)

  let newShard = newShardOp.valueOr:
    return ok()

  let typedRecord = wd.protocol.localNode.record.toTyped().valueOr:
    return err("ENR update failed: " & $error)

  let currentShardsOp = typedRecord.relaySharding()

  let resultShard =
    if add and currentShardsOp.isSome():
      let currentShard = currentShardsOp.get()

      if currentShard.clusterId != newShard.clusterId:
        return err("ENR update failed: cluster id mismatch")

      RelayShards.init(
        currentShard.clusterId, currentShard.shardIds & newShard.shardIds
      ).valueOr:
        return err("ENR update failed: " & error)
    elif not add and currentShardsOp.isSome():
      let currentShard = currentShardsOp.get()

      if currentShard.clusterId != newShard.clusterId:
        return err("ENR update failed: cluster id mismatch")

      let currentSet = toHashSet(currentShard.shardIds)
      let newSet = toHashSet(newShard.shardIds)

      let indices = toSeq(currentSet - newSet)

      if indices.len == 0:
        return err("ENR update failed: cannot remove all shards")

      RelayShards.init(currentShard.clusterId, indices).valueOr:
        return err("ENR update failed: " & error)
    elif add and currentShardsOp.isNone():
      newShard
    else:
      return ok()

  let (field, value) =
    if resultShard.shardIds.len >= ShardingIndicesListMaxLength:
      (ShardingBitVectorEnrField, resultShard.toBitVector())
    else:
      let list = resultShard.toIndicesList().valueOr:
        return err("ENR update failed: " & $error)

      (ShardingIndicesListEnrField, list)

  wd.protocol.updateRecord([(field, value)]).isOkOr:
    return err("ENR update failed: " & $error)

  return ok()

proc logDiscv5FoundPeers(discoveredRecords: seq[waku_enr.Record]) =
  for record in discoveredRecords:
    let recordUri = record.toURI()
    let capabilities = record.getCapabilities()

    let typedRecord = record.toTyped().valueOr:
      warn "Could not parse to typed record", error = error, enr = recordUri
      continue

    let peerInfo = record.toRemotePeerInfo().valueOr:
      warn "Could not generate remote peer info", error = error, enr = recordUri
      continue

    let addrs = peerInfo.constructMultiaddrStr()

    let rs = typedRecord.relaySharding()
    let shardsStr =
      if rs.isSome():
        $rs.get()
      else:
        "no shards found"

    notice "Received discv5 node",
      addrs = addrs, enr = recordUri, capabilities = capabilities, shards = shardsStr

proc findRandomPeers*(
    wd: WakuDiscoveryV5, overridePred = none(WakuDiscv5Predicate)
): Future[seq[waku_enr.Record]] {.async.} =
  ## Find random peers to connect to using Discovery v5
  let discoveredNodes = await wd.protocol.queryRandom()

  var discoveredRecords = discoveredNodes.mapIt(it.record)

  when defined(debugDiscv5):
    logDiscv5FoundPeers(discoveredRecords)

  # Filter out nodes that do not match the predicate
  if overridePred.isSome():
    discoveredRecords = discoveredRecords.filter(overridePred.get())
  elif wd.predicate.isSome():
    discoveredRecords = discoveredRecords.filter(wd.predicate.get())

  waku_discv5_discovered.inc(discoveredRecords.len)

  return discoveredRecords
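
# Illustrative usage sketch (assumes an initialised and started `wd: WakuDiscoveryV5`
# inside an async proc; the predicate below is a placeholder, not part of this module):
#
#   let onlyWithCaps: WakuDiscv5Predicate = proc(record: waku_enr.Record): bool =
#     record.getCapabilities().len > 0
#   let records = await wd.findRandomPeers(some(onlyWithCaps))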

proc searchLoop(wd: WakuDiscoveryV5) {.async.} =
  ## Continuously add newly discovered nodes

  let peerManager = wd.peerManager.valueOr:
    return

  info "Starting discovery v5 search"

  while wd.listening:
    trace "running discv5 discovery loop"
    let discoveredRecords = await wd.findRandomPeers()

    var discoveredPeers: seq[RemotePeerInfo]
    var wrongRecordsReasons: seq[tuple[record: string, errorDescription: string]]
      ## stores the reasons why certain records could not be converted to RemotePeerInfo

    for record in discoveredRecords:
      let peerInfo = record.toRemotePeerInfo().valueOr:
        ## in case of error, keep track of the reason for debugging purposes
        wrongRecordsReasons.add(($record, $error))
        waku_discv5_errors.inc(labelValues = [$error])
        continue

      discoveredPeers.add(peerInfo)

    trace "discv5 discovered peers",
      num_discovered_peers = discoveredPeers.len,
      peers = toSeq(discoveredPeers.mapIt(shortLog(it.peerId)))

    trace "discv5 discarded wrong records",
      wrong_records =
        wrongRecordsReasons.mapIt("(" & it.record & "," & it.errorDescription & ")")

    for peer in discoveredPeers:
      # Peers added are filtered by the peer manager
      peerManager.addPeer(peer, PeerOrigin.Discv5)

    # Discovery `queryRandom` can have a synchronous fast path, for example
    # when no peers are in the routing table, so don't run it in a continuous loop.
    #
    # Also, give some time to dial the discovered nodes and update stats, etc.
    await sleepAsync(5.seconds)

proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} =
  ## Listen for pubsub topic subscription changes

  let key = wd.topicSubscriptionQueue.register()

  while wd.listening:
    let events = await wd.topicSubscriptionQueue.waitEvents(key)

    # We don't know in advance which kinds of events we will receive, so handle both.
    let subs = events.filterIt(it.kind == PubsubSub).mapIt(it.topic)
    let unsubs = events.filterIt(it.kind == PubsubUnsub).mapIt(it.topic)

    if subs.len == 0 and unsubs.len == 0:
      continue

    let unsubRes = wd.updateENRShards(unsubs, false)
    let subRes = wd.updateENRShards(subs, true)

    if subRes.isErr():
      debug "ENR shard addition failed", reason = $subRes.error

    if unsubRes.isErr():
      debug "ENR shard removal failed", reason = $unsubRes.error

    if subRes.isErr() and unsubRes.isErr():
      continue

    debug "ENR updated successfully"

    wd.predicate =
      shardingPredicate(wd.protocol.localNode.record, wd.protocol.bootstrapRecords)

  wd.topicSubscriptionQueue.unregister(key)

proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises: []).} =
  if wd.listening:
    return err("already listening")

  info "Starting discovery v5 service"

  debug "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port
  try:
    wd.protocol.open()
  except CatchableError:
    return err("failed to open udp port: " & getCurrentExceptionMsg())

  wd.listening = true

  trace "start discv5 service"
  wd.protocol.start()

  asyncSpawn wd.searchLoop()
  asyncSpawn wd.subscriptionsListener()

  debug "Successfully started discovery v5 service"
  info "Discv5: discoverable ENR", enr = wd.protocol.localNode.record.toUri()

  ok()

proc stop*(wd: WakuDiscoveryV5): Future[void] {.async.} =
  if not wd.listening:
    return

  info "Stopping discovery v5 service"

  wd.listening = false
  trace "Stop listening on discv5 port"
  await wd.protocol.closeWait()

  debug "Successfully stopped discovery v5 service"

## Helper functions

proc parseBootstrapAddress(address: string): Result[enr.Record, cstring] =
  logScope:
    address = address

  if address[0] == '/':
    return err("MultiAddress bootstrap addresses are not supported")

  let lowerCaseAddress = toLowerAscii(address)
  if lowerCaseAddress.startsWith("enr:"):
    var enrRec: enr.Record
    if not enrRec.fromURI(address):
      return err("Invalid ENR bootstrap record")

    return ok(enrRec)
  elif lowerCaseAddress.startsWith("enode:"):
    return err("ENode bootstrap addresses are not supported")
  else:
    return err("Ignoring unrecognized bootstrap address type")

proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]) =
  # Ignore empty lines or lines starting with #
  if bootstrapAddr.len == 0 or bootstrapAddr[0] == '#':
    return

  let enrRes = parseBootstrapAddress(bootstrapAddr)
  if enrRes.isErr():
    debug "ignoring invalid bootstrap address", reason = enrRes.error
    return

  bootstrapEnrs.add(enrRes.value)

proc setupDiscoveryV5*(
    myENR: enr.Record,
    nodePeerManager: PeerManager,
    nodeTopicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent],
    conf: WakuNodeConf,
    dynamicBootstrapNodes: seq[RemotePeerInfo],
    rng: ref HmacDrbgContext,
    key: crypto.PrivateKey,
): WakuDiscoveryV5 =
  let dynamicBootstrapEnrs =
    dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get())

  var discv5BootstrapEnrs: seq[enr.Record]

  # Parse ENR URIs from the configuration and add the resulting ENRs to discv5BootstrapEnrs
  for enrUri in conf.discv5BootstrapNodes:
    addBootstrapNode(enrUri, discv5BootstrapEnrs)

  discv5BootstrapEnrs.add(dynamicBootstrapEnrs)

  let discv5Config = DiscoveryConfig.init(
    conf.discv5TableIpLimit, conf.discv5BucketIpLimit, conf.discv5BitsPerHop
  )

  let discv5UdpPort = Port(uint16(conf.discv5UdpPort) + conf.portsShift)

  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: some(discv5Config),
    address: conf.listenAddress,
    port: discv5UdpPort,
    privateKey: eth_keys.PrivateKey(key.skkey),
    bootstrapRecords: discv5BootstrapEnrs,
    autoupdateRecord: conf.discv5EnrAutoUpdate,
  )

  WakuDiscoveryV5.new(
    rng, discv5Conf, some(myENR), some(nodePeerManager), nodeTopicSubscriptionQueue
  )

proc updateBootstrapRecords*(
    self: var WakuDiscoveryV5, newRecordsString: string
): Result[void, string] =
  ## newRecordsString - JSON array containing the bootnode ENRs, e.g. `["enr:...", "enr:..."]`
  var newRecords = newSeq[waku_enr.Record]()

  var jsonNode: JsonNode
  try:
    jsonNode = parseJson(newRecordsString)
  except Exception:
    return err("exception parsing json enr records: " & getCurrentExceptionMsg())

  if jsonNode.kind != JArray:
    return err("updateBootstrapRecords should receive a json array containing ENRs")

  for enr in jsonNode:
    let enrWithoutQuotes = ($enr).replace("\"", "")
    var bootstrapNodeEnr: waku_enr.Record
    if not bootstrapNodeEnr.fromURI(enrWithoutQuotes):
      return err("wrong enr given: " & enrWithoutQuotes)
    newRecords.add(bootstrapNodeEnr)

  self.protocol.bootstrapRecords = newRecords

  return ok()
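
# Illustrative usage sketch (the ENR string is a placeholder, not a real record):
#
#   var disc = ... # an existing WakuDiscoveryV5 instance
#   let res = disc.updateBootstrapRecords("""["enr:..."]""")
#   if res.isErr():
#     debug "bootstrap record update failed", reason = res.error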