2024-06-28 10:34:57 +00:00
|
|
|
|
{.push raises: [].}
|
2022-11-10 09:29:34 +00:00
|
|
|
|
|
|
|
|
|
import
|
2024-08-07 18:58:28 +00:00
|
|
|
|
std/[net, tables, strutils, times, sequtils, random],
|
2024-07-09 11:14:28 +00:00
|
|
|
|
results,
|
2022-11-10 09:29:34 +00:00
|
|
|
|
chronicles,
|
|
|
|
|
chronicles/topics_registry,
|
|
|
|
|
chronos,
|
2022-11-16 15:38:31 +00:00
|
|
|
|
chronos/timer as ctime,
|
2022-11-10 09:29:34 +00:00
|
|
|
|
confutils,
|
|
|
|
|
eth/keys,
|
|
|
|
|
eth/p2p/discoveryv5/enr,
|
|
|
|
|
libp2p/crypto/crypto,
|
2023-04-19 19:20:50 +00:00
|
|
|
|
libp2p/nameresolving/dnsresolver,
|
2023-09-25 12:38:59 +00:00
|
|
|
|
libp2p/protocols/ping,
|
2022-11-10 09:29:34 +00:00
|
|
|
|
metrics,
|
|
|
|
|
metrics/chronos_httpserver,
|
2023-03-06 16:18:41 +00:00
|
|
|
|
presto/[route, server, client]
|
2022-11-10 09:29:34 +00:00
|
|
|
|
import
|
2024-07-05 22:03:38 +00:00
|
|
|
|
waku/[
|
|
|
|
|
waku_core,
|
|
|
|
|
node/peer_manager,
|
|
|
|
|
waku_node,
|
|
|
|
|
waku_enr,
|
|
|
|
|
discovery/waku_discv5,
|
|
|
|
|
discovery/waku_dnsdisc,
|
|
|
|
|
waku_relay,
|
|
|
|
|
waku_rln_relay,
|
|
|
|
|
factory/builder,
|
|
|
|
|
factory/networks_config,
|
|
|
|
|
],
|
2022-11-10 09:29:34 +00:00
|
|
|
|
./networkmonitor_metrics,
|
|
|
|
|
./networkmonitor_config,
|
|
|
|
|
./networkmonitor_utils
|
|
|
|
|
|
|
|
|
|
logScope:
  topics = "networkmonitor"

# Seconds that must pass since a peer's lastTimeConnected before we dial it again
const ReconnectTime = 60
# Stop dialing a peer after this many consecutive failed attempts (see shouldReconnect)
const MaxConnectionRetries = 5
# Seconds after which an exhausted peer's retry counter is reset so it gets dialed again
const ResetRetriesAfter = 1200
# Weight of the newest sample in the exponential moving average of ping durations
const PingSmoothing = 0.3
# Connection cap; when half of it is reached, half of the outbound conns are pruned
const MaxConnectedPeers = 150

# Git revision, injected at compile time with -d:git_version=...
const git_version* {.strdefine.} = "n/a"
|
|
|
|
|
|
2024-07-30 13:56:49 +00:00
|
|
|
|
proc setDiscoveredPeersCapabilities(routingTableNodes: seq[waku_enr.Record]) =
  ## For every waku capability flag, tally how many of the given ENRs advertise
  ## it, log the tally, and publish it to the per-capability gauge.
  for cap in [Relay, Store, Filter, Lightpush]:
    var supportingNodes = 0
    for record in routingTableNodes:
      if record.supportsCapability(cap):
        inc supportingNodes

    info "capabilities as per ENR waku flag", capability = cap, amount = supportingNodes
    networkmonitor_peer_type_as_per_enr.set(
      int64(supportingNodes), labelValues = [$cap]
    )
|
2023-09-28 08:07:27 +00:00
|
|
|
|
|
2024-08-07 18:58:28 +00:00
|
|
|
|
proc setDiscoveredPeersCluster(routingTableNodes: seq[Node]) =
  ## Count routing-table nodes per cluster id (as advertised via relay sharding
  ## in their ENR) and publish the counts to the per-cluster gauge.
  ## Nodes whose record cannot be parsed, or that advertise no relay sharding,
  ## are lumped together under cluster id 0.
  var clusters: CountTable[uint16]

  for node in routingTableNodes:
    # Record not convertible to a typed record: attribute it to cluster 0.
    let typedRec = node.record.toTyped().valueOr:
      clusters.inc(0)
      continue

    # No relay-sharding info in the ENR: also attribute it to cluster 0.
    let relayShard = typedRec.relaySharding().valueOr:
      clusters.inc(0)
      continue

    clusters.inc(relayShard.clusterId)

  for (key, value) in clusters.pairs:
    networkmonitor_peer_cluster_as_per_enr.set(int64(value), labelValues = [$key])
|
|
|
|
|
|
2023-09-28 08:07:27 +00:00
|
|
|
|
proc analyzePeer(
    customPeerInfo: CustomPeerInfoRef,
    peerInfo: RemotePeerInfo,
    node: WakuNode,
    timeout: chronos.Duration,
): Future[Result[string, string]] {.async.} =
  ## Dial and ping a single peer, updating `customPeerInfo` in place:
  ## on success clears `connError`, records last/avg ping durations and returns
  ## ok(peerId); on timeout or connection error bumps `retries` and returns err.
  # Written by the inner `ping` closure on success; stays 0 on failure.
  var pingDelay: chronos.Duration

  proc ping(): Future[Result[void, string]] {.async, gcsafe.} =
    try:
      let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
      pingDelay = await node.libp2pPing.ping(conn)
      return ok()
    except CatchableError:
      var msg = getCurrentExceptionMsg()
      # withTimeout cancels the future; surface that as a plain "timedout".
      if msg == "Future operation cancelled!":
        msg = "timedout"
      warn "failed to ping the peer", peer = peerInfo, err = msg
      customPeerInfo.connError = msg
      return err("could not ping peer: " & msg)

  let timedOut = not await ping().withTimeout(timeout)
  # need this check for pingDelay == 0 because there may be a conn error before timeout
  if timedOut or pingDelay == 0.millis:
    customPeerInfo.retries += 1
    # NOTE(review): if the future timed out without raising, connError may still
    # hold the message from a previous attempt (or be empty) — confirm intended.
    return err(customPeerInfo.connError)

  customPeerInfo.connError = ""
  info "successfully pinged peer", peer = peerInfo, duration = pingDelay.millis
  networkmonitor_peer_ping.observe(pingDelay.millis)

  # We are using a smoothed moving average (EMA, weight = PingSmoothing);
  # the very first sample seeds the average directly.
  customPeerInfo.avgPingDuration =
    if customPeerInfo.avgPingDuration.millis == 0:
      pingDelay
    else:
      let newAvg =
        (float64(pingDelay.millis) * PingSmoothing) +
        float64(customPeerInfo.avgPingDuration.millis) * (1.0 - PingSmoothing)

      int64(newAvg).millis

  customPeerInfo.lastPingDuration = pingDelay

  return ok(customPeerInfo.peerId)
|
|
|
|
|
|
|
|
|
|
proc shouldReconnect(customPeerInfo: CustomPeerInfoRef): bool =
  ## True when the peer is due for another dial attempt: enough time has passed
  ## since the last successful connection AND the retry budget is not exhausted.
  ## As a side effect, a peer whose budget is exhausted gets its counter reset
  ## once ResetRetriesAfter seconds have elapsed since it was last connected.
  let now = getTime().toUnix()
  let reconnectIntervalCheck = now >= customPeerInfo.lastTimeConnected + ReconnectTime
  var retriesCheck = customPeerInfo.retries < MaxConnectionRetries

  if not retriesCheck and
      now >= customPeerInfo.lastTimeConnected + ResetRetriesAfter:
    customPeerInfo.retries = 0
    retriesCheck = true
    info "resetting retries counter", peerId = customPeerInfo.peerId

  reconnectIntervalCheck and retriesCheck
|
2022-11-10 09:29:34 +00:00
|
|
|
|
|
2022-12-05 19:02:21 +00:00
|
|
|
|
# TODO: Split in discover, connect
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc setConnectedPeersMetrics(
    discoveredNodes: seq[waku_enr.Record],
    node: WakuNode,
    timeout: chronos.Duration,
    restClient: RestClientRef,  # NOTE(review): unused in this proc — kept for interface stability; confirm
    allPeers: CustomPeersTableRef,
) {.async.} =
  ## Register every newly discovered node in `allPeers`, prune half of the
  ## outbound connections when near the cap, then concurrently ping all peers
  ## that are due for reconnection and record their protocols and user-agents.
  let currentTime = getTime().toUnix()

  var newPeers = 0
  var successfulConnections = 0

  var analyzeFuts: seq[Future[Result[string, string]]]

  var (inConns, outConns) = node.peer_manager.connectedPeers(WakuRelayCodec)
  info "connected peers", inConns = inConns.len, outConns = outConns.len

  # Shuffle so the pruning below drops a random half rather than a fixed prefix.
  shuffle(outConns)

  # Keep headroom below MaxConnectedPeers by disconnecting half the outbound conns.
  if outConns.len >= toInt(MaxConnectedPeers / 2):
    for p in outConns[0 ..< toInt(outConns.len / 2)]:
      trace "Pruning Peer", Peer = $p
      asyncSpawn(node.switch.disconnect(p))

  # iterate all newly discovered nodes
  for discNode in discoveredNodes:
    let peerRes = toRemotePeerInfo(discNode)

    let peerInfo = peerRes.valueOr:
      warn "error converting record to remote peer info", record = discNode
      continue

    # create new entry if new peerId found
    let peerId = $peerInfo.peerId

    if not allPeers.hasKey(peerId):
      allPeers[peerId] = CustomPeerInfoRef(peerId: peerId)
      newPeers += 1
    else:
      info "already seen", peerId = peerId

    let customPeerInfo = allPeers[peerId]

    # Refresh discovery bookkeeping on every sighting, new or known.
    customPeerInfo.lastTimeDiscovered = currentTime
    customPeerInfo.enr = discNode.toURI()
    customPeerInfo.enrCapabilities = discNode.getCapabilities().mapIt($it)
    customPeerInfo.discovered += 1

    # Accumulate multiaddrs without duplicating ones seen in earlier rounds.
    for maddr in peerInfo.addrs:
      if $maddr notin customPeerInfo.maddrs:
        customPeerInfo.maddrs.add $maddr
    let typedRecord = discNode.toTypedRecord()
    if not typedRecord.isOk():
      warn "could not convert record to typed record", record = discNode
      continue
    if not typedRecord.get().ip.isSome():
      warn "ip field is not set", record = typedRecord.get()
      continue

    # Render the 4 IP bytes as dotted-quad text for the location lookup later.
    let ip = $typedRecord.get().ip.get().join(".")
    customPeerInfo.ip = ip

    # try to ping the peer
    if shouldReconnect(customPeerInfo):
      if customPeerInfo.retries > 0:
        warn "trying to dial failed peer again",
          peerId = peerId, retry = customPeerInfo.retries
      analyzeFuts.add(analyzePeer(customPeerInfo, peerInfo, node, timeout))

  # Wait for all connection attempts to finish
  let analyzedPeers = await allFinished(analyzeFuts)

  for peerIdFut in analyzedPeers:
    let peerIdRes = await peerIdFut
    # Failed pings were already recorded inside analyzePeer; skip them here.
    let peerIdStr = peerIdRes.valueOr:
      continue

    successfulConnections += 1
    let peerId = PeerId.init(peerIdStr).valueOr:
      warn "failed to parse peerId", peerId = peerIdStr
      continue
    var customPeerInfo = allPeers[peerIdStr]

    debug "connected to peer", peer = customPeerInfo[]

    # after connection, get supported protocols
    let lp2pPeerStore = node.switch.peerStore
    let nodeProtocols = lp2pPeerStore[ProtoBook][peerId]
    customPeerInfo.supportedProtocols = nodeProtocols
    customPeerInfo.lastTimeConnected = currentTime

    # after connection, get user-agent
    let nodeUserAgent = lp2pPeerStore[AgentBook][peerId]
    customPeerInfo.userAgent = nodeUserAgent

  info "number of newly discovered peers", amount = newPeers
  # inform the total connections that we did in this round
  info "number of successful connections", amount = successfulConnections
|
2023-09-28 08:07:27 +00:00
|
|
|
|
|
|
|
|
|
proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} =
  ## Recompute all aggregate gauges from the peer table: connected/failed peer
  ## counts, and per-protocol, per-user-agent and per-country tallies.
  ## A peer counts as "connected" when its last connection attempt left no
  ## connError; only connected peers contribute to the detailed tallies.
  var allProtocols: Table[string, int]
  var allAgentStrings: Table[string, int]
  var countries: Table[string, int]
  var connectedPeers = 0
  var failedPeers = 0

  for peerInfo in allPeersRef.values:
    if peerInfo.connError == "":
      for protocol in peerInfo.supportedProtocols:
        inc allProtocols.mgetOrPut(protocol, 0)

      # store available user-agents in the network
      inc allAgentStrings.mgetOrPut(peerInfo.userAgent, 0)

      if peerInfo.country != "":
        inc countries.mgetOrPut(peerInfo.country, 0)

      connectedPeers += 1
    else:
      failedPeers += 1

  networkmonitor_peer_count.set(int64(connectedPeers), labelValues = ["true"])
  networkmonitor_peer_count.set(int64(failedPeers), labelValues = ["false"])

  # update count on each protocol
  # (iterate with `pairs` instead of `keys` + mgetOrPut: the original re-queried
  # the table it was iterating, which is fragile and needlessly mutative)
  for protocol, countOfProtocols in allProtocols.pairs:
    networkmonitor_peer_type_as_per_protocol.set(
      int64(countOfProtocols), labelValues = [protocol]
    )
    info "supported protocols in the network",
      protocol = protocol, count = countOfProtocols

  # update count on each user-agent
  for userAgent, countOfUserAgent in allAgentStrings.pairs:
    networkmonitor_peer_user_agents.set(
      int64(countOfUserAgent), labelValues = [userAgent]
    )
    info "user agents participating in the network",
      userAgent = userAgent, count = countOfUserAgent

  # update count per country
  for country, peerCount in countries.pairs:
    networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country])
    info "number of peers per country", country = country, count = peerCount
|
2023-09-28 08:07:27 +00:00
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc populateInfoFromIp(
    allPeersRef: CustomPeersTableRef, restClient: RestClientRef
) {.async.} =
  ## Fill in country/city for every peer with a known IP, using the ip-api.com
  ## REST endpoint. Peers already geolocated, or without an IP, are skipped.
  for peer in allPeersRef.keys():
    # Already geolocated: nothing to do.
    if allPeersRef[peer].country != "" and allPeersRef[peer].city != "":
      continue
    # TODO: Update also if last update > x
    if allPeersRef[peer].ip == "":
      continue
    # get more info the peers from its ip address
    var location: NodeLocation
    try:
      # IP-API endpoints are now limited to 45 HTTP requests per minute
      # (1400 ms between requests keeps us just under that limit)
      await sleepAsync(1400.millis)
      let response = await restClient.ipToLocation(allPeersRef[peer].ip)
      location = response.data
    except CatchableError:
      # Best effort: leave this peer unlocated and move on to the next one.
      warn "could not get location", ip = allPeersRef[peer].ip
      continue
    allPeersRef[peer].country = location.country
    allPeersRef[peer].city = location.city
|
2022-11-10 09:29:34 +00:00
|
|
|
|
|
|
|
|
|
# TODO: Split in discovery, connections, and ip2location
|
|
|
|
|
# crawls the network discovering peers and trying to connect to them
|
|
|
|
|
# metrics are processed and exposed
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc crawlNetwork(
    node: WakuNode,
    wakuDiscv5: WakuDiscoveryV5,
    restClient: RestClientRef,
    conf: NetworkMonitorConf,
    allPeersRef: CustomPeersTableRef,
) {.async.} =
  ## Main crawl loop: every `conf.refreshInterval` seconds, discover random
  ## peers, publish ENR-derived metrics, try to connect to the discovered
  ## peers, refresh aggregate metrics and geolocate new IPs.
  let crawlInterval = conf.refreshInterval * 1000
  while true:
    let startTime = Moment.now()
    # discover new random nodes
    let discoveredNodes = await wakuDiscv5.findRandomPeers()

    # nodes are nested into bucket, flat it
    let flatNodes = wakuDiscv5.protocol.routingTable.buckets.mapIt(it.nodes).flatten()

    # populate metrics related to capabilities as advertised by the ENR (see waku field)
    setDiscoveredPeersCapabilities(discoveredNodes)

    # populate cluster metrics as advertised by the ENR
    setDiscoveredPeersCluster(flatNodes)

    # tries to connect to all newly discovered nodes
    # and populates metrics related to peers we could connect
    # note random discovered nodes can be already known
    await setConnectedPeersMetrics(
      discoveredNodes, node, conf.timeout, restClient, allPeersRef
    )

    updateMetrics(allPeersRef)

    # populate info from ip addresses
    await populateInfoFromIp(allPeersRef, restClient)

    let totalNodes = discoveredNodes.len
    #let seenNodes = totalNodes

    info "discovered nodes: ", total = totalNodes #, seen = seenNodes

    # Notes:
    # we dont run ipMajorityLoop
    # we dont run revalidateLoop
    let endTime = Moment.now()
    # BUGFIX: keep `elapsed` as a Duration. The previous
    # `(endTime - startTime).nanos` converted to an int64 nanosecond count,
    # and `elapsed.millis` then reinterpreted that count as *milliseconds*
    # (10^6x too large), logging a bogus duration and driving the sleep below
    # negative so the loop never waited between crawls.
    let elapsed = endTime - startTime

    info "crawl duration", time = elapsed.millis

    # Sleep off the remainder of the interval; skip when the crawl overran it.
    if elapsed < crawlInterval.millis:
      await sleepAsync(crawlInterval.millis - elapsed)
|
2023-04-19 19:20:50 +00:00
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc retrieveDynamicBootstrapNodes(
    dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]
): Result[seq[RemotePeerInfo], string] =
  ## Retrieve bootstrap peers via Waku DNS discovery when `dnsDiscovery` is
  ## enabled and a URL is given; otherwise (or when discovery cannot be
  ## initialised) return an empty seq.
  if dnsDiscovery and dnsDiscoveryUrl != "":
    # DNS discovery
    debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

    var nameServers: seq[TransportAddress]
    for ip in dnsDiscoveryNameServers:
      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53

    let dnsResolver = DnsResolver.new(nameServers)

    # TXT-record resolver handed to WakuDnsDiscovery.
    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain = domain
      let resolved = await dnsResolver.resolveTxt(domain)
      # NOTE(review): assumes at least one TXT answer; an empty answer would
      # raise an index error — confirm upstream behaviour.
      return resolved[0] # Use only first answer

    # `let` instead of `var`: the discovery handle is never reassigned.
    let wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
    if wakuDnsDiscovery.isOk():
      return wakuDnsDiscovery.get().findPeers().mapErr(
        proc(e: cstring): string =
          $e
      )
    else:
      warn "Failed to init Waku DNS discovery"

  debug "No method for retrieving dynamic bootstrap nodes specified."
  ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
|
2022-11-10 09:29:34 +00:00
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc getBootstrapFromDiscDns(
    conf: NetworkMonitorConf
): Result[seq[enr.Record], string] =
  ## Resolve bootstrap peers via DNS discovery and keep only those whose ENR
  ## advertises a UDP port (discv5 is UDP-only). Returns err on any failure.
  try:
    let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
    let dynamicBootstrapNodesRes =
      retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers)
    # BUGFIX: the previous code only logged the failure and then called
    # `.get()` on an error Result, raising a Defect. Propagate the error.
    if not dynamicBootstrapNodesRes.isOk():
      error("failed discovering peers from DNS")
      return err("failed discovering peers from DNS: " & dynamicBootstrapNodesRes.error)
    let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()

    # select dynamic bootstrap nodes that have an ENR containing a udp port.
    # Discv5 only supports UDP https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md)
    var discv5BootstrapEnrs: seq[enr.Record]
    for n in dynamicBootstrapNodes:
      if n.enr.isSome():
        let
          enr = n.enr.get()
          tenrRes = enr.toTypedRecord()
        if tenrRes.isOk() and (
          tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()
        ):
          discv5BootstrapEnrs.add(enr)
    return ok(discv5BootstrapEnrs)
  except CatchableError:
    # BUGFIX: the previous except branch logged and fell off the end of the
    # proc, returning an uninitialised Result. Return an explicit error.
    error("failed discovering peers from DNS")
    return err("failed discovering peers from DNS: " & getCurrentExceptionMsg())
|
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc initAndStartApp(
    conf: NetworkMonitorConf
): Result[(WakuNode, WakuDiscoveryV5), string] =
  ## Build the monitor's WakuNode (relay-only capabilities, hardcoded ports)
  ## and its discv5 service seeded from DNS discovery plus configured
  ## bootstrap ENR URIs. Returns the pair ready to be started by the caller.
  let bindIp =
    try:
      parseIpAddress("0.0.0.0")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let extIp =
    try:
      parseIpAddress("127.0.0.1")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let
    # some hardcoded parameters
    rng = keys.newRng()
    key = crypto.PrivateKey.random(Secp256k1, rng[])[]
    nodeTcpPort = Port(60000)
    nodeUdpPort = Port(9000)
    flags = CapabilitiesBitfield.init(
      lightpush = false, filter = false, store = false, relay = true
    )

  var builder = EnrBuilder.init(key)

  builder.withIpAddressAndPorts(
    ipAddr = some(extIp), tcpPort = some(nodeTcpPort), udpPort = some(nodeUdpPort)
  )
  builder.withWakuCapabilities(flags)
  let addShardedTopics = builder.withShardedTopics(conf.pubsubTopics)
  if addShardedTopics.isErr():
    error "failed to add sharded topics to ENR", error = addShardedTopics.error
    return err($addShardedTopics.error)

  let recordRes = builder.build()
  let record =
    if recordRes.isErr():
      return err("cannot build record: " & $recordRes.error)
    else:
      recordRes.get()

  var nodeBuilder = WakuNodeBuilder.init()

  nodeBuilder.withNodeKey(key)
  nodeBuilder.withRecord(record)
  # (normalised the previous `nodeBUilder` spelling — equivalent under Nim's
  # style-insensitivity, but misleading to read)
  nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
  nodeBuilder.withPeerManagerConfig(maxRelayPeers = some(20), shardAware = true)
  let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort)
  if res.isErr():
    return err("node building error" & $res.error)

  let nodeRes = nodeBuilder.build()
  let node =
    if nodeRes.isErr():
      # BUGFIX: previously reported `$res.error` (the network-config result)
      # instead of the actual build error.
      return err("node building error" & $nodeRes.error)
    else:
      nodeRes.get()

  var discv5BootstrapEnrsRes = getBootstrapFromDiscDns(conf)
  # BUGFIX: previously only logged the failure and then called `.get()` on an
  # error Result, raising a Defect. Propagate the error instead.
  if discv5BootstrapEnrsRes.isErr():
    error("failed discovering peers from DNS")
    return err("failed discovering peers from DNS: " & discv5BootstrapEnrsRes.error)
  var discv5BootstrapEnrs = discv5BootstrapEnrsRes.get()

  # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
  for enrUri in conf.bootstrapNodes:
    addBootstrapNode(enrUri, discv5BootstrapEnrs)

  # discv5
  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: bindIp,
    port: nodeUdpPort,
    privateKey: keys.PrivateKey(key.skkey),
    bootstrapRecords: discv5BootstrapEnrs,
    autoupdateRecord: false,
  )

  let wakuDiscv5 = WakuDiscoveryV5.new(node.rng, discv5Conf, some(record))

  try:
    wakuDiscv5.protocol.open()
  except CatchableError:
    return err("could not start node: " & getCurrentExceptionMsg())

  ok((node, wakuDiscv5))
|
2022-11-10 09:29:34 +00:00
|
|
|
|
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc startRestApiServer(
    conf: NetworkMonitorConf,
    allPeersInfo: CustomPeersTableRef,
    numMessagesPerContentTopic: ContentTopicMessageTableRef,
): Result[void, string] =
  ## Start the REST server that exposes the collected peer information and
  ## per-content-topic message counts. Returns err when it cannot be started.
  try:
    let serverAddress =
      initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort)
    # Route patterns are accepted only when they are "{placeholder}" segments.
    proc validate(pattern: string, value: string): int =
      if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1

    var router = RestRouter.init(validate)
    router.installHandler(allPeersInfo, numMessagesPerContentTopic)
    # BUGFIX: check the server-creation Result before `.get()`; previously a
    # failure raised inside the try and the error was silently turned into ok().
    let sres = RestServerRef.new(router, serverAddress)
    if sres.isErr():
      error "could not start rest api server", err = sres.error
      return err("could not start rest api server: " & $sres.error)
    let restServer = sres.get()
    restServer.start()
  except CatchableError:
    # BUGFIX: previously this branch logged and still fell through to ok(),
    # making the caller's isErr() check dead code.
    error("could not start rest api server")
    return err("could not start rest api server: " & getCurrentExceptionMsg())
  ok()
|
|
|
|
|
|
2022-11-14 07:33:36 +00:00
|
|
|
|
# handles rx of messages over a topic (see subscribe)
|
|
|
|
|
# counts the number of messages per content topic
|
2024-03-15 23:08:47 +00:00
|
|
|
|
proc subscribeAndHandleMessages(
    node: WakuNode,
    pubsubTopic: PubsubTopic,
    msgPerContentTopic: ContentTopicMessageTableRef,
) =
  ## Subscribe `node` to `pubsubTopic` and count received messages per content
  ## topic in `msgPerContentTopic`, evicting the least-active topic when the
  ## table would exceed 100 entries.
  # handle function
  proc handler(
      pubsubTopic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async, gcsafe.} =
    trace "rx message", pubsubTopic = pubsubTopic, contentTopic = msg.contentTopic

    # If we reach a table limit size, remove c topics with the least messages.
    let tableSize = 100
    if msgPerContentTopic.len > (tableSize - 1):
      # keys() and values() iterate in the same order, so minIndex picks the
      # key of the topic with the fewest messages
      let minIndex = toSeq(msgPerContentTopic.values()).minIndex()
      msgPerContentTopic.del(toSeq(msgPerContentTopic.keys())[minIndex])

    # TODO: Will overflow at some point
    # +1 if content topic existed, init to 1 otherwise
    if msgPerContentTopic.hasKey(msg.contentTopic):
      msgPerContentTopic[msg.contentTopic] += 1
    else:
      msgPerContentTopic[msg.contentTopic] = 1

  node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler)))
|
2022-11-14 07:33:36 +00:00
|
|
|
|
|
2022-11-10 09:29:34 +00:00
|
|
|
|
when isMainModule:
  # Entry point: load config, start metrics + REST servers, build the node,
  # mount relay/ping (and RLN when configured), subscribe to configured
  # pubsub topics, then run the crawl loop forever.
  # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
  {.pop.}
  let confRes = NetworkMonitorConf.loadConfig()
  if confRes.isErr():
    error "could not load cli variables", err = confRes.error
    quit(1)

  var conf = confRes.get()
  info "cli flags", conf = conf

  # Cluster 1 is The Waku Network: override config with its canonical values.
  if conf.clusterId == 1:
    let twnClusterConf = ClusterConf.TheWakuNetworkConf()

    conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes
    conf.pubsubTopics = twnClusterConf.pubsubTopics
    conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
    conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
    conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
    conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit

  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

  # list of peers that we have discovered/connected
  var allPeersInfo = CustomPeersTableRef()

  # content topic and the number of messages that were received
  var msgPerContentTopic = ContentTopicMessageTableRef()

  # start metrics server
  if conf.metricsServer:
    let res =
      startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
    if res.isErr():
      error "could not start metrics server", err = res.error
      quit(1)

  # start rest server for custom metrics
  let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic)
  if res.isErr():
    error "could not start rest api server", err = res.error
    quit(1)

  # create a rest client
  let clientRest =
    RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2))
  if clientRest.isErr():
    # BUGFIX: previously logged `res.error` (the REST *server* result) here,
    # reporting the wrong failure.
    error "could not start rest api client", err = clientRest.error
    quit(1)
  let restClient = clientRest.get()

  # start waku node
  let nodeRes = initAndStartApp(conf)
  if nodeRes.isErr():
    # include the actual failure reason (previously dropped)
    error "could not start node", err = nodeRes.error
    quit 1

  let (node, discv5) = nodeRes.get()

  waitFor node.mountRelay()
  waitFor node.mountLibp2pPing()

  if conf.rlnRelay and conf.rlnRelayEthContractAddress != "":
    let rlnConf = WakuRlnConfig(
      rlnRelayDynamic: conf.rlnRelayDynamic,
      rlnRelayCredIndex: some(uint(0)),
      rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
      rlnRelayEthClientAddress: string(conf.rlnRelayEthClientAddress),
      rlnRelayCredPath: "",
      rlnRelayCredPassword: "",
      rlnRelayTreePath: conf.rlnRelayTreePath,
      rlnEpochSizeSec: conf.rlnEpochSizeSec,
    )

    try:
      waitFor node.mountRlnRelay(rlnConf)
    except CatchableError:
      error "failed to setup RLN", err = getCurrentExceptionMsg()
      quit 1

  node.mountMetadata(conf.clusterId).isOkOr:
    error "failed to mount waku metadata protocol: ", err = error
    quit 1

  for pubsubTopic in conf.pubsubTopics:
    # Subscribe the node to the default pubsubtopic, to count messages
    subscribeAndHandleMessages(node, pubsubTopic, msgPerContentTopic)

  # spawn the routine that crawls the network
  # TODO: split into 3 routines (discovery, connections, ip2location)
  asyncSpawn crawlNetwork(node, discv5, restClient, conf, allPeersInfo)

  runForever()
|