{.push raises: [].}

import
  std/[options, sets, sequtils, times, strutils, math, random],
  chronos,
  chronicles,
  metrics,
  libp2p/multistream,
  libp2p/muxers/muxer,
  libp2p/nameresolving/nameresolver,
  libp2p/peerstore

import
  ../../common/nimchronos,
  ../../common/enr,
  ../../waku_core,
  ../../waku_relay,
  ../../waku_enr/sharding,
  ../../waku_enr/capabilities,
  ../../waku_metadata,
  ./peer_store/peer_storage,
  ./waku_peer_store

export waku_peer_store, peer_storage, peers

declareCounter waku_peers_dials, "Number of peer dials", ["outcome"]
# TODO: Populate from PeerStore.Source when ready
declarePublicCounter waku_node_conns_initiated,
  "Number of connections initiated", ["source"]
declarePublicGauge waku_peers_errors, "Number of peer manager errors", ["type"]
declarePublicGauge waku_connected_peers,
  "Number of physical connections per direction and protocol",
  labels = ["direction", "protocol"]
declarePublicGauge waku_streams_peers,
  "Number of streams per direction and protocol", labels = ["direction", "protocol"]
declarePublicGauge waku_peer_store_size, "Number of peers managed by the peer store"
declarePublicGauge waku_service_peers,
  "Service peer protocol and multiaddress ", labels = ["protocol", "peerId"]
declarePublicGauge waku_total_unique_peers, "total number of unique peers"

logScope:
  topics = "waku node peer_manager"

randomize()

const
  # TODO: Make configurable
  DefaultDialTimeout* = chronos.seconds(10)

  # Max attempts before removing the peer
  MaxFailedAttempts = 5

  # Time to wait before attempting to dial again is calculated as:
  # initialBackoffInSec*(backoffFactor^(failedAttempts-1))
  # 120s, 480s, 1920s, 7680s
  InitialBackoffInSec = 120
  BackoffFactor = 4

  # Limit the amount of parallel dials
  MaxParallelDials = 10

  # Delay between consecutive relayConnectivityLoop runs
  ConnectivityLoopInterval = chronos.seconds(30)

  # How often the peer store is pruned
  PrunePeerStoreInterval = chronos.minutes(10)

  # How often metrics and logs are shown/updated
  LogAndMetricsInterval = chronos.minutes(3)

  # Max peers that we allow from the same IP
  DefaultColocationLimit* = 5

type PeerManager* = ref object of RootObj
  switch*: Switch
  wakuPeerStore*: WakuPeerStore
  wakuMetadata*: WakuMetadata
  initialBackoffInSec*: int
  backoffFactor*: int
  maxFailedAttempts*: int
  storage*: PeerStorage
  serviceSlots*: Table[string, RemotePeerInfo]
  maxRelayPeers*: int
  outRelayPeersTarget: int
  inRelayPeersTarget: int
  ipTable*: Table[string, seq[PeerId]]
  colocationLimit*: int
  started: bool
  shardedPeerManagement: bool # temp feature flag

proc protocolMatcher*(codec: string): Matcher =
  ## Returns a protocol matcher function for the provided codec
  proc match(proto: string): bool {.gcsafe.} =
    ## Matches a proto with any postfix to the provided codec.
    ## E.g. if the codec is `/vac/waku/filter/2.0.0` it matches the protos:
    ## `/vac/waku/filter/2.0.0`, `/vac/waku/filter/2.0.0-beta3`, `/vac/waku/filter/2.0.0-actualnonsense`
    return proto.startsWith(codec)

  return match
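
# Illustrative sketch (not part of the original module): the matcher returned
# above is a plain prefix match, so for a filter codec it behaves like this:
#   let matcher = protocolMatcher("/vac/waku/filter/2.0.0")
#   doAssert matcher("/vac/waku/filter/2.0.0")        # exact match
#   doAssert matcher("/vac/waku/filter/2.0.0-beta3")  # postfix still matches
#   doAssert not matcher("/vac/waku/store/2.0.0")     # different codec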

proc calculateBackoff(
    initialBackoffInSec: int, backoffFactor: int, failedAttempts: int
): timer.Duration =
  if failedAttempts == 0:
    return chronos.seconds(0)
  return chronos.seconds(initialBackoffInSec * (backoffFactor ^ (failedAttempts - 1)))
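
# Worked example (illustrative, assuming the default constants above,
# InitialBackoffInSec = 120 and BackoffFactor = 4):
#   calculateBackoff(120, 4, 0) == chronos.seconds(0)     # never failed
#   calculateBackoff(120, 4, 1) == chronos.seconds(120)   # 2 min
#   calculateBackoff(120, 4, 2) == chronos.seconds(480)   # 8 min
#   calculateBackoff(120, 4, 3) == chronos.seconds(1920)  # 32 min
#   calculateBackoff(120, 4, 4) == chronos.seconds(7680)  # 2 h 8 min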

####################
# Helper functions #
####################

proc insertOrReplace(ps: PeerStorage, remotePeerInfo: RemotePeerInfo) {.gcsafe.} =
  ## Insert peer entry into persistent storage, or replace existing entry with updated info
  ps.put(remotePeerInfo).isOkOr:
    warn "failed to store peers", err = error
    waku_peers_errors.inc(labelValues = ["storage_failure"])
    return

proc addPeer*(
    pm: PeerManager, remotePeerInfo: RemotePeerInfo, origin = UnknownOrigin
) {.gcsafe.} =
  ## Adds a peer to the manager

  if remotePeerInfo.peerId == pm.switch.peerInfo.peerId:
    trace "skipping to manage our unmanageable self"
    return

  if pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and
      pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] == remotePeerInfo.publicKey and
      pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0:
    let incomingEnr = remotePeerInfo.enr.valueOr:
      trace "peer already managed and incoming ENR is empty",
        remote_peer_id = $remotePeerInfo.peerId
      return

    if pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw == incomingEnr.raw or
        pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].seqNum > incomingEnr.seqNum:
      trace "peer already managed and ENR info is already saved",
        remote_peer_id = $remotePeerInfo.peerId
      return

  trace "Adding peer to manager",
    peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs

  waku_total_unique_peers.inc()

  pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] = remotePeerInfo.addrs
  pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] = remotePeerInfo.publicKey
  pm.wakuPeerStore[SourceBook][remotePeerInfo.peerId] = origin
  pm.wakuPeerStore[ProtoVersionBook][remotePeerInfo.peerId] =
    remotePeerInfo.protoVersion
  pm.wakuPeerStore[AgentBook][remotePeerInfo.peerId] = remotePeerInfo.agent

  if remotePeerInfo.protocols.len > 0:
    pm.wakuPeerStore[ProtoBook][remotePeerInfo.peerId] = remotePeerInfo.protocols

  if remotePeerInfo.enr.isSome():
    pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId] = remotePeerInfo.enr.get()

  # Add peer to storage. Entry will subsequently be updated with connectedness information
  if not pm.storage.isNil:
    # Reading from the db (pm.storage) is only done on startup, hence we need to connect to all saved peers.
    # `remotePeerInfo.connectedness` should already be `NotConnected`, but we reset it to `NotConnected` just in case.
    # This reset is also done when reading from storage, I believe, to ensure the `connectedness` state is the correct one.
    # So many resets are likely redundant, but I haven't verified whether this is the case.
    remotePeerInfo.connectedness = NotConnected

    pm.storage.insertOrReplace(remotePeerInfo)

# Connects to a given node. Note that this function uses `connect` and
# does not provide a protocol. Streams for relay (gossipsub) are created
# automatically without needing to dial.

proc connectRelay*(
    pm: PeerManager,
    peer: RemotePeerInfo,
    dialTimeout = DefaultDialTimeout,
    source = "api",
): Future[bool] {.async.} =
  let peerId = peer.peerId

  # Do not attempt to dial self
  if peerId == pm.switch.peerInfo.peerId:
    return false

  if not pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec):
    pm.addPeer(peer)

  let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]
  trace "Connecting to relay peer",
    wireAddr = peer.addrs, peerId = peerId, failedAttempts = failedAttempts

  var deadline = sleepAsync(dialTimeout)
  let workfut = pm.switch.connect(peerId, peer.addrs)

  # Can't use catch: with .withTimeout() in this case
  let res = catch:
    await workfut or deadline

  let reasonFailed =
    if not workfut.finished():
      await workfut.cancelAndWait()
      "timed out"
    elif res.isErr():
      res.error.msg
    else:
      if not deadline.finished():
        await deadline.cancelAndWait()

      waku_peers_dials.inc(labelValues = ["successful"])
      waku_node_conns_initiated.inc(labelValues = [source])

      pm.wakuPeerStore[NumberFailedConnBook][peerId] = 0

      return true

  # Dial failed
  pm.wakuPeerStore[NumberFailedConnBook][peerId] =
    pm.wakuPeerStore[NumberFailedConnBook][peerId] + 1
  pm.wakuPeerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second)
  pm.wakuPeerStore[ConnectionBook][peerId] = CannotConnect

  trace "Connecting relay peer failed",
    peerId = peerId,
    reason = reasonFailed,
    failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]
  waku_peers_dials.inc(labelValues = [reasonFailed])

  return false

proc disconnectNode*(pm: PeerManager, peer: RemotePeerInfo) {.async.} =
  let peerId = peer.peerId
  await pm.switch.disconnect(peerId)

# Dialing should be used only for protocols that require a stream to write and read.
# This shall not be used to dial Relay protocols, since that would create
# unnecessary unused streams.

proc dialPeer(
    pm: PeerManager,
    peerId: PeerID,
    addrs: seq[MultiAddress],
    proto: string,
    dialTimeout = DefaultDialTimeout,
    source = "api",
): Future[Option[Connection]] {.async.} =
  if peerId == pm.switch.peerInfo.peerId:
    error "could not dial self"
    return none(Connection)

  if proto == WakuRelayCodec:
    error "dial shall not be used to connect to relays"
    return none(Connection)

  trace "Dialing peer", wireAddr = addrs, peerId = peerId, proto = proto

  # Dial Peer
  let dialFut = pm.switch.dial(peerId, addrs, proto)

  let res = catch:
    if await dialFut.withTimeout(dialTimeout):
      return some(dialFut.read())
    else:
      await cancelAndWait(dialFut)

  let reasonFailed = if res.isOk: "timed out" else: res.error.msg

  trace "Dialing peer failed", peerId = peerId, reason = reasonFailed, proto = proto

  return none(Connection)

proc loadFromStorage(pm: PeerManager) {.gcsafe.} =
  ## Load peers from storage, if available

  trace "loading peers from storage"

  var amount = 0

  proc onData(remotePeerInfo: RemotePeerInfo) =
    let peerId = remotePeerInfo.peerId

    if pm.switch.peerInfo.peerId == peerId:
      # Do not manage self
      return

    trace "loading peer",
      peerId = peerId,
      address = remotePeerInfo.addrs,
      protocols = remotePeerInfo.protocols,
      agent = remotePeerInfo.agent,
      version = remotePeerInfo.protoVersion

    # nim-libp2p books
    pm.wakuPeerStore[AddressBook][peerId] = remotePeerInfo.addrs
    pm.wakuPeerStore[ProtoBook][peerId] = remotePeerInfo.protocols
    pm.wakuPeerStore[KeyBook][peerId] = remotePeerInfo.publicKey
    pm.wakuPeerStore[AgentBook][peerId] = remotePeerInfo.agent
    pm.wakuPeerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion

    # custom books
    pm.wakuPeerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state
    pm.wakuPeerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime
    pm.wakuPeerStore[SourceBook][peerId] = remotePeerInfo.origin

    if remotePeerInfo.enr.isSome():
      pm.wakuPeerStore[ENRBook][peerId] = remotePeerInfo.enr.get()

    amount.inc()

  pm.storage.getAll(onData).isOkOr:
    warn "loading peers from storage failed", err = error
    waku_peers_errors.inc(labelValues = ["storage_load_failure"])
    return

  trace "recovered peers from storage", amount = amount

proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool =
  # Returns whether we can try to connect to this peer, based on past failed attempts.
  # It uses an exponential backoff: each failed connection attempt makes us
  # wait longer before trying again.
  let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]

  # if it never errored, we can try to connect
  if failedAttempts == 0:
    return true

  # if there are too many failed attempts, do not reconnect
  if failedAttempts >= pm.maxFailedAttempts:
    return false

  # If it errored we wait an exponential backoff from the last connection attempt:
  # the more failed attempts, the greater the backoff since the last attempt
  let now = Moment.init(getTime().toUnix, Second)
  let lastFailed = pm.wakuPeerStore[LastFailedConnBook][peerId]
  let backoff =
    calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts)

  return now >= (lastFailed + backoff)
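
# Illustrative sketch (not part of the original module), using the default
# constants above:
#   failedAttempts == 0                           -> always true
#   failedAttempts == 2, last failure 10 min ago  -> true  (backoff is 480 s)
#   failedAttempts == 3, last failure 10 min ago  -> false (backoff is 1920 s)
#   failedAttempts >= MaxFailedAttempts (5)       -> always false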

##################
# Initialisation #
##################

proc getPeerIp(pm: PeerManager, peerId: PeerId): Option[string] =
  if not pm.switch.connManager.getConnections().hasKey(peerId):
    return none(string)

  let conns = pm.switch.connManager.getConnections().getOrDefault(peerId)
  if conns.len == 0:
    return none(string)

  let obAddr = conns[0].connection.observedAddr.valueOr:
    return none(string)

  # TODO: think if circuit relay ips should be handled differently

  return some(obAddr.getHostname())

# called when a connection i) is created or ii) is closed
proc onConnEvent(pm: PeerManager, peerId: PeerID, event: ConnEvent) {.async.} =
  case event.kind
  of ConnEventKind.Connected:
    #let direction = if event.incoming: Inbound else: Outbound
    discard
  of ConnEventKind.Disconnected:
    discard

proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} =
  let res = catch:
    await pm.switch.dial(peerId, WakuMetadataCodec)

  var reason: string
  block guardClauses:
    let conn = res.valueOr:
      reason = "dial failed: " & error.msg
      break guardClauses

    let metadata = (await pm.wakuMetadata.request(conn)).valueOr:
      reason = "waku metadata request failed: " & error
      break guardClauses

    let clusterId = metadata.clusterId.valueOr:
      reason = "empty cluster-id reported"
      break guardClauses

    if pm.wakuMetadata.clusterId != clusterId:
      reason =
        "different clusterId reported: " & $pm.wakuMetadata.clusterId & " vs " &
        $clusterId
      break guardClauses

    if (
      pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec) and
      not metadata.shards.anyIt(pm.wakuMetadata.shards.contains(it))
    ):
      let myShardsString = "[ " & toSeq(pm.wakuMetadata.shards).join(", ") & " ]"
      let otherShardsString = "[ " & metadata.shards.join(", ") & " ]"
      reason =
        "no shards in common: my_shards = " & myShardsString & " others_shards = " &
        otherShardsString
      break guardClauses

    return

  info "disconnecting from peer", peerId = peerId, reason = reason
  asyncSpawn(pm.switch.disconnect(peerId))
  pm.wakuPeerStore.delete(peerId)

proc connectedPeers*(pm: PeerManager, protocol: string): (seq[PeerId], seq[PeerId]) =
  ## Returns the peerIds of physical connections (in and out)
  ## containing at least one stream with the given protocol.

  var inPeers: seq[PeerId]
  var outPeers: seq[PeerId]

  for peerId, muxers in pm.switch.connManager.getConnections():
    for peerConn in muxers:
      let streams = peerConn.getStreams()
      if streams.anyIt(it.protocol == protocol):
        if peerConn.connection.transportDir == Direction.In:
          inPeers.add(peerId)
        elif peerConn.connection.transportDir == Direction.Out:
          outPeers.add(peerId)

  return (inPeers, outPeers)

# called when a peer i) first connects to us ii) disconnects all connections from us
proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
  if not pm.wakuMetadata.isNil() and event.kind == PeerEventKind.Joined:
    await pm.onPeerMetadata(peerId)

  var direction: PeerDirection
  var connectedness: Connectedness

  case event.kind
  of Joined:
    direction = if event.initiator: Outbound else: Inbound
    connectedness = Connected

    ## Check max allowed in-relay peers
    let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0]
    if inRelayPeers.len > pm.inRelayPeersTarget and
        pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec):
      debug "disconnecting relay peer because reached max num in-relay peers",
        peerId = peerId,
        inRelayPeers = inRelayPeers.len,
        inRelayPeersTarget = pm.inRelayPeersTarget
      await pm.switch.disconnect(peerId)

    ## Apply max ip colocation limit
    if (let ip = pm.getPeerIp(peerId); ip.isSome()):
      pm.ipTable.mgetOrPut(ip.get, newSeq[PeerId]()).add(peerId)

      # in theory this should always be one, but just in case
      let peersBehindIp = pm.ipTable[ip.get]

      # pm.colocationLimit == 0 disables the ip colocation limit
      if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit:
        for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]:
          debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
          asyncSpawn(pm.switch.disconnect(peerId))
          pm.wakuPeerStore.delete(peerId)
  of Left:
    direction = UnknownDirection
    connectedness = CanConnect

    # note we can't access the peerId ip here as the connection was already closed
    for ip, peerIds in pm.ipTable.pairs:
      if peerIds.contains(peerId):
        pm.ipTable[ip] = pm.ipTable[ip].filterIt(it != peerId)
        if pm.ipTable[ip].len == 0:
          pm.ipTable.del(ip)
        break
  of Identified:
    debug "event identified", peerId = peerId

  pm.wakuPeerStore[ConnectionBook][peerId] = connectedness
  pm.wakuPeerStore[DirectionBook][peerId] = direction

  if not pm.storage.isNil:
    var remotePeerInfo = pm.wakuPeerStore.getPeer(peerId)

    if event.kind == PeerEventKind.Left:
      remotePeerInfo.disconnectTime = getTime().toUnix

    pm.storage.insertOrReplace(remotePeerInfo)

proc new*(
    T: type PeerManager,
    switch: Switch,
    wakuMetadata: WakuMetadata = nil,
    maxRelayPeers: Option[int] = none(int),
    storage: PeerStorage = nil,
    initialBackoffInSec = InitialBackoffInSec,
    backoffFactor = BackoffFactor,
    maxFailedAttempts = MaxFailedAttempts,
    colocationLimit = DefaultColocationLimit,
    shardedPeerManagement = false,
): PeerManager {.gcsafe.} =
  let capacity = switch.peerStore.capacity
  let maxConnections = switch.connManager.inSema.size
  if maxConnections > capacity:
    error "Max number of connections can't be greater than PeerManager capacity",
      capacity = capacity, maxConnections = maxConnections
    raise newException(
      Defect, "Max number of connections can't be greater than PeerManager capacity"
    )

  var maxRelayPeersValue = 0
  if maxRelayPeers.isSome():
    if maxRelayPeers.get() > maxConnections:
      error "Max number of relay peers can't be greater than the max amount of connections",
        maxConnections = maxConnections, maxRelayPeers = maxRelayPeers.get()
      raise newException(
        Defect,
        "Max number of relay peers can't be greater than the max amount of connections",
      )

    if maxRelayPeers.get() == maxConnections:
      warn "Max number of relay peers is equal to max amount of connections, peer won't be contributing to service peers",
        maxConnections = maxConnections, maxRelayPeers = maxRelayPeers.get()
    maxRelayPeersValue = maxRelayPeers.get()
  else:
    # Leave by default 20% of connections for service peers
    maxRelayPeersValue = maxConnections - (maxConnections div 5)

  # attempt to calculate max backoff to prevent potential overflows or unreasonably high values
  let backoff = calculateBackoff(initialBackoffInSec, backoffFactor, maxFailedAttempts)
  if backoff.weeks() > 1:
    error "Max backoff time can't be over 1 week", maxBackoff = backoff
    raise newException(Defect, "Max backoff time can't be over 1 week")

  let outRelayPeersTarget = maxRelayPeersValue div 3

  let pm = PeerManager(
    switch: switch,
    wakuMetadata: wakuMetadata,
    wakuPeerStore: createWakuPeerStore(switch.peerStore),
    storage: storage,
    initialBackoffInSec: initialBackoffInSec,
    backoffFactor: backoffFactor,
    outRelayPeersTarget: outRelayPeersTarget,
    inRelayPeersTarget: maxRelayPeersValue - outRelayPeersTarget,
    maxRelayPeers: maxRelayPeersValue,
    maxFailedAttempts: maxFailedAttempts,
    colocationLimit: colocationLimit,
    shardedPeerManagement: shardedPeerManagement,
  )

  proc connHook(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.} =
    onConnEvent(pm, peerId, event)

  proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
    onPeerEvent(pm, peerId, event)

  proc peerStoreChanged(peerId: PeerId) {.gcsafe.} =
    waku_peer_store_size.set(toSeq(pm.wakuPeerStore[AddressBook].book.keys).len.int64)

  # currently disabled
  #pm.switch.addConnEventHandler(connHook, ConnEventKind.Connected)
  #pm.switch.addConnEventHandler(connHook, ConnEventKind.Disconnected)

  pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Joined)
  pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Left)

  # called every time the peerstore is updated
  pm.wakuPeerStore[AddressBook].addHandler(peerStoreChanged)

  pm.serviceSlots = initTable[string, RemotePeerInfo]()
  pm.ipTable = initTable[string, seq[PeerId]]()

  if not storage.isNil():
    trace "found persistent peer storage"
    pm.loadFromStorage() # Load previously managed peers.
  else:
    trace "no peer storage found"

  return pm
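
# Usage sketch (hypothetical, not part of the original module): a caller would
# typically construct and start the manager with an already configured libp2p
# `switch` and, optionally, waku metadata and a persistent storage backend:
#   let pm = PeerManager.new(
#     switch, wakuMetadata = metadata, maxRelayPeers = some(50), storage = nil
#   )
#   pm.start()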

#####################
# Manager interface #
#####################

proc addServicePeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, proto: string) =
  # Do not add relay peers
  if proto == WakuRelayCodec:
    warn "Can't add relay peer to service peers slots"
    return

  info "Adding peer to service slots",
    peerId = remotePeerInfo.peerId, addr = remotePeerInfo.addrs[0], service = proto
  waku_service_peers.set(1, labelValues = [$proto, $remotePeerInfo.addrs[0]])

  # Set peer for service slot
  pm.serviceSlots[proto] = remotePeerInfo

  pm.addPeer(remotePeerInfo)

####################
# Dialer interface #
####################

proc dialPeer*(
    pm: PeerManager,
    remotePeerInfo: RemotePeerInfo,
    proto: string,
    dialTimeout = DefaultDialTimeout,
    source = "api",
): Future[Option[Connection]] {.async.} =
  # Dial a given peer and add it to the list of known peers
  # TODO: check peer validity and score before continuing. Limit number of peers to be managed.

  # First add dialed peer info to peer store, if it does not exist yet.
  # TODO: nim libp2p peerstore already adds them
  if not pm.wakuPeerStore.hasPeer(remotePeerInfo.peerId, proto):
    trace "Adding newly dialed peer to manager",
      peerId = $remotePeerInfo.peerId, address = $remotePeerInfo.addrs[0], proto = proto
    pm.addPeer(remotePeerInfo)

  return await pm.dialPeer(
    remotePeerInfo.peerId, remotePeerInfo.addrs, proto, dialTimeout, source
  )

proc dialPeer*(
    pm: PeerManager,
    peerId: PeerID,
    proto: string,
    dialTimeout = DefaultDialTimeout,
    source = "api",
): Future[Option[Connection]] {.async.} =
  # Dial an existing peer by looking up its existing addrs in the switch's peerStore
  # TODO: check peer validity and score before continuing. Limit number of peers to be managed.

  let addrs = pm.switch.peerStore[AddressBook][peerId]
  return await pm.dialPeer(peerId, addrs, proto, dialTimeout, source)

proc connectToNodes*(
    pm: PeerManager,
    nodes: seq[string] | seq[RemotePeerInfo],
    dialTimeout = DefaultDialTimeout,
    source = "api",
) {.async.} =
  if nodes.len == 0:
    return

  info "Dialing multiple peers", numOfPeers = nodes.len, nodes = $nodes

  var futConns: seq[Future[bool]]
  var connectedPeers: seq[RemotePeerInfo]
  for node in nodes:
    let node = parsePeerInfo(node)
    if node.isOk():
      futConns.add(pm.connectRelay(node.value))
      connectedPeers.add(node.value)
    else:
      error "Couldn't parse node info", error = node.error

  await allFutures(futConns)

  # Filter connectedPeers down to the peers whose connection attempt succeeded
  let combined = zip(connectedPeers, futConns)
  connectedPeers = combined.filterIt(it[1].read() == true).mapIt(it[0])

  when defined(debugDiscv5):
    let peerIds = connectedPeers.mapIt(it.peerId)
    let origin = connectedPeers.mapIt(it.origin)
    if peerIds.len > 0:
      notice "established connections with found peers",
        peerIds = peerIds.mapIt(shortLog(it)), origin = origin
    else:
      notice "could not connect to new peers", attempted = nodes.len

  info "Finished dialing multiple peers",
    successfulConns = connectedPeers.len, attempted = nodes.len

  # The issue seems to be around peers not being fully connected when
  # trying to subscribe. So what we do is sleep to guarantee nodes are
  # fully connected.
  #
  # This issue was known to Dmitry on nim-libp2p and may be resolvable
  # later.
  await sleepAsync(chronos.seconds(5))
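
# Usage sketch (hypothetical values): `connectToNodes` accepts either parsed
# RemotePeerInfo entries or textual multiaddresses, e.g.:
#   await pm.connectToNodes(@["/ip4/<host>/tcp/60000/p2p/<peerId>"])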

proc reconnectPeers*(
    pm: PeerManager, proto: string, backoffTime: chronos.Duration = chronos.seconds(0)
) {.async.} =
  ## Reconnect to peers registered for this protocol. This will update connectedness.
  ## Especially useful to resume connections from persistent storage after a restart.

  debug "Reconnecting peers", proto = proto

  # Proto is not persisted, we need to iterate over all peers.
  for peerInfo in pm.wakuPeerStore.peers(protocolMatcher(proto)):
    # Check that the peer can be connected
    if peerInfo.connectedness == CannotConnect:
      error "Not reconnecting to unreachable or non-existing peer",
        peerId = peerInfo.peerId
      continue

    if backoffTime > ZeroDuration:
      debug "Backing off before reconnect",
        peerId = peerInfo.peerId, backoffTime = backoffTime
      # We disconnected recently and still need to wait for a backoff period before connecting
      await sleepAsync(backoffTime)

    await pm.connectToNodes(@[peerInfo])

proc getNumStreams*(pm: PeerManager, protocol: string): (int, int) =
  var
    numStreamsIn = 0
    numStreamsOut = 0
  for peerId, muxers in pm.switch.connManager.getConnections():
    for peerConn in muxers:
      for stream in peerConn.getStreams():
        if stream.protocol == protocol:
          if stream.dir == Direction.In:
            numStreamsIn += 1
          elif stream.dir == Direction.Out:
            numStreamsOut += 1
  return (numStreamsIn, numStreamsOut)

proc pruneInRelayConns(pm: PeerManager, amount: int) {.async.} =
  if amount <= 0:
    return

  let (inRelayPeers, _) = pm.connectedPeers(WakuRelayCodec)
  let connsToPrune = min(amount, inRelayPeers.len)

  for p in inRelayPeers[0 ..< connsToPrune]:
    trace "Pruning Peer", Peer = $p
    asyncSpawn(pm.switch.disconnect(p))

proc connectToRelayPeers*(pm: PeerManager) {.async.} =
  var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
  let totalRelayPeers = inRelayPeers.len + outRelayPeers.len

  if inRelayPeers.len > pm.inRelayPeersTarget:
    await pm.pruneInRelayConns(inRelayPeers.len - pm.inRelayPeersTarget)

  if outRelayPeers.len >= pm.outRelayPeersTarget:
    return

  let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers()

  var outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId))

  shuffle(outsideBackoffPeers)

  var index = 0
  var numPendingConnReqs =
    min(outsideBackoffPeers.len, pm.outRelayPeersTarget - outRelayPeers.len)
    ## number of outstanding connection requests

  while numPendingConnReqs > 0 and outRelayPeers.len < pm.outRelayPeersTarget:
    let numPeersToConnect = min(numPendingConnReqs, MaxParallelDials)
    await pm.connectToNodes(outsideBackoffPeers[index ..< (index + numPeersToConnect)])

    (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)

    index += numPeersToConnect
    numPendingConnReqs -= numPeersToConnect

proc manageRelayPeers*(pm: PeerManager) {.async.} =
  if pm.wakuMetadata.shards.len == 0:
    return

  var peersToConnect: HashSet[PeerId] # Can't use RemotePeerInfo as they are ref objects
  var peersToDisconnect: int

  # Get all connected peers for Waku Relay
  var (inPeers, outPeers) = pm.connectedPeers(WakuRelayCodec)

  # Calculate in/out target number of peers for each shard
  let inTarget = pm.inRelayPeersTarget div pm.wakuMetadata.shards.len
  let outTarget = pm.outRelayPeersTarget div pm.wakuMetadata.shards.len

  for shard in pm.wakuMetadata.shards.items:
    # Filter out peers not on this shard
    let connectedInPeers = inPeers.filterIt(
      pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
    )

    let connectedOutPeers = outPeers.filterIt(
      pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
    )

    # Calculate the difference between current values and targets
    let inPeerDiff = connectedInPeers.len - inTarget
    let outPeerDiff = outTarget - connectedOutPeers.len

    if inPeerDiff > 0:
      peersToDisconnect += inPeerDiff

    if outPeerDiff <= 0:
      continue

    # Get all peers for this shard
    var connectablePeers =
      pm.wakuPeerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard))

    let shardCount = connectablePeers.len

    connectablePeers.keepItIf(
      not pm.wakuPeerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId)
    )

    let connectableCount = connectablePeers.len

    connectablePeers.keepItIf(pm.wakuPeerStore.hasCapability(it.peerId, Relay))

    let relayCount = connectablePeers.len

    debug "Sharded Peer Management",
      shard = shard,
      connectable = $connectableCount & "/" & $shardCount,
      relayConnectable = $relayCount & "/" & $shardCount,
      relayInboundTarget = $connectedInPeers.len & "/" & $inTarget,
      relayOutboundTarget = $connectedOutPeers.len & "/" & $outTarget

    # Always pick random connectable relay peers
    shuffle(connectablePeers)

    let length = min(outPeerDiff, connectablePeers.len)
    for peer in connectablePeers[0 ..< length]:
      trace "Peer To Connect To", peerId = $peer.peerId
      peersToConnect.incl(peer.peerId)

  await pm.pruneInRelayConns(peersToDisconnect)

  if peersToConnect.len == 0:
    return

  let uniquePeers = toSeq(peersToConnect).mapIt(pm.wakuPeerStore.getPeer(it))

  # Connect to all nodes
  for i in countup(0, uniquePeers.len, MaxParallelDials):
    let stop = min(i + MaxParallelDials, uniquePeers.len)
    trace "Connecting to Peers", peerIds = $uniquePeers[i ..< stop]
    await pm.connectToNodes(uniquePeers[i ..< stop])

proc prunePeerStore*(pm: PeerManager) =
  let numPeers = pm.wakuPeerStore[AddressBook].book.len
  let capacity = pm.wakuPeerStore.getCapacity()
  if numPeers <= capacity:
    return

  trace "Peer store capacity exceeded", numPeers = numPeers, capacity = capacity
  let pruningCount = numPeers - capacity
  var peersToPrune: HashSet[PeerId]

  # prune failed connections
  for peerId, count in pm.wakuPeerStore[NumberFailedConnBook].book.pairs:
    if count < pm.maxFailedAttempts:
      continue

    if peersToPrune.len >= pruningCount:
      break

    peersToPrune.incl(peerId)

  var notConnected = pm.wakuPeerStore.getDisconnectedPeers().mapIt(it.peerId)

  # Always pick random non-connected peers
  shuffle(notConnected)

  var shardlessPeers: seq[PeerId]
  var peersByShard = initTable[uint16, seq[PeerId]]()

  for peer in notConnected:
    if not pm.wakuPeerStore[ENRBook].contains(peer):
      shardlessPeers.add(peer)
      continue

    let record = pm.wakuPeerStore[ENRBook][peer]

    let rec = record.toTyped().valueOr:
      shardlessPeers.add(peer)
      continue

    let rs = rec.relaySharding().valueOr:
      shardlessPeers.add(peer)
      continue

    for shard in rs.shardIds:
      peersByShard.mgetOrPut(shard, @[]).add(peer)

  # prune not connected peers without shard
  for peer in shardlessPeers:
    if peersToPrune.len >= pruningCount:
      break

    peersToPrune.incl(peer)

  # calculate the avg peers per shard
  let total = sum(toSeq(peersByShard.values).mapIt(it.len))
  let avg = min(1, total div max(1, peersByShard.len))

  # prune peers from shard with higher than avg count
  for shard, peers in peersByShard.pairs:
    let count = max(peers.len - avg, 0)
    for peer in peers[0 .. count]:
      if peersToPrune.len >= pruningCount:
        break

      peersToPrune.incl(peer)

  for peer in peersToPrune:
    pm.wakuPeerStore.delete(peer)

  let afterNumPeers = pm.wakuPeerStore[AddressBook].book.len

  trace "Finished pruning peer store",
    beforeNumPeers = numPeers,
    afterNumPeers = afterNumPeers,
    capacity = capacity,
    pruned = peersToPrune.len

proc selectPeer*(
    pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic)
): Option[RemotePeerInfo] =
  trace "Selecting peer from peerstore", protocol = proto

  # Selects the best peer for a given protocol
  var peers = pm.wakuPeerStore.getPeersByProtocol(proto)

  if shard.isSome():
    peers.keepItIf((it.enr.isSome() and it.enr.get().containsShard(shard.get())))

  # No criteria for selecting a peer for WakuRelay, random one
  if proto == WakuRelayCodec:
    # TODO: proper heuristic here that compares peer scores and selects "best" one. For now the first peer for the given protocol is returned
    if peers.len > 0:
      trace "Got peer from peerstore",
        peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto
      return some(peers[0])
    trace "No peer found for protocol", protocol = proto
    return none(RemotePeerInfo)

  # For other protocols, we select the peer that is slotted for the given protocol
  pm.serviceSlots.withValue(proto, serviceSlot):
    trace "Got peer from service slots",
      peerId = serviceSlot[].peerId, multi = serviceSlot[].addrs[0], protocol = proto
    return some(serviceSlot[])

  # If not slotted, we select a random peer for the given protocol
  if peers.len > 0:
    trace "Got peer from peerstore",
      peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto
    return some(peers[0])
  trace "No peer found for protocol", protocol = proto
  return none(RemotePeerInfo)
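
# Usage sketch (hypothetical, not part of the original module), assuming a
# service codec such as WakuStoreCodec is in scope:
#   let peerOpt = pm.selectPeer(WakuStoreCodec)
#   if peerOpt.isNone():
#     return # no slotted or known peer supports this protocol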

# Prunes peers from peerstore to remove old/stale ones
proc prunePeerStoreLoop(pm: PeerManager) {.async.} =
  trace "Starting prune peerstore loop"
  while pm.started:
    pm.prunePeerStore()
    await sleepAsync(PrunePeerStoreInterval)

# Ensures a healthy amount of connected relay peers
proc relayConnectivityLoop*(pm: PeerManager) {.async.} =
  trace "Starting relay connectivity loop"
  while pm.started:
    if pm.shardedPeerManagement:
      await pm.manageRelayPeers()
    else:
      await pm.connectToRelayPeers()
    let
      (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
      excessInConns = max(inRelayPeers.len - pm.inRelayPeersTarget, 0)

      # One minus the percentage of excess connections relative to the target, limited to 100%
      # We calculate one minus this percentage because we want the factor to be inversely proportional to the number of excess peers
      inFactor = 1 - min(excessInConns / pm.inRelayPeersTarget, 1)
      # Percentage of out relay peers relative to the target
      outFactor = min(outRelayPeers.len / pm.outRelayPeersTarget, 1)
      factor = min(outFactor, inFactor)
      dynamicSleepInterval =
        chronos.seconds(int(float(ConnectivityLoopInterval.seconds()) * factor))

    # Shorten the connectivity loop interval dynamically based on percentage of peers to fill or connections to prune
    await sleepAsync(dynamicSleepInterval)
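
# Worked example (illustrative values): with ConnectivityLoopInterval = 30 s,
# inRelayPeersTarget = 20 and outRelayPeersTarget = 10, a node that currently
# has 22 inbound and 5 outbound relay peers gets:
#   excessInConns = 2  ->  inFactor  = 1 - min(2 / 20, 1) = 0.9
#   outRelayPeers  = 5 ->  outFactor = min(5 / 10, 1)     = 0.5
#   factor = min(0.9, 0.5) = 0.5  ->  dynamicSleepInterval = 15 s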

proc logAndMetrics(pm: PeerManager) {.async.} =
  heartbeat "Scheduling log and metrics run", LogAndMetricsInterval:
    # log metrics
    let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
    let maxConnections = pm.switch.connManager.inSema.size
    let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers().mapIt(
      RemotePeerInfo.init(it.peerId, it.addrs)
    )
    let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId))
    let totalConnections = pm.switch.connManager.getConnections().len

    info "Relay peer connections",
      inRelayConns = $inRelayPeers.len & "/" & $pm.inRelayPeersTarget,
      outRelayConns = $outRelayPeers.len & "/" & $pm.outRelayPeersTarget,
      totalConnections = $totalConnections & "/" & $maxConnections,
      notConnectedPeers = notConnectedPeers.len,
      outsideBackoffPeers = outsideBackoffPeers.len

    # update prometheus metrics
    for proto in pm.wakuPeerStore.getWakuProtos():
      let (protoConnsIn, protoConnsOut) = pm.connectedPeers(proto)
      let (protoStreamsIn, protoStreamsOut) = pm.getNumStreams(proto)
      waku_connected_peers.set(
        protoConnsIn.len.float64, labelValues = [$Direction.In, proto]
      )
      waku_connected_peers.set(
        protoConnsOut.len.float64, labelValues = [$Direction.Out, proto]
      )
      waku_streams_peers.set(
        protoStreamsIn.float64, labelValues = [$Direction.In, proto]
      )
      waku_streams_peers.set(
        protoStreamsOut.float64, labelValues = [$Direction.Out, proto]
      )

proc start*(pm: PeerManager) =
  pm.started = true
  asyncSpawn pm.relayConnectivityLoop()
  asyncSpawn pm.prunePeerStoreLoop()
  asyncSpawn pm.logAndMetrics()

proc stop*(pm: PeerManager) =
  pm.started = false