when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  std/[options, sets, sequtils, tables, times],
  chronos,
  chronicles,
  metrics,
  libp2p/multistream
import
  ../../utils/peers,
  ../../waku/v2/protocol/waku_relay,
  ./peer_store/peer_storage,
  ./waku_peer_store

export waku_peer_store, peer_storage, peers

declareCounter waku_peers_dials, "Number of peer dials", ["outcome"]
# TODO: Populate from PeerStore.Source when ready
declarePublicCounter waku_node_conns_initiated, "Number of connections initiated", ["source"]
declarePublicGauge waku_peers_errors, "Number of peer manager errors", ["type"]
declarePublicGauge waku_connected_peers, "Number of connected peers per direction: inbound|outbound", ["direction"]
declarePublicGauge waku_peer_store_size, "Number of peers managed by the peer store"

logScope:
  topics = "waku node peer_manager"

const
  # TODO: Make configurable
  DefaultDialTimeout = chronos.seconds(10)

  # Max attempts before removing the peer
  MaxFailedAttempts = 5

  # Time to wait before attempting to dial again is calculated as:
  # initialBackoffInSec*(backoffFactor^(failedAttempts-1))
  # 120s, 480s, 1920s, 7680s
  InitialBackoffInSec = 120
  BackoffFactor = 4

  # Limit the number of parallel dials
  MaxParalelDials = 10

  # Delay between consecutive relayConnectivityLoop runs
  ConnectivityLoopInterval = chronos.seconds(30)

  # How often the peer store is pruned
  PrunePeerStoreInterval = chronos.minutes(5)

type
  PeerManager* = ref object of RootObj
    switch*: Switch
    peerStore*: PeerStore
    initialBackoffInSec*: int
    backoffFactor*: int
    maxFailedAttempts*: int
    storage: PeerStorage
    serviceSlots*: Table[string, RemotePeerInfo]
    started: bool

####################
# Helper functions #
####################

proc insertOrReplace(ps: PeerStorage,
                     peerId: PeerID,
                     storedInfo: StoredInfo,
                     connectedness: Connectedness,
                     disconnectTime: int64 = 0) =
  # Insert peer entry into persistent storage, or replace existing entry with updated info
  let res = ps.put(peerId, storedInfo, connectedness, disconnectTime)
  if res.isErr:
    warn "failed to store peers", err = res.error
    waku_peers_errors.inc(labelValues = ["storage_failure"])
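
# Dial a peer on the given protocol. On success the peer's failed-attempts
# counter is reset; on timeout or error the failure books and connectedness
# are updated and the outcome is recorded in the dial metrics.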
proc dialPeer(pm: PeerManager, peerId: PeerID,
              addrs: seq[MultiAddress], proto: string,
              dialTimeout = DefaultDialTimeout,
              source = "api"): Future[Option[Connection]] {.async.} =
  # Do not attempt to dial self
  if peerId == pm.switch.peerInfo.peerId:
    return none(Connection)

  let failedAttempts = pm.peerStore[NumberFailedConnBook][peerId]
  debug "Dialing peer", wireAddr = addrs, peerId = peerId, failedAttempts=failedAttempts

  # Dial Peer
  let dialFut = pm.switch.dial(peerId, addrs, proto)

  var reasonFailed = ""
  try:
    if (await dialFut.withTimeout(dialTimeout)):
      waku_peers_dials.inc(labelValues = ["successful"])
      # TODO: This will be populated from the peerstore info when ready
      waku_node_conns_initiated.inc(labelValues = [source])
      pm.peerStore[NumberFailedConnBook][peerId] = 0
      return some(dialFut.read())
    else:
      reasonFailed = "timeout"
      await cancelAndWait(dialFut)
  except CatchableError as exc:
    reasonFailed = "failed"

  # Dial failed
  pm.peerStore[NumberFailedConnBook][peerId] = pm.peerStore[NumberFailedConnBook][peerId] + 1
  pm.peerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second)
  pm.peerStore[ConnectionBook][peerId] = CannotConnect

  debug "Dialing peer failed",
    peerId = peerId,
    reason = reasonFailed,
    failedAttempts = pm.peerStore[NumberFailedConnBook][peerId]
  waku_peers_dials.inc(labelValues = [reasonFailed])

  # Update storage
  if not pm.storage.isNil:
    pm.storage.insertOrReplace(peerId, pm.peerStore.get(peerId), CannotConnect)

  return none(Connection)

proc loadFromStorage(pm: PeerManager) =
  debug "loading peers from storage"
  # Load peers from storage, if available
  proc onData(peerId: PeerID, storedInfo: StoredInfo, connectedness: Connectedness, disconnectTime: int64) =
    trace "loading peer", peerId= $peerId, storedInfo= $storedInfo, connectedness=connectedness

    if peerId == pm.switch.peerInfo.peerId:
      # Do not manage self
      return

    # nim-libp2p books
    pm.peerStore[AddressBook][peerId] = storedInfo.addrs
    pm.peerStore[ProtoBook][peerId] = storedInfo.protos
    pm.peerStore[KeyBook][peerId] = storedInfo.publicKey
    pm.peerStore[AgentBook][peerId] = storedInfo.agent
    pm.peerStore[ProtoVersionBook][peerId] = storedInfo.protoVersion

    # custom books
    pm.peerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state
    pm.peerStore[DisconnectBook][peerId] = disconnectTime
    pm.peerStore[SourceBook][peerId] = storedInfo.origin

  let res = pm.storage.getAll(onData)
  if res.isErr:
    warn "failed to load peers from storage", err = res.error
    waku_peers_errors.inc(labelValues = ["storage_load_failure"])
  else:
    debug "successfully queried peer storage"

##################
# Initialisation #
##################
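
# Keep the peer store, the connected-peers metric and (when enabled) the
# persistent storage in sync with connection events reported by the switch.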
proc onConnEvent(pm: PeerManager, peerId: PeerID, event: ConnEvent) {.async.} =
  case event.kind
  of ConnEventKind.Connected:
    let direction = if event.incoming: Inbound else: Outbound
    pm.peerStore[ConnectionBook][peerId] = Connected
    pm.peerStore[DirectionBook][peerId] = direction

    waku_connected_peers.inc(1, labelValues=[$direction])

    if not pm.storage.isNil:
      pm.storage.insertOrReplace(peerId, pm.peerStore.get(peerId), Connected)
    return
  of ConnEventKind.Disconnected:
    waku_connected_peers.dec(1, labelValues=[$pm.peerStore[DirectionBook][peerId]])

    pm.peerStore[DirectionBook][peerId] = UnknownDirection
    pm.peerStore[ConnectionBook][peerId] = CanConnect
    if not pm.storage.isNil:
      pm.storage.insertOrReplace(peerId, pm.peerStore.get(peerId), CanConnect, getTime().toUnix)
    return
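
# Create a peer manager on top of the switch's peer store, register the
# connection-event and peer-store handlers, and load any previously
# persisted peers when a storage backend is provided.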
proc new*(T: type PeerManager,
          switch: Switch,
          storage: PeerStorage = nil,
          initialBackoffInSec = InitialBackoffInSec,
          backoffFactor = BackoffFactor,
          maxFailedAttempts = MaxFailedAttempts): PeerManager =

  let capacity = switch.peerStore.capacity
  let maxConnections = switch.connManager.inSema.size
  if maxConnections > capacity:
    error "Max number of connections can't be greater than PeerManager capacity",
      capacity = capacity,
      maxConnections = maxConnections
    raise newException(Defect, "Max number of connections can't be greater than PeerManager capacity")

  let pm = PeerManager(switch: switch,
                       peerStore: switch.peerStore,
                       storage: storage,
                       initialBackoffInSec: initialBackoffInSec,
                       backoffFactor: backoffFactor,
                       maxFailedAttempts: maxFailedAttempts)

  proc peerHook(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.} =
    onConnEvent(pm, peerId, event)

  proc peerStoreChanged(peerId: PeerId) {.gcsafe.} =
    waku_peer_store_size.set(toSeq(pm.peerStore[AddressBook].book.keys).len.int64)

  pm.switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
  pm.switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)

  # called every time the peerstore is updated
  pm.peerStore[AddressBook].addHandler(peerStoreChanged)

  pm.serviceSlots = initTable[string, RemotePeerInfo]()

  if not storage.isNil():
    debug "found persistent peer storage"
    pm.loadFromStorage() # Load previously managed peers.
  else:
    debug "no peer storage found"

  return pm

#####################
# Manager interface #
#####################

proc addPeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, proto: string) =
  # Adds peer to manager for the specified protocol

  if remotePeerInfo.peerId == pm.switch.peerInfo.peerId:
    # Do not attempt to manage our unmanageable self
    return

  # ...public key
  var publicKey: PublicKey
  discard remotePeerInfo.peerId.extractPublicKey(publicKey)

  if pm.peerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and
     pm.peerStore[KeyBook][remotePeerInfo.peerId] == publicKey:
    # Peer already managed
    return

  debug "Adding peer to manager", peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs, proto = proto

  pm.peerStore[AddressBook][remotePeerInfo.peerId] = remotePeerInfo.addrs
  pm.peerStore[KeyBook][remotePeerInfo.peerId] = publicKey

  # TODO: Remove this once service slots is ready
  pm.peerStore[ProtoBook][remotePeerInfo.peerId] = pm.peerStore[ProtoBook][remotePeerInfo.peerId] & proto

  # Add peer to storage. Entry will subsequently be updated with connectedness information
  if not pm.storage.isNil:
    pm.storage.insertOrReplace(remotePeerInfo.peerId, pm.peerStore.get(remotePeerInfo.peerId), NotConnected)
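
# Reserve the service slot for the given protocol with this peer. Relay
# peers are not slotted, since relay connections are managed separately.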
proc addServicePeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, proto: string) =
  # Do not add relay peers
  if proto == WakuRelayCodec:
    warn "Can't add relay peer to service peer slots"
    return

  info "Adding peer to service slots", peerId = remotePeerInfo.peerId, addr = remotePeerInfo.addrs[0], service = proto

  # Set peer for service slot
  pm.serviceSlots[proto] = remotePeerInfo

  # TODO: Remove proto once fully refactored
  pm.addPeer(remotePeerInfo, proto)

proc reconnectPeers*(pm: PeerManager,
                     proto: string,
                     protocolMatcher: Matcher,
                     backoff: chronos.Duration = chronos.seconds(0)) {.async.} =
  ## Reconnect to peers registered for this protocol. This will update connectedness.
  ## Especially useful to resume connections from persistent storage after a restart.

  debug "Reconnecting peers", proto=proto

  for storedInfo in pm.peerStore.peers(protocolMatcher):
    # Check that the peer can be connected
    if storedInfo.connectedness == CannotConnect:
      debug "Not reconnecting to unreachable or non-existing peer", peerId=storedInfo.peerId
      continue

    # Respect optional backoff period where applicable.
    let
      # TODO: Add method to peerStore (eg isBackoffExpired())
      disconnectTime = Moment.init(storedInfo.disconnectTime, Second) # Convert
      currentTime = Moment.init(getTime().toUnix, Second) # Current time comparable to persisted value
      backoffTime = disconnectTime + backoff - currentTime # Consider time elapsed since last disconnect

    trace "Respecting backoff", backoff=backoff, disconnectTime=disconnectTime, currentTime=currentTime, backoffTime=backoffTime

    # TODO: This blocks the whole function. Try to connect to another peer in the meantime.
    if backoffTime > ZeroDuration:
      debug "Backing off before reconnect...", peerId=storedInfo.peerId, backoffTime=backoffTime
      # We disconnected recently and still need to wait for a backoff period before connecting
      await sleepAsync(backoffTime)

    trace "Reconnecting to peer", peerId= $storedInfo.peerId
    discard await pm.dialPeer(storedInfo.peerId, toSeq(storedInfo.addrs), proto)

####################
# Dialer interface #
####################

proc dialPeer*(pm: PeerManager,
               remotePeerInfo: RemotePeerInfo,
               proto: string,
               dialTimeout = DefaultDialTimeout,
               source = "api"): Future[Option[Connection]] {.async.} =
  # Dial a given peer and add it to the list of known peers
  # TODO: check peer validity and score before continuing. Limit number of peers to be managed.

  # First add dialed peer info to peer store, if it does not exist yet...
  if not pm.peerStore.hasPeer(remotePeerInfo.peerId, proto):
    trace "Adding newly dialed peer to manager", peerId= $remotePeerInfo.peerId, address= $remotePeerInfo.addrs[0], proto= proto
    pm.addPeer(remotePeerInfo, proto)

  return await pm.dialPeer(remotePeerInfo.peerId, remotePeerInfo.addrs, proto, dialTimeout, source)

proc dialPeer*(pm: PeerManager,
               peerId: PeerID,
               proto: string,
               dialTimeout = DefaultDialTimeout,
               source = "api"): Future[Option[Connection]] {.async.} =
  # Dial an existing peer by looking up its addrs in the switch's peerStore
  # TODO: check peer validity and score before continuing. Limit number of peers to be managed.

  let addrs = pm.switch.peerStore[AddressBook][peerId]
  return await pm.dialPeer(peerId, addrs, proto, dialTimeout, source)
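
# Dial multiple peers in parallel on the given protocol and wait for all
# dial attempts to complete, logging how many succeeded.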
proc connectToNodes*(pm: PeerManager,
                     nodes: seq[string]|seq[RemotePeerInfo],
                     proto: string,
                     dialTimeout = DefaultDialTimeout,
                     source = "api") {.async.} =
  if nodes.len == 0:
    return

  info "Dialing multiple peers", numOfPeers = nodes.len

  var futConns: seq[Future[Option[Connection]]]
  for node in nodes:
    let node = when node is string: parseRemotePeerInfo(node)
               else: node
    futConns.add(pm.dialPeer(RemotePeerInfo(node), proto, dialTimeout, source))

  await allFutures(futConns)
  let successfulConns = futConns.mapIt(it.read()).countIt(it.isSome)

  info "Finished dialing multiple peers", successfulConns=successfulConns, attempted=nodes.len

  # The issue seems to be around peers not being fully connected when
  # trying to subscribe. So what we do is sleep to guarantee nodes are
  # fully connected.
  #
  # This issue was known to Dmitiry on nim-libp2p and may be resolvable
  # later.
  await sleepAsync(chronos.seconds(5))

# Ensures a healthy amount of connected relay peers
proc relayConnectivityLoop*(pm: PeerManager) {.async.} =
  debug "Starting relay connectivity loop"
  while pm.started:

    let maxConnections = pm.switch.connManager.inSema.size
    let numInPeers = pm.switch.connectedPeers(lpstream.Direction.In).len
    let numOutPeers = pm.switch.connectedPeers(lpstream.Direction.Out).len
    let numConPeers = numInPeers + numOutPeers

    # TODO: Enforce a given in/out peers ratio

    # Leave some room for service peers
    if numConPeers >= (maxConnections - 5):
      await sleepAsync(ConnectivityLoopInterval)
      continue

    let notConnectedPeers = pm.peerStore.getNotConnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs))
    let outsideBackoffPeers = notConnectedPeers.filterIt(pm.peerStore.canBeConnected(it.peerId,
                                                                                     pm.initialBackoffInSec,
                                                                                     pm.backoffFactor))
    let numPeersToConnect = min(min(maxConnections - numConPeers, outsideBackoffPeers.len), MaxParalelDials)

    info "Relay connectivity loop",
      connectedPeers = numConPeers,
      targetConnectedPeers = maxConnections,
      notConnectedPeers = notConnectedPeers.len,
      outsideBackoffPeers = outsideBackoffPeers.len

    await pm.connectToNodes(outsideBackoffPeers[0..<numPeersToConnect], WakuRelayCodec)

    await sleepAsync(ConnectivityLoopInterval)
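
# Prune the peer store down to its capacity: first drop peers that exceeded
# maxFailedAttempts, then, if still over capacity, drop peers that are not
# currently connected.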
proc prunePeerStore*(pm: PeerManager) =
  let numPeers = toSeq(pm.peerStore[AddressBook].book.keys).len
  let capacity = pm.peerStore.capacity
  if numPeers < capacity:
    return

  debug "Peer store capacity exceeded", numPeers = numPeers, capacity = capacity
  let peersToPrune = numPeers - capacity

  # prune peers with too many failed attempts
  var pruned = 0
  for peerId in pm.peerStore[NumberFailedConnBook].book.keys:
    if peersToPrune - pruned == 0:
      break
    if pm.peerStore[NumberFailedConnBook][peerId] >= pm.maxFailedAttempts:
      pm.peerStore.del(peerId)
      pruned += 1

  # if we still need to prune, prune peers that are not connected
  let notConnected = pm.peerStore.getNotConnectedPeers().mapIt(it.peerId)
  for peerId in notConnected:
    if peersToPrune - pruned == 0:
      break
    pm.peerStore.del(peerId)
    pruned += 1

  let afterNumPeers = toSeq(pm.peerStore[AddressBook].book.keys).len
  debug "Finished pruning peer store", beforeNumPeers = numPeers,
                                       afterNumPeers = afterNumPeers,
                                       capacity = capacity,
                                       pruned = pruned
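
# Periodically prune the peer store while the manager is running.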
proc prunePeerStoreLoop(pm: PeerManager) {.async.} =
  while pm.started:
    pm.prunePeerStore()
    await sleepAsync(PrunePeerStoreInterval)
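
# Select a peer that supports the given protocol: for WakuRelay the first
# matching peer in the peer store is returned; for other protocols the
# service slot takes precedence, falling back to the peer store.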
proc selectPeer*(pm: PeerManager, proto: string): Option[RemotePeerInfo] =
  debug "Selecting peer from peerstore", protocol=proto

  # Selects the best peer for a given protocol
  let peers = pm.peerStore.getPeersByProtocol(proto)

  # No criteria for selecting a peer for WakuRelay, random one
  if proto == WakuRelayCodec:
    # TODO: proper heuristic here that compares peer scores and selects "best" one. For now the first peer for the given protocol is returned
    if peers.len > 0:
      debug "Got peer from peerstore", peerId=peers[0].peerId, multi=peers[0].addrs[0], protocol=proto
      return some(peers[0].toRemotePeerInfo())
    debug "No peer found for protocol", protocol=proto
    return none(RemotePeerInfo)

  # For other protocols, we select the peer that is slotted for the given protocol
  pm.serviceSlots.withValue(proto, serviceSlot):
    debug "Got peer from service slots", peerId=serviceSlot[].peerId, multi=serviceSlot[].addrs[0], protocol=proto
    return some(serviceSlot[])

  # If not slotted, we select a random peer for the given protocol
  if peers.len > 0:
    debug "Got peer from peerstore", peerId=peers[0].peerId, multi=peers[0].addrs[0], protocol=proto
    return some(peers[0].toRemotePeerInfo())
  debug "No peer found for protocol", protocol=proto
  return none(RemotePeerInfo)
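
# Start the peer manager's background loops (relay connectivity and
# peer store pruning).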
proc start*(pm: PeerManager) =
  pm.started = true
  asyncSpawn pm.relayConnectivityLoop()
  asyncSpawn pm.prunePeerStoreLoop()
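
# Signal the background loops to stop on their next iteration.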
proc stop*(pm: PeerManager) =
  pm.started = false