nim-libp2p/libp2p/services/autorelayservice.nim

# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
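#
# AutoRelayService maintains circuit relay v2 reservations on up to
# maxNumRelays relays and advertises the resulting /p2p-circuit addresses
# through the switch's address mappers and the optional onReservation
# callback.
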
{.push raises: [].}

import chronos, chronicles, times, tables, sequtils
import ../switch, ../protocols/connectivity/relay/[client, utils]

logScope:
  topics = "libp2p autorelay"

type
  OnReservationHandler = proc(addresses: seq[MultiAddress]) {.gcsafe, raises: [].}

  AutoRelayService* = ref object of Service
    running: bool
    runner: Future[void]
    client: RelayClient
    maxNumRelays: int # maximum number of relays we can reserve at the same time
    relayPeers: Table[PeerId, Future[void]]
    relayAddresses: Table[PeerId, seq[MultiAddress]]
    backingOff: seq[PeerId]
    peerAvailable: AsyncEvent
    onReservation: OnReservationHandler
    addressMapper: AddressMapper
    rng: ref HmacDrbgContext

proc isRunning*(self: AutoRelayService): bool =
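  ## Returns true while the service is running.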
  return self.running

proc addressMapper(
    self: AutoRelayService, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
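  ## Advertises the relayed circuit addresses alongside the regular listen addresses.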
  return concat(toSeq(self.relayAddresses.values)) & listenAddrs

proc reserveAndUpdate(
    self: AutoRelayService, relayPid: PeerId, switch: Switch
) {.async.} =
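  ## Keeps renewing the reservation on the given relay and publishes the
  ## resulting /p2p-circuit addresses whenever they change.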
  while self.running:
    let
      rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
      relayedAddr = rsvp.addrs.mapIt(MultiAddress.init($it & "/p2p-circuit").tryGet())
      ttl = rsvp.expire.int64 - times.now().utc.toTime.toUnix
    if ttl <= 60:
      # A reservation under a minute is basically useless
      break
    if relayPid notin self.relayAddresses or self.relayAddresses[relayPid] != relayedAddr:
      self.relayAddresses[relayPid] = relayedAddr
      await switch.peerInfo.update()
      debug "Updated relay addresses", relayPid, relayedAddr
      if not self.onReservation.isNil():
        self.onReservation(concat(toSeq(self.relayAddresses.values)))
    # Renew the reservation 30 seconds before it expires
    await sleepAsync chronos.seconds(ttl - 30)

method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
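  ## Installs the address mapper, subscribes to peer events and starts the service.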
  self.addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async.} =
    return await addressMapper(self, listenAddrs)

  let hasBeenSetUp = await procCall Service(self).setup(switch)
  if hasBeenSetUp:
    proc handlePeerIdentified(peerId: PeerId, event: PeerEvent) {.async.} =
      trace "Peer Identified", peerId
      if self.relayPeers.len < self.maxNumRelays:
        self.peerAvailable.fire()

    proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
      trace "Peer Left", peerId
      self.relayPeers.withValue(peerId, future):
        future[].cancel()

    switch.addPeerEventHandler(handlePeerIdentified, Identified)
    switch.addPeerEventHandler(handlePeerLeft, Left)
    switch.peerInfo.addressMappers.add(self.addressMapper)
    await self.run(switch)
  return hasBeenSetUp

proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
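  ## Lifts the back-off on the given peer after a short delay and signals
  ## that a relay candidate may be available again.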
  await sleepAsync(chronos.seconds(5))
  self.backingOff.keepItIf(it != pid)
  self.peerAvailable.fire()

proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
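  ## Main loop: drops relays whose reservation future finished (backing them
  ## off briefly), then reserves random connected peers that advertise the
  ## relay v2 hop protocol until maxNumRelays reservations are active.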
  while true:
    # Remove relayPeers that failed
    let peers = toSeq(self.relayPeers.keys())
    for k in peers:
      if self.relayPeers[k].finished():
        self.relayPeers.del(k)
        self.relayAddresses.del(k)
        if not self.onReservation.isNil():
          self.onReservation(concat(toSeq(self.relayAddresses.values)))
        # To avoid DDoSing our peers in certain conditions
        self.backingOff.add(k)
        asyncSpawn self.manageBackedOff(k)

    # Get all connected relayPeers
    self.peerAvailable.clear()
    var connectedPeers = switch.connectedPeers(Direction.Out)
    connectedPeers.keepItIf(
      RelayV2HopCodec in switch.peerStore[ProtoBook][it] and it notin self.relayPeers and
        it notin self.backingOff
    )
    self.rng.shuffle(connectedPeers)

    for relayPid in connectedPeers:
      if self.relayPeers.len() >= self.maxNumRelays:
        break
      self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)

    if self.relayPeers.len() > 0:
      await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
    else:
      await self.peerAvailable.wait()

method run*(self: AutoRelayService, switch: Switch) {.async.} =
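  ## Starts the service's background loop unless it is already running.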
  if self.running:
    trace "Autorelay is already running"
    return
  self.running = true
  self.runner = self.innerRun(switch)

method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
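  ## Stops the background loop, removes the address mapper and refreshes
  ## the peer info.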
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
    self.running = false
    self.runner.cancel()
    switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    await switch.peerInfo.update()
  return hasBeenStopped

proc getAddresses*(self: AutoRelayService): seq[MultiAddress] =
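  ## Returns all the relayed addresses currently advertised.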
  result = concat(toSeq(self.relayAddresses.values))

proc new*(
    T: typedesc[AutoRelayService],
    maxNumRelays: int,
    client: RelayClient,
    onReservation: OnReservationHandler,
    rng: ref HmacDrbgContext,
): T =
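  ## Creates an AutoRelayService. The onReservation callback, if set, receives
  ## the full list of relayed addresses every time it changes.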
  T(
    maxNumRelays: maxNumRelays,
    client: client,
    onReservation: onReservation,
    peerAvailable: newAsyncEvent(),
    rng: rng,
  )
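
# Minimal usage sketch (illustrative only; assumes a switch assembled with
# nim-libp2p's SwitchBuilder and a RelayClient shared between the circuit
# relay transport and this service):
#
#   proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
#     echo "relayed addresses: ", addresses
#
#   let relayClient = RelayClient.new()
#   let autorelay = AutoRelayService.new(2, relayClient, onReservation, newRng())
#   let switch = SwitchBuilder.new()
#     # ... addresses, transports, muxers, security ...
#     .withCircuitRelay(relayClient)
#     .withServices(@[Service(autorelay)])
#     .build()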