nim-libp2p/libp2p/transports/tcptransport.nim

# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## TCP transport implementation
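##
## Rough usage sketch (a minimal example, not canonical API docs), assuming
## code running inside an async proc and an already configured `Upgrade`
## instance named `upgrader`; error handling omitted:
##
## .. code-block:: nim
##   let transport = TcpTransport.new(upgrade = upgrader)
##   await transport.start(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
##   let incoming = await transport.accept() # raw, not-yet-upgraded Connection
##   await transport.stop()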
{.push raises: [].}
import std/[sequtils]
import stew/results
import chronos, chronicles
import transport,
  ../errors,
  ../wire,
  ../multicodec,
  ../connmanager,
  ../multiaddress,
  ../stream/connection,
  ../stream/chronosstream,
  ../upgrademngrs/upgrade,
  ../utility
logScope:
  topics = "libp2p tcptransport"

export transport, results
const
  TcpTransportTrackerName* = "libp2p.tcptransport"

type
  TcpTransport* = ref object of Transport
    servers*: seq[StreamServer]
    clients: array[Direction, seq[StreamTransport]]
    flags: set[ServerFlags]
    clientFlags: set[SocketFlags]
    acceptFuts: seq[Future[StreamTransport]]
    connectionsTimeout: Duration

  TcpTransportTracker* = ref object of TrackerBase
    opened*: uint64
    closed*: uint64

  TcpTransportError* = object of transport.TransportError
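
# The tracker plugs into chronos' `getTracker`/`addTracker` registry: the
# `opened` and `closed` counters are compared by `leakTransport`, letting tests
# flag transports that were started but never stopped.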
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [].}
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
  result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
  if isNil(result):
    result = setupTcpTransportTracker()

proc dumpTracking(): string {.gcsafe.} =
  var tracker = getTcpTransportTracker()
  result = "Opened tcp transports: " & $tracker.opened & "\n" &
    "Closed tcp transports: " & $tracker.closed

proc leakTransport(): bool {.gcsafe.} =
  var tracker = getTcpTransportTracker()
  result = (tracker.opened != tracker.closed)

proc setupTcpTransportTracker(): TcpTransportTracker =
  result = new TcpTransportTracker
  result.opened = 0
  result.closed = 0
  result.dump = dumpTracking
  result.isLeaked = leakTransport
  addTracker(TcpTransportTrackerName, result)
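
# `connHandler` wraps an accepted or dialed `StreamTransport` into a
# `ChronosStream`-backed `Connection`, records it in the per-direction client
# table and spawns a cleanup task that runs once either the socket or the
# connection closes.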
proc connHandler*(self: TcpTransport,
                  client: StreamTransport,
                  observedAddr: Opt[MultiAddress],
                  dir: Direction): Future[Connection] {.async.} =
  trace "Handling tcp connection", address = $observedAddr,
    dir = $dir,
    clients = self.clients[Direction.In].len +
      self.clients[Direction.Out].len

  let conn = Connection(
    ChronosStream.init(
      client = client,
      dir = dir,
      observedAddr = observedAddr,
      timeout = self.connectionsTimeout
    ))

  proc onClose() {.async.} =
    try:
      let futs = @[client.join(), conn.join()]
      await futs[0] or futs[1]
      for f in futs:
        if not f.finished: await f.cancelAndWait() # cancel outstanding join()
trace "Cleaning up client", addrs = $client.remoteAddress,
conn
self.clients[dir].keepItIf( it != client )
await allFuturesThrowing(
conn.close(), client.closeWait())
trace "Cleaned up client", addrs = $client.remoteAddress,
conn
    except CatchableError as exc:
      let useExc {.used.} = exc
      debug "Error cleaning up client", errMsg = exc.msg, conn

  self.clients[dir].add(client)
  asyncSpawn onClose()

  return conn
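
# Client sockets mirror the server-side `TcpNoDelay` flag below: `compilesOr`
# (presumably provided by `../utility`) picks the first branch when
# `SocketFlags.TcpNoDelay` exists in the chronos version in use, and otherwise
# falls back to the `doAssert` branch.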
proc new*(
  T: typedesc[TcpTransport],
  flags: set[ServerFlags] = {},
  upgrade: Upgrade,
  connectionsTimeout = 10.minutes): T {.public.} =

  let
    transport = T(
      flags: flags,
      clientFlags:
        if ServerFlags.TcpNoDelay in flags:
          compilesOr:
            {SocketFlags.TcpNoDelay}
          do:
            doAssert(false)
            default(set[SocketFlags])
        else:
          default(set[SocketFlags]),
      upgrader: upgrade,
      networkReachability: NetworkReachability.Unknown,
      connectionsTimeout: connectionsTimeout)

  return transport
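
# `start` binds one `StreamServer` per listen address and rewrites
# `self.addrs[i]` with the resolved local address, so callers see the actual
# port even when binding to 0.0.0.0:0.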
method start*(
  self: TcpTransport,
  addrs: seq[MultiAddress]) {.async.} =
  ## listen on the transport
  ##

  if self.running:
    warn "TCP transport already running"
    return

  await procCall Transport(self).start(addrs)
  trace "Starting TCP transport"
  inc getTcpTransportTracker().opened

  for i, ma in addrs:
    if not self.handles(ma):
      trace "Invalid address detected, skipping!", address = ma
      continue

    self.flags.incl(ServerFlags.ReusePort)
    let server = createStreamServer(
      ma = ma,
      flags = self.flags,
      udata = self)

    # always get the resolved address in case we're bound to 0.0.0.0:0
    self.addrs[i] = MultiAddress.init(
      server.sock.getLocalAddress()
    ).tryGet()

    self.servers &= server

    trace "Listening on", address = ma
method stop*(self: TcpTransport) {.async, gcsafe.} =
  ## stop the transport
  ##
  try:
    trace "Stopping TCP transport"

    checkFutures(
      await allFinished(
        self.clients[Direction.In].mapIt(it.closeWait()) &
        self.clients[Direction.Out].mapIt(it.closeWait())))

    if not self.running:
      warn "TCP transport already stopped"
      return

    await procCall Transport(self).stop() # call base

    var toWait: seq[Future[void]]
    for fut in self.acceptFuts:
      if not fut.finished:
        toWait.add(fut.cancelAndWait())
      elif fut.done:
        toWait.add(fut.read().closeWait())

    for server in self.servers:
      server.stop()
      toWait.add(server.closeWait())

    await allFutures(toWait)

    self.servers = @[]

    trace "Transport stopped"
    inc getTcpTransportTracker().closed
  except CatchableError as exc:
    trace "Error shutting down tcp transport", exc = exc.msg
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
  ## accept a new TCP connection
  ##
  if not self.running:
    raise newTransportClosedError()

  try:
    if self.acceptFuts.len <= 0:
      self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))

    if self.acceptFuts.len <= 0:
      return

    let
      finished = await one(self.acceptFuts)
      index = self.acceptFuts.find(finished)

    self.acceptFuts[index] = self.servers[index].accept()
    let transp = await finished
    try:
      let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
      return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
    except CancelledError as exc:
      transp.close()
      raise exc
    except CatchableError as exc:
      debug "Failed to handle connection", exc = exc.msg
      transp.close()
  except TransportTooManyError as exc:
    debug "Too many files opened", exc = exc.msg
  except TransportAbortedError as exc:
    debug "Connection aborted", exc = exc.msg
  except TransportUseClosedError as exc:
    debug "Server was closed", exc = exc.msg
    raise newTransportClosedError(exc)
  except CancelledError as exc:
    raise exc
  except TransportOsError as exc:
    info "OS Error", exc = exc.msg
    raise exc
  except CatchableError as exc:
    info "Unexpected error accepting connection", exc = exc.msg
    raise exc
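
# When the local peer has been marked NotReachable (e.g. by the autonat
# service), outgoing sockets enable `ReusePort` and bind to the first listen
# address, so dials originate from the advertised port; this helps with hole
# punching through NATs.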
method dial*(
  self: TcpTransport,
  hostname: string,
  address: MultiAddress,
  peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
  ## dial a peer
  ##
  trace "Dialing remote peer", address = $address

  let transp =
    if self.networkReachability == NetworkReachability.NotReachable and self.addrs.len > 0:
      self.clientFlags.incl(SocketFlags.ReusePort)
      await connect(address, flags = self.clientFlags, localAddress = Opt.some(self.addrs[0]))
    else:
      await connect(address, flags = self.clientFlags)

  try:
    let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
    return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
  except CatchableError as err:
    await transp.closeWait()
    raise err

method handles*(t: TcpTransport, address: MultiAddress): bool {.gcsafe.} =
  if procCall Transport(t).handles(address):
    if address.protocols.isOk:
      return TCP.match(address)