parent 9c1633bf87
commit 03639f1446
libp2p/muxers/mplex/lpchannel.nim

@@ -66,6 +66,12 @@ proc open*(s: LPChannel) {.async, gcsafe.} =
 method closed*(s: LPChannel): bool =
   s.closedLocal
 
+proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
+  ## Channels may be closed for reading and writing in any order - we'll close
+  ## the underlying bufferstream when both directions are closed
+  if s.closedLocal and s.isEof:
+    await procCall BufferStream(s).close()
+
 proc reset*(s: LPChannel) {.async, gcsafe.} =
   if s.isClosed:
     trace "Already closed", s
@@ -109,44 +115,30 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
 
   trace "Channel reset", s
 
-proc closeUnderlying*(s: LPChannel): Future[void] {.async.} =
-  ## Channels may be closed for reading and writing in any order - we'll close
-  ## the underlying bufferstream when both directions are closed
-  if s.closedLocal and s.isEof:
-    trace "Closing underlying", s, conn = s.conn
-    await procCall BufferStream(s).close()
-
 method close*(s: LPChannel) {.async, gcsafe.} =
   ## Close channel for writing - a message will be sent to the other peer
   ## informing them that the channel is closed and that we're waiting for
   ## their acknowledgement.
-  ##
+  if s.closedLocal:
+    trace "Already closed", s
+    return
+  s.closedLocal = true
 
-  try:
-    if s.closedLocal:
-      trace "Already closed", s
-      return
-    s.closedLocal = true
-
-    trace "Closing channel", s, conn = s.conn, len = s.len
-    if s.isOpen:
-      trace "Sending close msg", s, conn = s.conn
-      await s.conn.writeMsg(s.id, s.closeCode).wait(10.seconds) # write close
-    trace "Closed channel", s, len = s.len
-  except CancelledError as exc:
-    trace "Cancelling close", s, conn = s.conn
-    # need to reset here otherwise the close sequence doesn't complete and
-    # the channel leaks since none of it's `onClose` events trigger
-    await s.reset()
-    raise exc
-  except CatchableError as exc:
-    # need to reset here otherwise the close sequence doesn't complete and
-    # the channel leaks since none of it's `onClose` events trigger
-    trace "Cannot send close message", exc = exc.msg, s, conn = s.conn
-    await s.reset()
-  finally:
-    await s.closeUnderlying() # maybe already eofed
+  trace "Closing channel", s, conn = s.conn, len = s.len
+  if s.isOpen:
+    try:
+      await s.conn.writeMsg(s.id, s.closeCode) # write close
+    except CancelledError as exc:
+      raise exc
+    except CatchableError as exc:
+      # It's harmless that close message cannot be sent - the connection is
+      # likely down already
+      trace "Cannot send close message", s, id = s.id
+
+  await s.closeUnderlying() # maybe already eofed
+
+  trace "Closed channel", s, len = s.len
 
 method initStream*(s: LPChannel) =
   if s.objName.len == 0:
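The `close` in this hunk implements mplex's half-close: closing for writing sends a close message to the peer, while the underlying bufferstream is only released once both directions are done. That invariant from `closeUnderlying`, isolated as a runnable sketch (names are illustrative, not part of the diff):

```nim
# Half-close invariant: a channel closed for writing can still deliver
# queued reads, so the stream is freed only when *both* sides are done.
proc bothDirectionsDone(closedLocal, isEof: bool): bool =
  closedLocal and isEof

assert not bothDirectionsDone(true, false)  # we closed, remote still sending
assert not bothDirectionsDone(false, true)  # remote closed, we can still write
assert bothDirectionsDone(true, true)       # safe to close the bufferstream
```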
libp2p/muxers/mplex/mplex.nim

@@ -60,7 +60,7 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
   try:
     await chann.join()
     m.channels[chann.initiator].del(chann.id)
-    trace "cleaned up channel", m, s = chann
+    trace "cleaned up channel", m, chann
 
     when defined(libp2p_expensive_metrics):
       libp2p_mplex_channels.set(
@@ -68,7 +68,7 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
         labelValues = [$chann.initiator, $m.connection.peerInfo.peerId])
   except CatchableError as exc:
     # This is top-level procedure which will work as separate task, so it
-    # do not need to propagate CancelledError, and no other exceptions should
+    # do not need to propogate CancelledError, and no other exceptions should
     # happen here
     warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
 
@@ -118,7 +118,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
     doAssert(chann.closed, "connection not closed by handler!")
   except CatchableError as exc:
     # This is top-level procedure which will work as separate task, so it
-    # do not need to propagate CancelledError.
+    # do not need to propogate CancelledError.
     trace "Exception in mplex stream handler", m, chann, msg = exc.msg
     await chann.reset()
 
@@ -184,13 +184,11 @@ method handle*(m: Mplex) {.async, gcsafe.} =
   except CancelledError:
     # This procedure is spawned as task and it is not part of public API, so
     # there no way for this procedure to be cancelled implicitly.
-    debug "Cancellation in mplex handler", m
-  except LPStreamClosedError as exc:
-    trace "Stream Closed", m, msg = exc.msg
+    debug "Unexpected cancellation in mplex handler", m
   except LPStreamEOFError as exc:
     trace "Stream EOF", m, msg = exc.msg
   except CatchableError as exc:
-    warn "Exception in mplex read loop", m, msg = exc.msg
+    warn "Unexpected exception in mplex read loop", m, msg = exc.msg
   finally:
     await m.close()
   trace "Stopped mplex handler", m
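The comment these hunks repeat describes a chronos idiom: a procedure launched with `asyncSpawn` has no awaiting caller, so it must absorb every exception itself, including `CancelledError`. A minimal sketch of that idiom under those assumptions (not code from this diff):

```nim
import chronos

proc cleanupTask(done: Future[void]) {.async.} =
  try:
    await done             # e.g. the future returned by chann.join()
  except CancelledError:
    discard                # top-level task: nowhere to propagate to
  except CatchableError as exc:
    echo "cleanup failed: ", exc.msg

when isMainModule:
  let done = newFuture[void]("demo")
  asyncSpawn cleanupTask(done)
  done.complete()
  waitFor sleepAsync(10.milliseconds)  # give the spawned task a turn
```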
libp2p/protocols/pubsub/pubsub.nim

@@ -73,12 +73,12 @@ type
     msgSeqno*: uint64
     anonymize*: bool # if we omit fromPeer and seqno from RPC messages we send
 
-method unsubscribePeer*(p: PubSub, peer: PeerID) {.base.} =
+method unsubscribePeer*(p: PubSub, peerId: PeerID) {.base.} =
   ## handle peer disconnects
   ##
 
-  trace "Unsubscribing pubsub peer", peer
-  p.peers.del(peer)
+  trace "unsubscribing pubsub peer", peerId
+  p.peers.del(peerId)
 
   libp2p_pubsub_peers.set(p.peers.len.int64)
 
@@ -217,7 +217,6 @@ method subscribePeer*(p: PubSub, peer: PeerID) {.base.} =
   ## messages
   ##
 
-  trace "Subscribing peer", peer
   let peer = p.getOrCreatePeer(peer, p.codecs)
   peer.outbound = true # flag as outbound
 
@@ -339,9 +338,9 @@ method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async,
     if res != ValidationResult.Accept:
       result = res
       break
 
   case result
   of ValidationResult.Accept:
     libp2p_pubsub_validation_success.inc()
   of ValidationResult.Reject:
     libp2p_pubsub_validation_failure.inc()
@@ -390,7 +389,7 @@ proc init*[PubParams: object | bool](
   switch.addPeerEventHandler(peerEventHandler, PeerEvent.Left)
 
   pubsub.initPubSub()
 
   return pubsub
 
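For context on the `validate` hunk: per-topic validators each produce a `ValidationResult`, and the first non-`Accept` verdict wins before the success/failure counters are bumped. A hedged sketch of a validator in the handler shape PubSub expects (the empty-payload policy is invented for illustration):

```nim
import chronos
import libp2p/protocols/pubsub/pubsub,
       libp2p/protocols/pubsub/rpc/messages

proc exampleValidator(topic: string,
                      message: Message): Future[ValidationResult] {.async.} =
  # Illustrative policy: reject empty payloads, accept everything else.
  if message.data.len == 0:
    return ValidationResult.Reject
  return ValidationResult.Accept
```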
libp2p/protocols/secure/secure.nim

@@ -42,8 +42,7 @@ proc init*(T: type SecureConn,
              peerInfo: peerInfo,
              observedAddr: observedAddr,
              closeEvent: conn.closeEvent,
-             timeout: timeout,
-             dir: conn.dir)
+             timeout: timeout)
   result.initStream()
 
@@ -53,7 +52,6 @@ method initStream*(s: SecureConn) =
   procCall Connection(s).initStream()
 
 method close*(s: SecureConn) {.async.} =
-  trace "closing secure conn", s, dir = s.dir
   if not(isNil(s.stream)):
     await s.stream.close()
 
@@ -76,6 +74,10 @@ proc handleConn*(s: Secure,
     try:
       await conn.join()
       await sconn.close()
+    except CancelledError:
+      # This is top-level procedure which will work as separate task, so it
+      # do not need to propogate CancelledError.
+      discard
     except CatchableError as exc:
       trace "error cleaning up secure connection", err = exc.msg, sconn
 
@@ -125,18 +127,18 @@ method readOnce*(s: SecureConn,
 
   if not isNil(err):
     if not (err of LPStreamEOFError):
       warn "error while reading message from secure connection, closing.",
            error=err.name,
            message=err.msg,
            connection=s
     await s.close()
     raise err
 
   s.activity = true
 
   if buf.len == 0:
     raise newLPStreamIncompleteError()
 
   s.buf.add(buf)
 
   var p = cast[ptr UncheckedArray[byte]](pbytes)
libp2p/stream/bufferstream.nim

@@ -7,6 +7,29 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
+## This module implements an asynchronous buffer stream
+## which emulates physical async IO.
+##
+## The stream is based on the standard library's `Deque`,
+## which is itself based on a ring buffer.
+##
+## It works by exposing a regular LPStream interface and
+## a method ``pushTo`` to push data to the internal read
+## buffer; as well as a handler that can be registered
+## that gets triggered on every write to the stream. This
+## allows using the buffered stream as a sort of proxy,
+## which can be consumed as a regular LPStream but allows
+## injecting data for reads and intercepting writes.
+##
+## Another notable feature is that the stream is fully
+## ordered and asynchronous. Reads are queued up in order
+## and are suspended when not enough data available. This
+## allows preserving backpressure while maintaining full
+## asynchrony. Both writing to the internal buffer with
+## ``pushTo`` as well as reading with ``read*`` methods,
+## will suspend until either the amount of elements in the
+## buffer goes below ``maxSize`` or more data becomes available.
+
 import std/strformat
 import stew/byteutils
 import chronos, chronicles, metrics
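The module documentation added above describes the push/read contract; a minimal usage sketch under those assumptions (`newBufferStream`, `pushTo`, and the inherited `readExactly` are the calls the doc refers to):

```nim
import chronos
import libp2p/stream/bufferstream

proc demo() {.async.} =
  let bs = newBufferStream()
  await bs.pushTo(@[1'u8, 2, 3])        # inject data for pending readers
  var buf: array[3, byte]
  await bs.readExactly(addr buf[0], 3)  # suspends until enough data is queued
  await bs.close()

waitFor demo()
```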
libp2p/stream/chronosstream.nim

@@ -32,23 +32,23 @@ method initStream*(s: ChronosStream) =
   s.objName = "ChronosStream"
 
   s.timeoutHandler = proc() {.async, gcsafe.} =
-    trace "Idle timeout expired, closing ChronosStream", s
+    trace "idle timeout expired, closing ChronosStream"
     await s.close()
 
   procCall Connection(s).initStream()
 
 proc init*(C: type ChronosStream,
            client: StreamTransport,
-           dir: Direction,
            timeout = DefaultChronosStreamTimeout): ChronosStream =
   result = C(client: client,
-             timeout: timeout,
-             dir: dir)
+             timeout: timeout)
   result.initStream()
 
 template withExceptions(body: untyped) =
   try:
     body
+  except CancelledError as exc:
+    raise exc
   except TransportIncompleteError:
     # for all intents and purposes this is an EOF
     raise newLPStreamIncompleteError()
libp2p/stream/connection.nim

@@ -7,7 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
-import std/[hashes, oids, strformat, sugar]
+import std/[hashes, oids, strformat]
 import chronicles, chronos, metrics
 import lpstream,
        ../multiaddress,
@@ -25,6 +25,9 @@ const
 type
   TimeoutHandler* = proc(): Future[void] {.gcsafe.}
 
+  Direction* {.pure.} = enum
+    None, In, Out
+
   Connection* = ref object of LPStream
     activity*: bool # reset every time data is sent or received
     timeout*: Duration # channel timeout if no activity
@@ -32,6 +35,7 @@ type
     timeoutHandler*: TimeoutHandler # timeout handler
     peerInfo*: PeerInfo
     observedAddr*: Multiaddress
+    dir*: Direction
 
   ConnectionTracker* = ref object of TrackerBase
     opened*: uint64
@@ -81,9 +85,7 @@ method initStream*(s: Connection) =
 
   s.timerTaskFut = s.timeoutMonitor()
   if isNil(s.timeoutHandler):
-    s.timeoutHandler = proc(): Future[void] =
-      trace "Idle timeout expired, closing connection", s
-      s.close()
+    s.timeoutHandler = proc(): Future[void] = s.close()
 
   inc getConnectionTracker().opened
 
@@ -102,7 +104,7 @@ func hash*(p: Connection): Hash =
   cast[pointer](p).hash
 
 proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
-  ## monitor the channel for inactivity
+  ## monitor the channel for innactivity
   ##
   ## if the timeout was hit, it means that
   ## neither incoming nor outgoing activity
@@ -123,10 +125,9 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
 
       break
 
-    # reset channel on inactivity timeout
+    # reset channel on innactivity timeout
    trace "Connection timed out", s
    if not(isNil(s.timeoutHandler)):
-      trace "Calling timeout handler", s
      await s.timeoutHandler()
 
  except CancelledError as exc:
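`timeoutMonitor` above checks the `activity` flag once per `timeout` window and calls `timeoutHandler` when nothing moved; after this hunk the default handler is a bare `close()`. Sketch of installing a custom handler (field names from the hunk, the window value is arbitrary):

```nim
import chronos
import libp2p/stream/connection

proc installIdleHandler(conn: Connection) =
  conn.timeout = 5.minutes   # inactivity window polled by timeoutMonitor
  conn.timeoutHandler = proc(): Future[void] {.gcsafe.} =
    conn.close()             # same shape as the new default handler
```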
libp2p/stream/lpstream.nim

@@ -15,8 +15,7 @@ import ../varint,
        ../peerinfo,
        ../multiaddress
 
-declareGauge(libp2p_open_streams,
-  "open stream instances", labels = ["type", "dir"])
+declareGauge(libp2p_open_streams, "open stream instances", labels = ["type"])
 
 export oids
 
@@ -24,16 +23,12 @@ logScope:
   topics = "lpstream"
 
 type
-  Direction* {.pure.} = enum
-    In, Out
-
   LPStream* = ref object of RootObj
     closeEvent*: AsyncEvent
     isClosed*: bool
     isEof*: bool
     objName*: string
     oid*: Oid
-    dir*: Direction
 
   LPStreamError* = object of CatchableError
   LPStreamIncompleteError* = object of LPStreamError
@@ -91,8 +86,8 @@ method initStream*(s: LPStream) {.base.} =
   s.closeEvent = newAsyncEvent()
   s.oid = genOid()
 
-  libp2p_open_streams.inc(labelValues = [s.objName, $s.dir])
-  trace "Stream created", s, objName = s.objName, dir = $s.dir
+  libp2p_open_streams.inc(labelValues = [s.objName])
+  trace "Stream created", s, objName = s.objName
 
 proc join*(s: LPStream): Future[void] =
   s.closeEvent.wait()
@@ -219,8 +214,8 @@ method closeImpl*(s: LPStream): Future[void] {.async, base.} =
   ## Implementation of close - called only once
   trace "Closing stream", s, objName = s.objName
   s.closeEvent.fire()
-  libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
-  trace "Closed stream", s, objName = s.objName, dir = $s.dir
+  libp2p_open_streams.dec(labelValues = [s.objName])
+  trace "Closed stream", s, objName = s.objName
 
 method close*(s: LPStream): Future[void] {.base, async.} = # {.raises [Defect].}
   ## close the stream - this may block, but will not raise exceptions
@@ -228,7 +223,6 @@ method close*(s: LPStream): Future[void] {.base, async.} = # {.raises [Defect].}
   if s.isClosed:
     trace "Already closed", s
     return
 
   s.isClosed = true # Set flag before performing virtual close
-
   # An separate implementation method is used so that even when derived types
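The last hunk touches the close-once machinery: `close` sets `isClosed` exactly once before dispatching to the overridable `closeImpl`, so repeated or re-entrant closes are no-ops. The pattern in miniature (types invented for illustration):

```nim
type Closable = ref object of RootObj
  isClosed: bool

method closeImpl(c: Closable) {.base.} =
  echo "releasing resources"   # derived types override this once-only hook

proc close(c: Closable) =
  if c.isClosed:
    return
  c.isClosed = true            # flag set before dispatch, re-entry is a no-op
  c.closeImpl()

let c = Closable()
c.close()
c.close()                      # second call does nothing
```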
libp2p/switch.nim

@@ -292,10 +292,10 @@ proc internalConnect(s: Switch,
       let dialed = try:
           await t.dial(a)
         except CancelledError as exc:
-          trace "Dialing canceled", msg = exc.msg, peerId, address = $a
+          trace "Dialing canceled", msg = exc.msg, peerId
           raise exc
         except CatchableError as exc:
-          trace "Dialing failed", msg = exc.msg, peerId, address = $a
+          trace "Dialing failed", msg = exc.msg, peerId
           libp2p_failed_dials.inc()
           continue # Try the next address
 
libp2p/transports/tcptransport.nim

@@ -62,57 +62,31 @@ proc setupTcpTransportTracker(): TcpTransportTracker =
 proc connHandler*(t: TcpTransport,
                   client: StreamTransport,
                   initiator: bool): Connection =
-  debug "Handling tcp connection", address = $client.remoteAddress,
-                                   initiator = initiator,
-                                   clients = t.clients.len
-
-  let conn = Connection(
-    ChronosStream.init(
-      client,
-      dir = if initiator:
-        Direction.Out
-      else:
-        Direction.In))
+  trace "handling connection", address = $client.remoteAddress
+  let conn: Connection = Connection(ChronosStream.init(client))
+  conn.observedAddr = MultiAddress.init(client.remoteAddress).tryGet()
 
   if not initiator:
     if not isNil(t.handler):
       t.handlers &= t.handler(conn)
 
   proc cleanup() {.async.} =
     try:
-      await client.join() or conn.join()
-      trace "Cleaning up client", addrs = $client.remoteAddress,
-                                  conn = $conn.oid
-
-      t.clients.keepItIf( it != client )
-      if not(isNil(conn) and not conn.closed()):
+      await client.join()
+      trace "cleaning up client", addrs = $client.remoteAddress, connoid = $conn.oid
+      if not(isNil(conn)):
         await conn.close()
-
-      if not(isNil(client) and client.closed()):
-        await client.closeWait()
-
-      trace "Cleaned up client", addrs = $client.remoteAddress,
-                                 conn = $conn.oid
-
+      t.clients.keepItIf(it != client)
+    except CancelledError:
+      # This is top-level procedure which will work as separate task, so it
+      # do not need to propogate CancelledError.
+      trace "Unexpected cancellation in transport's cleanup"
     except CatchableError as exc:
-      let useExc {.used.} = exc
-      debug "Error cleaning up client", errMsg = exc.msg, s = conn
+      trace "error cleaning up client", exc = exc.msg
 
   t.clients.add(client)
   # All the errors are handled inside `cleanup()` procedure.
   asyncSpawn cleanup()
-
-  try:
-    conn.observedAddr = MultiAddress.init(client.remoteAddress).tryGet()
-  except CatchableError as exc:
-    trace "Unable to get remote address", exc = exc.msg
-
-    if not isNil(client):
-      client.close()
-
-    raise exc
-
-  return conn
+  result = conn
 
 proc connCb(server: StreamServer,
             client: StreamTransport) {.async, gcsafe.} =
@@ -123,12 +97,10 @@ proc connCb(server: StreamServer,
     # as it's added inside connHandler
     discard t.connHandler(client, false)
   except CancelledError as exc:
-    debug "Connection setup cancelled", exc = exc.msg
-    await client.closeWait()
     raise exc
-  except CatchableError as exc:
-    debug "Connection setup failed", exc = exc.msg
-    await client.closeWait()
+  except CatchableError as err:
+    debug "Connection setup failed", err = err.msg
+    client.close()
 
 proc init*(T: type TcpTransport, flags: set[ServerFlags] = {}): T =
   result = T(flags: flags)
@@ -197,16 +169,8 @@ method dial*(t: TcpTransport,
              Future[Connection] {.async, gcsafe.} =
   trace "dialing remote peer", address = $address
   ## dial a peer
-  var client: StreamTransport
-  try:
-    client = await connect(address)
-  except CatchableError as exc:
-    trace "Exception dialing peer", exc = exc.msg
-    if not(isNil(client)):
-      await client.closeWait()
-    raise exc
-
-  return t.connHandler(client, true)
+  let client: StreamTransport = await connect(address)
+  result = t.connHandler(client, true)
 
 method handles*(t: TcpTransport, address: MultiAddress): bool {.gcsafe.} =
   if procCall Transport(t).handles(address):
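`connHandler` keeps the spawn-and-forget cleanup: the task waits for the socket, closes the wrapping connection, and drops the client from `t.clients`, handling all errors inline because nothing awaits it. The ordering, reduced to a sketch (bookkeeping elided, `onDone` is a stand-in):

```nim
import chronos

proc watch(client: StreamTransport, onDone: proc() {.gcsafe.}) =
  proc cleanup() {.async.} =
    try:
      await client.join()   # resolves once the transport is gone
      onDone()              # e.g. close the Connection, prune t.clients
    except CancelledError:
      discard               # detached task, nothing to propagate to
    except CatchableError as exc:
      discard exc.msg       # swallowed: no caller will see this failure
  asyncSpawn cleanup()
```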