## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import oids, deques
import chronos, chronicles, metrics
import types,
       coder,
       ../muxer,
       nimcrypto/utils,
       ../../stream/connection,
       ../../stream/bufferstream,
       ../../peerinfo

export connection

logScope:
  topics = "mplexchannel"

## Channel half-closed states
##
## | State    | Closed local      | Closed remote
## |=============================================
## | Read     | Yes (until EOF)   | No
## | Write    | No                | Yes
##

# TODO: this is one place where we need to use
# a proper state machine, but I've opted out of
# it for now for two reasons:
#
# 1) we don't have that many states to manage
# 2) I'm not sure if adding the state machine
#    would have simplified or complicated the code
#
# But now that this is in place, we should perhaps
# reconsider reworking it again, this time with a
# more formal approach.
#

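# A minimal half-close sketch from the local side (illustrative only; `chan`
# is assumed to be an established LPChannel and `buf` a local seq[byte]):
#
#   await chan.write(@[byte 1, 2, 3])                  # allowed while open
#   await chan.close()                                 # half-close local end
#   doAssert chan.closed                               # no more local writes
#   let n = await chan.readOnce(addr buf[0], buf.len)  # reads still drain
#                                                      # remote data until EOF
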
type
  LPChannel* = ref object of BufferStream
    id*: uint64                   # channel id
    timeout: Duration             # channel timeout if no activity
    activity: bool                # set on every read/write; cleared by the timeout monitor
    name*: string                 # name of the channel (for debugging)
    conn*: Connection             # wrapped connection used for writing
    initiator*: bool              # initiated remotely or locally flag
    isLazy*: bool                 # is channel lazy
    isOpen*: bool                 # has channel been opened (only used with isLazy)
    closedLocal*: bool            # has channel been closed locally
    msgCode*: MessageType         # cached in/out message code
    closeCode*: MessageType       # cached in/out close code
    resetCode*: MessageType       # cached in/out reset code
    timerTaskFut: Future[void]    # the current timer instance

proc open*(s: LPChannel) {.async, gcsafe.}

template withWriteLock(lock: AsyncLock, body: untyped): untyped =
  try:
    await lock.acquire()
    body
  finally:
    if not(isNil(lock)) and lock.locked:
      lock.release()

template withEOFExceptions(body: untyped): untyped =
  try:
    body
  except LPStreamEOFError as exc:
    trace "muxed connection EOF", exc = exc.msg
  except LPStreamClosedError as exc:
    trace "muxed connection closed", exc = exc.msg
  except LPStreamIncompleteError as exc:
    trace "incomplete message", exc = exc.msg

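# Illustrative combination of the two helpers above (this mirrors how
# `resetMessage` below uses them; not additional functionality):
#
#   withEOFExceptions:            # swallow EOF/closed/incomplete errors
#     withWriteLock(s.writeLock): # serialize writes on the wrapped connection
#       await s.conn.writeMsg(s.id, s.resetCode)
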
proc cleanupTimer(s: LPChannel) {.async.} =
  ## cleanup timers
  if not isNil(s.timerTaskFut) and not s.timerTaskFut.finished:
    await s.timerTaskFut.cancelAndWait()

proc closeMessage(s: LPChannel) {.async.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  ## send close message - this will not raise
  ## on EOF or Closed
  withWriteLock(s.writeLock):
    trace "sending close message"

    await s.conn.writeMsg(s.id, s.closeCode) # write close

proc resetMessage(s: LPChannel) {.async.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  ## send reset message - this will not raise
  withEOFExceptions:
    withWriteLock(s.writeLock):
      trace "sending reset message"

      await s.conn.writeMsg(s.id, s.resetCode) # write reset

proc open*(s: LPChannel) {.async, gcsafe.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  ## NOTE: Don't call withExcAndLock or withWriteLock,
  ## because this already gets called from writeHandler
  ## which is locked
  await s.conn.writeMsg(s.id, MessageType.New, s.name)
  trace "opened channel"
  s.isOpen = true

proc closeRemote*(s: LPChannel) {.async.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  trace "got EOF, closing channel"
  try:
    await s.drainBuffer()

    s.isEof = true # set EOF immediately to prevent further reads
    await s.close() # close local end

    # call to avoid leaks
    await procCall BufferStream(s).close() # close parent bufferstream
    await s.cleanupTimer()

    trace "channel closed on EOF"
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception closing remote channel", exc = exc.msg

method closed*(s: LPChannel): bool =
  ## this emulates half-closed behavior:
  ## when closed locally, writing is
  ## disabled - see the table in the
  ## header of the file
  s.closedLocal

method reset*(s: LPChannel) {.base, async, gcsafe.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  if s.closedLocal and s.isEof:
    trace "channel already closed or reset"
    return

  trace "resetting channel"

  # we asyncCheck here because the other end
  # might be dead already - reset is always
  # optimistic
  asyncCheck s.resetMessage()

  try:
    # drain the buffer before closing
    await s.drainBuffer()
    await procCall BufferStream(s).close()

    s.isEof = true
    s.closedLocal = true

    await s.cleanupTimer()
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception in reset", exc = exc.msg

  trace "channel reset"

method close*(s: LPChannel) {.async, gcsafe.} =
  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo
    # stack = getStackTrace()

  if s.closedLocal:
    trace "channel already closed"
    return

  trace "closing local lpchannel"

  proc closeInternal() {.async.} =
    try:
      await s.closeMessage().wait(2.minutes)
      if s.atEof: # already closed by remote, close parent buffer immediately
        await procCall BufferStream(s).close()
      await s.cleanupTimer()
    except CancelledError as exc:
      await s.reset()
      raise exc
    except CatchableError as exc:
      trace "exception closing channel", exc = exc.msg
      await s.reset()

    trace "lpchannel closed local"

  s.closedLocal = true
  asyncCheck closeInternal()

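# Note: `close` returns as soon as `closedLocal` is set; the close message is
# sent in the background (`asyncCheck closeInternal()`) and the channel falls
# back to `reset` if sending fails or times out after 2 minutes.
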
proc timeoutMonitor(s: LPChannel) {.async.} =
  ## monitor the channel for inactivity
  ##
  ## if the timeout was hit, it means that
  ## neither incoming nor outgoing activity
  ## has been detected and the channel will
  ## be reset
  ##

  logScope:
    id = s.id
    initiator = s.initiator
    name = s.name
    oid = $s.oid
    peer = $s.conn.peerInfo

  try:
    while true:
      await sleepAsync(s.timeout)

      if s.closed or s.atEof:
        return

      if s.activity:
        s.activity = false
        continue

      break

    # reset channel on inactivity timeout
    trace "channel timed out, resetting"
    await s.reset()
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception in timeout", exc = exc.msg

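# Note: `readOnce` and `write` below mark the channel as active, so the
# monitor above only resets a channel after a full `timeout` window passes
# with neither reads nor writes. A rough, purely illustrative timeline
# (assuming timeout = 10.minutes):
#
#   t = 0       monitor sleeps
#   t = 3 min   await chan.write(data)             -> activity = true
#   t = 10 min  monitor wakes, sees activity, clears it, sleeps again
#   t = 20 min  no reads or writes since last wake -> await chan.reset()
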
method initStream*(s: LPChannel) =
  if s.objName.len == 0:
    s.objName = "LPChannel"

  procCall BufferStream(s).initStream()

method readOnce*(s: LPChannel,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int] =
  s.activity = true
  procCall BufferStream(s).readOnce(pbytes, nbytes)

method write*(s: LPChannel, msg: seq[byte]): Future[void] =
  s.activity = true
  procCall BufferStream(s).write(msg)

proc init*(
  L: type LPChannel,
  id: uint64,
  conn: Connection,
  initiator: bool,
  name: string = "",
  size: int = DefaultBufferSize,
  lazy: bool = false,
  timeout: Duration = DefaultChanTimeout): LPChannel =

  let chann = L(
    id: id,
    name: name,
    conn: conn,
    initiator: initiator,
    isLazy: lazy,
    timeout: timeout,
    msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
    closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
    resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
    dir: if initiator: Direction.Out else: Direction.In)

  logScope:
    id = chann.id
    initiator = chann.initiator
    name = chann.name
    oid = $chann.oid
    peer = $chann.conn.peerInfo
    # stack = getStackTrace()

  proc writeHandler(data: seq[byte]) {.async, gcsafe.} =
    try:
      if chann.isLazy and not(chann.isOpen):
        await chann.open()

      # writes should happen in sequence
      trace "sending data"

      await conn.writeMsg(chann.id,
                          chann.msgCode,
                          data)
    except CatchableError as exc:
      trace "exception in lpchannel write handler", exc = exc.msg
      await chann.reset()
      raise exc

  chann.initBufferStream(writeHandler, size)
  when chronicles.enabledLogLevel == LogLevel.TRACE:
    chann.name = if chann.name.len > 0: chann.name else: $chann.oid

  chann.timerTaskFut = chann.timeoutMonitor()
  trace "created new lpchannel"

  return chann

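# A minimal construction sketch (illustrative only; in practice `LPChannel.init`
# is called by the mplex muxer, and `conn` is the muxed connection it manages):
#
#   let chan = LPChannel.init(
#     id = 1'u64, conn = conn, initiator = true,
#     name = "chan-1", lazy = true)
#
#   await chan.write(@[byte 1, 2, 3]) # lazy channel: first write opens it
#   await chan.close()                # half-close the local end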