nim-libp2p/libp2p/muxers/mplex/lpchannel.nim

## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import oids, deques
import chronos, chronicles, metrics
import types,
coder,
nimcrypto/utils,
../../stream/connection,
../../stream/bufferstream,
../../utility,
../../errors,
../../peerinfo
export connection
logScope:
topics = "mplexchannel"
## Channel half-closed states
##
## | State  | Closed local    | Closed remote
## |=============================================
## | Read   | Yes (until EOF) | No
## | Write  | No              | Yes
##
# TODO: this is one place where we need to use
# a proper state machine, but I've opted out of
# it for now for two reasons:
#
# 1) we don't have that many states to manage
# 2) I'm not sure if adding the state machine
# would have simplified or complicated the code
#
# Now that this is in place, we should perhaps
# revisit it, this time with a more formal approach.
#
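# A rough usage sketch (illustrative only - in real code channels are
# created and driven by the mplex muxer rather than directly):
#
#   let chan = newChannel(1'u64, conn, initiator = true, name = "example")
#   await chan.open()                # or pass ``lazy = true`` and let the
#                                    # first write open the channel
#   await chan.write(@[1'u8, 2, 3])  # framed as an mplex message by writeHandler
#   await chan.close()               # half-close: the remote may still write to us
#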
type
LPChannel* = ref object of BufferStream
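    ## A single multiplexed channel within an mplex session: reads go
    ## through the inherited BufferStream, while writes are framed as
    ## mplex messages on the wrapped connection (see writeHandler below)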
id*: uint64 # channel id
name*: string # name of the channel (for debugging)
    conn*: Connection             # wrapped connection used for writing
    initiator*: bool              # initiated locally or remotely
isLazy*: bool # is channel lazy
isOpen*: bool # has channel been opened (only used with isLazy)
closedLocal*: bool # has channel been closed locally
msgCode*: MessageType # cached in/out message code
closeCode*: MessageType # cached in/out close code
resetCode*: MessageType # cached in/out reset code
proc open*(s: LPChannel) {.async, gcsafe.}
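# run `body` while holding `lock`, always releasing the lock afterwards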
template withWriteLock(lock: AsyncLock, body: untyped): untyped =
try:
await lock.acquire()
body
finally:
if not(isNil(lock)) and lock.locked:
lock.release()
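# swallow and log the stream errors that are expected when the remote
# end has already gone away (EOF, closed, incomplete message)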
template withEOFExceptions(body: untyped): untyped =
try:
body
except LPStreamEOFError as exc:
trace "muxed connection EOF", exc = exc.msg
except LPStreamClosedError as exc:
trace "muxed connection closed", exc = exc.msg
except LPStreamIncompleteError as exc:
trace "incomplete message", exc = exc.msg
method reset*(s: LPChannel) {.base, async, gcsafe.}
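# give the channel a default object name before the parent BufferStream init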
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = "LPChannel"
procCall BufferStream(s).initStream()
proc newChannel*(id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
size: int = DefaultBufferSize,
lazy: bool = false): LPChannel =
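  ## allocate a new channel with the given ``id`` on top of ``conn``;
  ## ``initiator`` picks the in/out message codes and ``lazy`` delays
  ## sending the ``New`` message until the first write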
result = LPChannel(id: id,
name: name,
conn: conn,
initiator: initiator,
msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
isLazy: lazy)
let chan = result
logScope:
id = chan.id
initiator = chan.initiator
name = chan.name
oid = $chan.oid
peer = $chan.conn.peerInfo
# stack = getStackTrace()
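  # BufferStream write callback: frames outgoing data as an mplex message
  # on the wrapped connection, opening the channel first if it is lazy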
proc writeHandler(data: seq[byte]): Future[void] {.async, gcsafe.} =
try:
if chan.isLazy and not(chan.isOpen):
await chan.open()
# writes should happen in sequence
trace "sending data"
await conn.writeMsg(chan.id,
chan.msgCode,
data).wait(2.minutes) # write header
except CatchableError as exc:
trace "exception in lpchannel write handler", exc = exc.msg
await chan.reset()
raise exc
result.initBufferStream(writeHandler, size)
when chronicles.enabledLogLevel == LogLevel.TRACE:
result.name = if result.name.len > 0: result.name else: $result.oid
trace "created new lpchannel"
proc closeMessage(s: LPChannel) {.async.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
  ## send the mplex close message - unlike reset,
  ## write errors here propagate to the caller
withWriteLock(s.writeLock):
trace "sending close message"
await s.conn.writeMsg(s.id, s.closeCode) # write close
proc resetMessage(s: LPChannel) {.async.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
  ## send reset message - this will not raise on
  ## EOF or Closed, since the remote may already be gone
withEOFExceptions:
withWriteLock(s.writeLock):
trace "sending reset message"
await s.conn.writeMsg(s.id, s.resetCode) # write reset
proc open*(s: LPChannel) {.async, gcsafe.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
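  ## open the channel by sending a ``New`` message to the remote end
  ##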
## NOTE: Don't call withExcAndLock or withWriteLock,
## because this already gets called from writeHandler
## which is locked
await s.conn.writeMsg(s.id, MessageType.New, s.name)
trace "opened channel"
s.isOpen = true
proc closeRemote*(s: LPChannel) {.async.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
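  ## the remote end closed its side: wait for buffered data to be read,
  ## then mark EOF and close the underlying bufferstream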
trace "got EOF, closing channel"
# wait for all data in the buffer to be consumed
while s.len > 0:
await s.dataReadEvent.wait()
s.dataReadEvent.clear()
s.isEof = true # set EOF immediately to prevent further reads
await s.close() # close local end
  # close the parent bufferstream too, to avoid leaking it
  await procCall BufferStream(s).close()
trace "channel closed on EOF"
method closed*(s: LPChannel): bool =
  ## this emulates half-closed behavior: when
  ## closed locally, writing is disabled - see
  ## the table at the top of this file
s.closedLocal
method reset*(s: LPChannel) {.base, async, gcsafe.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
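  ## hard-stop the channel: optimistically notify the remote and tear
  ## down local state without waiting for buffered data to be read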
trace "resetting channel"
if s.closedLocal and s.isEof:
trace "channel already closed or reset"
return
# we asyncCheck here because the other end
# might be dead already - reset is always
# optimistic
asyncCheck s.resetMessage()
await procCall BufferStream(s).close()
s.isEof = true
s.closedLocal = true
trace "channel reset"
method close*(s: LPChannel) {.async, gcsafe.} =
logScope:
id = s.id
initiator = s.initiator
name = s.name
oid = $s.oid
peer = $s.conn.peerInfo
# stack = getStackTrace()
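  ## close the local end: send a ``Close`` message in the background and,
  ## if the remote end is already closed, tear down the bufferstream too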
if s.closedLocal:
trace "channel already closed"
return
trace "closing local lpchannel"
proc closeInternal() {.async.} =
try:
await s.closeMessage().wait(2.minutes)
      if s.atEof: # already closed by remote; close the parent buffer immediately
await procCall BufferStream(s).close()
    except CancelledError as exc:
      await s.reset() # reset the channel if the close was cancelled
      raise exc
    except CatchableError as exc:
      trace "exception closing channel", exc = exc.msg
      await s.reset() # reset on any error, including the close timeout
trace "lpchannel closed local"
s.closedLocal = true
asyncCheck closeInternal()