2019-09-01 17:31:24 +00:00
|
|
|
## Nim-LibP2P
|
2019-09-24 17:48:23 +00:00
|
|
|
## Copyright (c) 2019 Status Research & Development GmbH
|
2019-09-01 17:31:24 +00:00
|
|
|
## Licensed under either of
|
|
|
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
|
|
|
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
|
|
|
## at your option.
|
|
|
|
## This file may not be copied, modified, or distributed except according to
|
|
|
|
## those terms.
|
|
|
|
|
2020-06-19 17:29:43 +00:00
|
|
|
import oids
|
|
|
|
import chronicles, chronos, metrics
|
2020-05-08 20:58:23 +00:00
|
|
|
import ../varint,
|
2020-06-19 17:29:43 +00:00
|
|
|
../vbuffer,
|
|
|
|
../peerinfo,
|
|
|
|
../multiaddress
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-06-19 17:29:43 +00:00
|
|
|
declareGauge(libp2p_open_streams, "open stream instances", labels = ["type"])
|
2020-05-23 17:08:39 +00:00
|
|
|
|
2019-12-10 20:50:35 +00:00
|
|
|
type
  LPStream* = ref object of RootObj
    ## Base object for all libp2p stream implementations.
    closeEvent*: AsyncEvent ## fired once when the stream is closed; see `join`
    isClosed*: bool         ## set by `close`; checked via `closed`
    isEof*: bool            ## remote end has stopped sending; checked via `atEof`
    objName*: string        ## concrete stream type name, used as metrics label
    oid*: Oid               ## unique id of this stream instance, used in tracing

  LPStreamError* = object of CatchableError
    ## Root of the recoverable stream error hierarchy.
  LPStreamIncompleteError* = object of LPStreamError
    ## Stream delivered fewer bytes than requested (see `readExactly`).
  LPStreamIncorrectDefect* = object of Defect
    ## Programming error: stream API used incorrectly.
  LPStreamLimitError* = object of LPStreamError
    ## A buffer limit was reached while reading.
  LPStreamReadError* = object of LPStreamError
    ## Reading from the underlying transport failed.
    par*: ref CatchableError ## the original error that caused the read failure
  LPStreamWriteError* = object of LPStreamError
    ## Writing to the underlying transport failed.
    par*: ref CatchableError ## the original error that caused the write failure
  LPStreamEOFError* = object of LPStreamError
    ## Stream reached EOF while more data was expected.
  LPStreamClosedError* = object of LPStreamError
    ## Operation attempted on a stream that was already closed.

  InvalidVarintError* = object of LPStreamError
    ## A length-prefix varint could not be parsed (see `readVarint`).
  MaxSizeError* = object of LPStreamError
    ## A length-prefixed message exceeded the allowed maximum size (see `readLp`).
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamReadError*(p: ref CatchableError): ref CatchableError =
  ## Wrap the parent error `p` in an `LPStreamReadError`: the parent is
  ## stored in the `par` field and its name and message are appended to
  ## the new error's message.
  let e = newException(LPStreamReadError, "Read stream failed")
  e.msg.add(", originated from [" & $p.name & "] " & p.msg)
  e.par = p
  result = e
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamReadError*(msg: string): ref CatchableError =
  ## Create an `LPStreamReadError` carrying `msg`, with no parent error.
  result = newException(LPStreamReadError, msg)
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamWriteError*(p: ref CatchableError): ref CatchableError =
  ## Wrap the parent error `p` in an `LPStreamWriteError`: the parent is
  ## stored in the `par` field and its name and message are appended to
  ## the new error's message.
  let e = newException(LPStreamWriteError, "Write stream failed")
  e.msg.add(", originated from [" & $p.name & "] " & p.msg)
  e.par = p
  result = e
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamIncompleteError*(): ref CatchableError =
  ## Error for when a stream delivers fewer bytes than were requested.
  result = (ref LPStreamIncompleteError)(msg: "Incomplete data received")
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamLimitError*(): ref CatchableError =
  ## Error for when a read hits the configured buffer limit.
  result = (ref LPStreamLimitError)(msg: "Buffer limit reached")
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamIncorrectDefect*(m: string): ref Defect =
  ## Defect raised on incorrect stream API usage; `m` describes the misuse.
  result = (ref LPStreamIncorrectDefect)(msg: m)
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
proc newLPStreamEOFError*(): ref CatchableError =
  ## Error for when the stream reached EOF while more data was expected.
  result = (ref LPStreamEOFError)(msg: "Stream EOF!")
|
2019-09-04 06:40:11 +00:00
|
|
|
|
2020-05-20 00:14:15 +00:00
|
|
|
proc newLPStreamClosedError*(): ref CatchableError =
  ## Error for operating on a stream that has already been closed.
  ##
  ## Fixed: previously declared to return `ref Exception`, unlike every
  ## other constructor in this module; now returns `ref CatchableError`
  ## for consistency. Backward-compatible, since `LPStreamClosedError`
  ## derives from `CatchableError`.
  result = newException(LPStreamClosedError, "Stream Closed!")
|
|
|
|
|
2020-05-23 17:08:39 +00:00
|
|
|
method initStream*(s: LPStream) {.base.} =
  ## Initialise base stream state: default the debug name when a subclass
  ## did not set one, assign a fresh oid and account for the new stream in
  ## the `libp2p_open_streams` gauge (decremented again in `close`).
  if s.objName.len == 0:
    s.objName = "LPStream"

  s.oid = genOid()
  libp2p_open_streams.inc(labelValues = [s.objName])
  trace "stream created", oid = $s.oid, name = s.objName
|
2020-06-24 15:08:44 +00:00
|
|
|
|
|
|
|
# TODO: debugging aid to troubleshoot streams open/close
|
|
|
|
# try:
|
|
|
|
# echo "ChronosStream ", libp2p_open_streams.value(labelValues = ["ChronosStream"])
|
|
|
|
# echo "SecureConn ", libp2p_open_streams.value(labelValues = ["SecureConn"])
|
|
|
|
# # doAssert(libp2p_open_streams.value(labelValues = ["ChronosStream"]) >=
|
|
|
|
# # libp2p_open_streams.value(labelValues = ["SecureConn"]))
|
|
|
|
# except CatchableError:
|
|
|
|
# discard
|
|
|
|
|
|
|
|
proc join*(s: LPStream): Future[void] =
  ## Wait until the stream is closed, i.e. until `closeEvent` fires
  ## (triggered by `close`).
  s.closeEvent.wait()
|
2020-05-23 17:08:39 +00:00
|
|
|
|
2019-12-10 20:50:35 +00:00
|
|
|
method closed*(s: LPStream): bool {.base, inline.} =
  ## Whether `close` has been called on this stream.
  s.isClosed
|
|
|
|
|
2020-05-20 00:14:15 +00:00
|
|
|
method atEof*(s: LPStream): bool {.base, inline.} =
  ## Whether the remote end has stopped sending data (EOF reached).
  s.isEof
|
|
|
|
|
2020-03-27 14:25:52 +00:00
|
|
|
method readOnce*(s: LPStream,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int]
                 {.base, async.} =
  ## Read up to `nbytes` bytes into the buffer at `pbytes`, returning the
  ## number of bytes actually read. Base implementation is abstract and
  ## must be overridden by concrete stream types.
  doAssert(false, "not implemented!")
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-06-27 17:33:34 +00:00
|
|
|
proc readExactly*(s: LPStream,
                  pbytes: pointer,
                  nbytes: int):
                  Future[void] {.async.} =
  ## Read exactly `nbytes` bytes from `s` into the buffer at `pbytes`.
  ##
  ## Raises `LPStreamEOFError` when the stream is already at EOF before
  ## reading starts, and `LPStreamIncompleteError` when EOF is hit before
  ## `nbytes` bytes could be read.
  if s.atEof:
    raise newLPStreamEOFError()

  logScope:
    nbytes = nbytes
    objName = s.objName # fixed: log label was misspelled `obName`
    stack = getStackTrace()
    oid = $s.oid

  var pbuffer = cast[ptr UncheckedArray[byte]](pbytes)
  var read = 0
  # keep pulling from readOnce until the request is satisfied or EOF
  while read < nbytes and not(s.atEof()):
    read += await s.readOnce(addr pbuffer[read], nbytes - read)

  if read < nbytes:
    trace "incomplete data received", read
    raise newLPStreamIncompleteError()
|
|
|
|
|
2020-07-12 16:37:10 +00:00
|
|
|
proc readLine*(s: LPStream,
               limit = 0,
               sep = "\r\n"): Future[string]
               {.async, deprecated: "todo".} =
  ## Read characters one at a time until the separator `sep` is seen or
  ## `limit` characters have been accumulated (`limit <= 0` means no
  ## limit). The returned string does not include the separator.
  # TODO replace with something that exploits buffering better
  var lim = if limit <= 0: -1 else: limit
  var state = 0 # number of leading chars of `sep` matched so far

  while true:
    var ch: char
    await readExactly(s, addr ch, 1)

    if sep[state] == ch:
      # current char continues a (partial) separator match
      inc(state)
      if state == len(sep):
        break
    else:
      # match broken: flush any partially-matched separator prefix
      # NOTE(review): `state` is reset to 0 *before* it is used below, so
      # both `missing` and `sep[0 ..< state]` are always empty — a partial
      # separator prefix is silently dropped; confirm intended behavior.
      state = 0
      if limit > 0:
        let missing = min(state, lim - len(result) - 1)
        result.add(sep[0 ..< missing])
      else:
        result.add(sep[0 ..< state])

      result.add(ch)
      if len(result) == lim:
        break
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-05-08 20:58:23 +00:00
|
|
|
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe.} =
  ## Read a protobuf-style unsigned varint from `conn` one byte at a time
  ## (at most 10 bytes — the maximum encoded length of a uint64).
  ## Raises `InvalidVarintError` when the bytes do not form a valid varint.
  var
    varint: uint64          # decoded value
    length: int             # bytes consumed by the decoder (unused here)
    buffer: array[10, byte] # accumulated varint bytes

  for i in 0..<len(buffer):
    await conn.readExactly(addr buffer[i], 1)
    # re-attempt a decode after every byte; Incomplete means "need more"
    let res = PB.getUVarint(buffer.toOpenArray(0, i), length, varint)
    if res.isOk():
      return varint
    if res.error() != VarintError.Incomplete:
      break
  if true: # can't end with a raise apparently
    raise (ref InvalidVarintError)(msg: "Cannot parse varint")
|
|
|
|
|
|
|
|
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe.} =
  ## Read a length-prefixed message, where the length is encoded as a
  ## varint. `maxSize` caps the accepted message length; a negative value
  ## means "no limit". Raises `MaxSizeError` when the prefix exceeds the
  ## cap; returns an empty seq for a zero-length message.
  let size = await s.readVarint()
  let limit = uint64(if maxSize < 0: int.high else: maxSize)

  if size > limit:
    raise (ref MaxSizeError)(msg: "Message exceeds maximum length")

  if size == 0:
    return

  var payload = newSeq[byte](size)
  await s.readExactly(addr payload[0], payload.len)
  return payload
|
|
|
|
|
|
|
|
proc writeLp*(s: LPStream, msg: string | seq[byte]): Future[void] {.gcsafe.} =
  ## Write `msg` to `s` prefixed with its length encoded as a varint
  ## (the counterpart of `readLp`).
  var vb = initVBuffer()
  vb.writeSeq(msg)
  vb.finish()
  s.write(vb.buffer)
|
|
|
|
|
2020-05-07 20:37:46 +00:00
|
|
|
method write*(s: LPStream, msg: seq[byte]) {.base, async.} =
  ## Write `msg` to the stream. Base implementation is abstract and must
  ## be overridden by concrete stream types.
  doAssert(false, "not implemented!")
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-05-06 16:31:47 +00:00
|
|
|
proc write*(s: LPStream, pbytes: pointer, nbytes: int): Future[void] {.deprecated: "seq".} =
  ## Deprecated raw-pointer overload: copies `nbytes` bytes starting at
  ## `pbytes` into a fresh seq and forwards to the seq-based `write`.
  s.write(@(toOpenArray(cast[ptr UncheckedArray[byte]](pbytes), 0, nbytes - 1)))
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-05-07 20:37:46 +00:00
|
|
|
proc write*(s: LPStream, msg: string): Future[void] =
  ## String overload: forwards `msg`'s bytes to the seq-based `write`.
  s.write(@(toOpenArrayByte(msg, 0, msg.high)))
|
2019-09-01 17:31:24 +00:00
|
|
|
|
2020-06-29 15:15:31 +00:00
|
|
|
# TODO: split `close` into `close` and `dispose/destroy`
|
2020-06-19 17:29:43 +00:00
|
|
|
method close*(s: LPStream) {.base, async.} =
  ## Close the stream: fire `closeEvent` (waking `join` callers) and
  ## decrement the open-streams gauge incremented in `initStream`.
  ## Idempotent — subsequent calls are no-ops.
  if not s.isClosed:
    s.isClosed = true
    s.closeEvent.fire()
    libp2p_open_streams.dec(labelValues = [s.objName])
    trace "stream destroyed", oid = $s.oid, name = s.objName
|
2020-06-24 15:08:44 +00:00
|
|
|
|
|
|
|
# TODO: debugging aid to troubleshoot streams open/close
|
|
|
|
# try:
|
|
|
|
# echo "ChronosStream ", libp2p_open_streams.value(labelValues = ["ChronosStream"])
|
|
|
|
# echo "SecureConn ", libp2p_open_streams.value(labelValues = ["SecureConn"])
|
|
|
|
# # doAssert(libp2p_open_streams.value(labelValues = ["ChronosStream"]) >=
|
|
|
|
# # libp2p_open_streams.value(labelValues = ["SecureConn"]))
|
|
|
|
# except CatchableError:
|
|
|
|
# discard
|