#
#            Chronos Asynchronous Streams
#             (c) Copyright 2019-Present
#        Status Research & Development GmbH
#
#             Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#              MIT license (LICENSE-MIT)

{.push raises: [Defect].}

import ../asyncloop, ../asyncsync
import ../transports/common, ../transports/stream

export asyncsync, stream, common

const
  AsyncStreamDefaultBufferSize* = 4096
    ## Default reading stream internal buffer size.
  AsyncStreamDefaultQueueSize* = 0
    ## Default writing stream internal queue size.
  AsyncStreamReaderTrackerName* = "async.stream.reader"
    ## AsyncStreamReader leaks tracker name
  AsyncStreamWriterTrackerName* = "async.stream.writer"
    ## AsyncStreamWriter leaks tracker name

type
  AsyncStreamError* = object of CatchableError
  AsyncStreamIncorrectDefect* = object of Defect
  AsyncStreamIncompleteError* = object of AsyncStreamError
  AsyncStreamLimitError* = object of AsyncStreamError
  AsyncStreamUseClosedError* = object of AsyncStreamError
  AsyncStreamReadError* = object of AsyncStreamError
    par*: ref CatchableError
  AsyncStreamWriteError* = object of AsyncStreamError
    par*: ref CatchableError

  AsyncBuffer* = object
    offset*: int
    buffer*: seq[byte]
    events*: array[2, AsyncEvent]

  WriteType* = enum
    Pointer, Sequence, String

  WriteItem* = object
    case kind*: WriteType
    of Pointer:
      dataPtr*: pointer
    of Sequence:
      dataSeq*: seq[byte]
    of String:
      dataStr*: string
    size*: int
    offset*: int
    future*: Future[void]

  AsyncStreamState* = enum
    Running,  ## Stream is online and working
    Error,    ## Stream has stored error
    Stopped,  ## Stream was closed while working
    Finished, ## Stream was properly finished
    Closed    ## Stream was closed

  StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {.gcsafe, raises: [Defect].}
    ## Main read loop for read streams.
  StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {.gcsafe, raises: [Defect].}
    ## Main write loop for write streams.

  AsyncStreamReader* = ref object of RootRef
    rsource*: AsyncStreamReader
    tsource*: StreamTransport
    readerLoop*: StreamReaderLoop
    state*: AsyncStreamState
    buffer*: AsyncBuffer
    udata: pointer
    error*: ref AsyncStreamError
    bytesCount*: uint64
    future: Future[void]

  AsyncStreamWriter* = ref object of RootRef
    wsource*: AsyncStreamWriter
    tsource*: StreamTransport
    writerLoop*: StreamWriterLoop
    state*: AsyncStreamState
    queue*: AsyncQueue[WriteItem]
    error*: ref AsyncStreamError
    udata: pointer
    bytesCount*: uint64
    future: Future[void]

  AsyncStream* = object of RootObj
    reader*: AsyncStreamReader
    writer*: AsyncStreamWriter

  AsyncStreamTracker* = ref object of TrackerBase
    opened*: int64
    closed*: int64

  AsyncStreamRW* = AsyncStreamReader | AsyncStreamWriter

proc init*(t: typedesc[AsyncBuffer], size: int): AsyncBuffer =
  AsyncBuffer(
    buffer: newSeq[byte](size),
    events: [newAsyncEvent(), newAsyncEvent()],
    offset: 0
  )

proc getBuffer*(sb: AsyncBuffer): pointer {.inline.} =
  unsafeAddr sb.buffer[sb.offset]

proc bufferLen*(sb: AsyncBuffer): int {.inline.} =
  len(sb.buffer) - sb.offset

proc getData*(sb: AsyncBuffer): pointer {.inline.} =
  unsafeAddr sb.buffer[0]

template dataLen*(sb: AsyncBuffer): int =
  sb.offset

proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} =
  doAssert(index < sb.offset)
  sb.buffer[index]

proc update*(sb: var AsyncBuffer, size: int) {.inline.} =
  sb.offset += size

proc wait*(sb: var AsyncBuffer): Future[void] =
  sb.events[0].clear()
  sb.events[1].fire()
  sb.events[0].wait()

proc transfer*(sb: var AsyncBuffer): Future[void] =
  sb.events[1].clear()
  sb.events[0].fire()
  sb.events[1].wait()

proc forget*(sb: var AsyncBuffer) {.inline.} =
  sb.events[1].clear()
  sb.events[0].fire()

proc shift*(sb: var AsyncBuffer, size: int) {.inline.} =
  if sb.offset > size:
    moveMem(addr sb.buffer[0], addr sb.buffer[size], sb.offset - size)
    sb.offset = sb.offset - size
  else:
    sb.offset = 0

proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} =
  copyMem(cast[pointer](cast[uint](dest) + cast[uint](offset)),
          unsafeAddr sb.buffer[0], length)

proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte,
             nbytes: int): Future[void] {.async.} =
  ## Upload any amount of bytes to the buffer. If the internal buffer cannot
  ## fit all the data at once, the data will be uploaded in chunks of up to
  ## the internal buffer size.
  var length = nbytes
  var srcBuffer = cast[ptr UncheckedArray[byte]](pbytes)
  var srcOffset = 0
  while length > 0:
    let size = min(length, sb[].bufferLen())
    if size == 0:
      # Internal buffer is full, we need to transfer data to consumer.
      await sb[].transfer()
    else:
      # Copy data from `pbytes` to internal buffer.
      copyMem(addr sb[].buffer[sb.offset], addr srcBuffer[srcOffset], size)
      sb[].offset = sb[].offset + size
      srcOffset = srcOffset + size
      length = length - size
  # We notify consumers that new data is available.
  sb[].forget()

template toDataOpenArray*(sb: AsyncBuffer): auto =
  toOpenArray(sb.buffer, 0, sb.offset - 1)

template toBufferOpenArray*(sb: AsyncBuffer): auto =
  toOpenArray(sb.buffer, sb.offset, len(sb.buffer) - 1)

template copyOut*(dest: pointer, item: WriteItem, length: int) =
  if item.kind == Pointer:
    let p = cast[pointer](cast[uint](item.dataPtr) + uint(item.offset))
    copyMem(dest, p, length)
  elif item.kind == Sequence:
    copyMem(dest, unsafeAddr item.dataSeq[item.offset], length)
  elif item.kind == String:
    copyMem(dest, unsafeAddr item.dataStr[item.offset], length)

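# `AsyncBuffer` keeps produced-but-unconsumed bytes in `buffer[0 ..< offset]`
# and pairs a producer with a consumer through `events`: the producer fills
# the buffer via `upload()`/`forget()`, the consumer blocks in `wait()` until
# data arrives and drops processed bytes with `shift()`. A minimal,
# hypothetical consumer step (illustrative sketch only, `buf` and `process`
# are assumed names):
#
#   while buf.dataLen() == 0:
#     await buf.wait()
#   process(buf.toDataOpenArray())
#   buf.shift(buf.dataLen())
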
proc newAsyncStreamReadError(p: ref CatchableError): ref AsyncStreamReadError {.
     noinline.} =
  var w = newException(AsyncStreamReadError, "Read stream failed")
  w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
  w.par = p
  w

proc newAsyncStreamWriteError(p: ref CatchableError): ref AsyncStreamWriteError {.
     noinline.} =
  var w = newException(AsyncStreamWriteError, "Write stream failed")
  w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
  w.par = p
  w

proc newAsyncStreamIncompleteError*(): ref AsyncStreamIncompleteError {.
     noinline.} =
  newException(AsyncStreamIncompleteError, "Incomplete data sent or received")

proc newAsyncStreamLimitError*(): ref AsyncStreamLimitError {.noinline.} =
  newException(AsyncStreamLimitError, "Buffer limit reached")

proc newAsyncStreamUseClosedError*(): ref AsyncStreamUseClosedError {.
     noinline.} =
  newException(AsyncStreamUseClosedError, "Stream is already closed")

proc raiseAsyncStreamUseClosedError*() {.
     noinline, noreturn, raises: [Defect, AsyncStreamUseClosedError].} =
  raise newAsyncStreamUseClosedError()

proc raiseAsyncStreamLimitError*() {.
     noinline, noreturn, raises: [Defect, AsyncStreamLimitError].} =
  raise newAsyncStreamLimitError()

proc raiseAsyncStreamIncompleteError*() {.
     noinline, noreturn, raises: [Defect, AsyncStreamIncompleteError].} =
  raise newAsyncStreamIncompleteError()

proc raiseAsyncStreamIncorrectDefect*(m: string) {.
     noinline, noreturn, raises: [Defect].} =
  raise newException(AsyncStreamIncorrectDefect, m)

proc raiseEmptyMessageDefect*() {.noinline, noreturn.} =
  raise newException(AsyncStreamIncorrectDefect,
                     "Could not write empty message")

template checkStreamClosed*(t: untyped) =
  if t.state == AsyncStreamState.Closed:
    raiseAsyncStreamUseClosedError()

proc atEof*(rstream: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream ``rstream`` is closed or finished and
  ## its internal buffer does not have any bytes left.
  if isNil(rstream.readerLoop):
    if isNil(rstream.rsource):
      rstream.tsource.atEof()
    else:
      rstream.rsource.atEof()
  else:
    rstream.state in {AsyncStreamState.Stopped, Finished, Closed, Error} and
      (rstream.buffer.dataLen() == 0)

proc atEof*(wstream: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream ``wstream`` is closed or finished.
  if isNil(wstream.writerLoop):
    if isNil(wstream.wsource):
      wstream.tsource.atEof()
    else:
      wstream.wsource.atEof()
  else:
    wstream.state in {AsyncStreamState.Stopped, Finished, Closed, Error}

proc closed*(reader: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream is closed.
  (reader.state == AsyncStreamState.Closed)

proc finished*(reader: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream is finished (completed).
  if isNil(reader.readerLoop):
    if isNil(reader.rsource):
      reader.tsource.finished()
    else:
      reader.rsource.finished()
  else:
    (reader.state == AsyncStreamState.Finished)

proc stopped*(reader: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream is stopped (interrupted).
  if isNil(reader.readerLoop):
    if isNil(reader.rsource):
      false
    else:
      reader.rsource.stopped()
  else:
    (reader.state == AsyncStreamState.Stopped)

proc running*(reader: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream is still running.
  if isNil(reader.readerLoop):
    if isNil(reader.rsource):
      reader.tsource.running()
    else:
      reader.rsource.running()
  else:
    (reader.state == AsyncStreamState.Running)

proc failed*(reader: AsyncStreamReader): bool =
  ## Returns ``true`` if the read stream is in the error state.
  if isNil(reader.readerLoop):
    if isNil(reader.rsource):
      reader.tsource.failed()
    else:
      reader.rsource.failed()
  else:
    (reader.state == AsyncStreamState.Error)

proc closed*(writer: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream is closed.
  (writer.state == AsyncStreamState.Closed)

proc finished*(writer: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream is finished (completed).
  if isNil(writer.writerLoop):
    if isNil(writer.wsource):
      writer.tsource.finished()
    else:
      writer.wsource.finished()
  else:
    (writer.state == AsyncStreamState.Finished)

proc stopped*(writer: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream is stopped (interrupted).
  if isNil(writer.writerLoop):
    if isNil(writer.wsource):
      false
    else:
      writer.wsource.stopped()
  else:
    (writer.state == AsyncStreamState.Stopped)

proc running*(writer: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream is still running.
  if isNil(writer.writerLoop):
    if isNil(writer.wsource):
      writer.tsource.running()
    else:
      writer.wsource.running()
  else:
    (writer.state == AsyncStreamState.Running)

proc failed*(writer: AsyncStreamWriter): bool =
  ## Returns ``true`` if the write stream is in the error state.
  if isNil(writer.writerLoop):
    if isNil(writer.wsource):
      writer.tsource.failed()
    else:
      writer.wsource.failed()
  else:
    (writer.state == AsyncStreamState.Error)

proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {.
     gcsafe, raises: [Defect].}
proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {.
     gcsafe, raises: [Defect].}

proc getAsyncStreamReaderTracker(): AsyncStreamTracker {.inline.} =
  var res = cast[AsyncStreamTracker](getTracker(AsyncStreamReaderTrackerName))
  if isNil(res):
    res = setupAsyncStreamReaderTracker()
  res

proc getAsyncStreamWriterTracker(): AsyncStreamTracker {.inline.} =
  var res = cast[AsyncStreamTracker](getTracker(AsyncStreamWriterTrackerName))
  if isNil(res):
    res = setupAsyncStreamWriterTracker()
  res

proc dumpAsyncStreamReaderTracking(): string {.gcsafe.} =
  var tracker = getAsyncStreamReaderTracker()
  let res = "Opened async stream readers: " & $tracker.opened & "\n" &
            "Closed async stream readers: " & $tracker.closed
  res

proc dumpAsyncStreamWriterTracking(): string {.gcsafe.} =
  var tracker = getAsyncStreamWriterTracker()
  let res = "Opened async stream writers: " & $tracker.opened & "\n" &
            "Closed async stream writers: " & $tracker.closed
  res

proc leakAsyncStreamReader(): bool {.gcsafe.} =
  var tracker = getAsyncStreamReaderTracker()
  tracker.opened != tracker.closed

proc leakAsyncStreamWriter(): bool {.gcsafe.} =
  var tracker = getAsyncStreamWriterTracker()
  tracker.opened != tracker.closed

proc trackAsyncStreamReader(t: AsyncStreamReader) {.inline.} =
  var tracker = getAsyncStreamReaderTracker()
  inc(tracker.opened)

proc untrackAsyncStreamReader*(t: AsyncStreamReader) {.inline.} =
  var tracker = getAsyncStreamReaderTracker()
  inc(tracker.closed)

proc trackAsyncStreamWriter(t: AsyncStreamWriter) {.inline.} =
  var tracker = getAsyncStreamWriterTracker()
  inc(tracker.opened)

proc untrackAsyncStreamWriter*(t: AsyncStreamWriter) {.inline.} =
  var tracker = getAsyncStreamWriterTracker()
  inc(tracker.closed)

proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {.gcsafe.} =
  var res = AsyncStreamTracker(
    opened: 0,
    closed: 0,
    dump: dumpAsyncStreamReaderTracking,
    isLeaked: leakAsyncStreamReader
  )
  addTracker(AsyncStreamReaderTrackerName, res)
  res

proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {.gcsafe.} =
  var res = AsyncStreamTracker(
    opened: 0,
    closed: 0,
    dump: dumpAsyncStreamWriterTracking,
    isLeaked: leakAsyncStreamWriter
  )
  addTracker(AsyncStreamWriterTrackerName, res)
  res

template readLoop(body: untyped): untyped =
  while true:
    if rstream.buffer.dataLen() == 0:
      if rstream.state == AsyncStreamState.Error:
        raise rstream.error

    let (consumed, done) = body
    rstream.buffer.shift(consumed)
    rstream.bytesCount = rstream.bytesCount + uint64(consumed)
    if done:
      break
    else:
      if not(rstream.atEof()):
        await rstream.buffer.wait()

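# `readLoop` drives every buffered read operation below: the injected `body`
# inspects `rstream.buffer` and must evaluate to a `(consumed, done)` tuple,
# where `consumed` is the number of buffered bytes to drop and `done` stops
# the loop. A hypothetical body that drains everything until EOF (essentially
# what `consume()` does) would look like this:
#
#   readLoop():
#     (rstream.buffer.dataLen(), rstream.atEof())
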
proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
                  nbytes: int) {.async.} =
  ## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store
  ## them to ``pbytes``.
  ##
  ## If EOF is received and ``nbytes`` bytes have not yet been read, the
  ## procedure will raise ``AsyncStreamIncompleteError``.
  doAssert(not(isNil(pbytes)), "pbytes must not be nil")
  doAssert(nbytes >= 0, "nbytes must be non-negative integer")

  checkStreamClosed(rstream)

  if nbytes == 0:
    return

  if isNil(rstream.rsource):
    try:
      await readExactly(rstream.tsource, pbytes, nbytes)
    except CancelledError as exc:
      raise exc
    except TransportIncompleteError:
      raise newAsyncStreamIncompleteError()
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      await readExactly(rstream.rsource, pbytes, nbytes)
    else:
      var index = 0
      var pbuffer = cast[ptr UncheckedArray[byte]](pbytes)
      readLoop():
        if rstream.buffer.dataLen() == 0:
          if rstream.atEof():
            raise newAsyncStreamIncompleteError()
        let count = min(nbytes - index, rstream.buffer.dataLen())
        if count > 0:
          rstream.buffer.copyData(addr pbuffer[index], 0, count)
          index += count
        (consumed: count, done: index == nbytes)

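# Usage sketch for `readExactly` (hedged: assumes `rstream` is an
# `AsyncStreamReader` attached to a working transport; the fixed 4-byte
# header is illustrative):
#
#   proc readHeader(rstream: AsyncStreamReader): Future[array[4, byte]] {.async.} =
#     var header: array[4, byte]
#     await rstream.readExactly(addr header[0], len(header))
#     return header
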
proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
               nbytes: int): Future[int] {.async.} =
  ## Perform one read operation on read-only stream ``rstream``.
  ##
  ## If the internal buffer is not empty, up to ``nbytes`` bytes will be
  ## transferred from it, otherwise the procedure will wait until some bytes
  ## become available.
  doAssert(not(isNil(pbytes)), "pbytes must not be nil")
  doAssert(nbytes > 0, "nbytes must be positive value")
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await readOnce(rstream.tsource, pbytes, nbytes)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await readOnce(rstream.rsource, pbytes, nbytes)
    else:
      var count = 0
      readLoop():
        if rstream.buffer.dataLen() == 0:
          (0, rstream.atEof())
        else:
          count = min(rstream.buffer.dataLen(), nbytes)
          rstream.buffer.copyData(pbytes, 0, count)
          (count, true)
      return count

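# Usage sketch for `readOnce` (hedged: pump data chunk-by-chunk; a return
# value of 0 means EOF. `rstream` is assumed to be a working reader and
# `process` an assumed callback):
#
#   proc pump(rstream: AsyncStreamReader) {.async.} =
#     var chunk: array[4096, byte]
#     while true:
#       let got = await rstream.readOnce(addr chunk[0], len(chunk))
#       if got == 0:
#         break
#       process(chunk.toOpenArray(0, got - 1))
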
proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
                sep: seq[byte]): Future[int] {.async.} =
  ## Read data from the read-only stream ``rstream`` until separator ``sep`` is
  ## found.
  ##
  ## On success, the data and separator will be removed from the internal
  ## buffer (consumed). Returned data will include the separator at the end.
  ##
  ## If EOF is received and ``sep`` was not found, the procedure will raise
  ## ``AsyncStreamIncompleteError``.
  ##
  ## If ``nbytes`` bytes have been received and ``sep`` was not found, the
  ## procedure will raise ``AsyncStreamLimitError``.
  ##
  ## Procedure returns the actual number of bytes read.
  doAssert(not(isNil(pbytes)), "pbytes must not be nil")
  doAssert(len(sep) > 0, "separator must not be empty")
  doAssert(nbytes >= 0, "nbytes must be non-negative value")
  checkStreamClosed(rstream)

  if nbytes == 0:
    raise newAsyncStreamLimitError()

  if isNil(rstream.rsource):
    try:
      return await readUntil(rstream.tsource, pbytes, nbytes, sep)
    except CancelledError as exc:
      raise exc
    except TransportIncompleteError:
      raise newAsyncStreamIncompleteError()
    except TransportLimitError:
      raise newAsyncStreamLimitError()
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await readUntil(rstream.rsource, pbytes, nbytes, sep)
    else:
      var pbuffer = cast[ptr UncheckedArray[byte]](pbytes)
      var state = 0
      var k = 0
      readLoop():
        if rstream.atEof():
          raise newAsyncStreamIncompleteError()
        var index = 0
        while index < rstream.buffer.dataLen():
          if k >= nbytes:
            raise newAsyncStreamLimitError()
          let ch = rstream.buffer[index]
          inc(index)
          pbuffer[k] = ch
          inc(k)
          if sep[state] == ch:
            inc(state)
            if state == len(sep):
              break
          else:
            state = 0
        (index, state == len(sep))
      return k

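# Usage sketch for `readUntil` (hedged: reads one CRLF-terminated record;
# the 1024-byte cap is illustrative and `rstream` is assumed to be working):
#
#   proc readRecord(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} =
#     var buf = newSeq[byte](1024)
#     let used = await rstream.readUntil(addr buf[0], len(buf),
#                                        @[byte('\r'), byte('\n')])
#     buf.setLen(used)   # `used` includes the separator bytes
#     return buf
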
proc readLine*(rstream: AsyncStreamReader, limit = 0,
               sep = "\r\n"): Future[string] {.async.} =
  ## Read one line from read-only stream ``rstream``, where ``"line"`` is a
  ## sequence of bytes ending with ``sep`` (default is ``"\r\n"``).
  ##
  ## If EOF is received and ``sep`` was not found, the procedure will return
  ## the partially read bytes.
  ##
  ## If EOF is received and the internal buffer is empty, the procedure will
  ## return an empty string.
  ##
  ## If ``limit`` is greater than 0, the result string will be limited to
  ## ``limit`` bytes.
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await readLine(rstream.tsource, limit, sep)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await readLine(rstream.rsource, limit, sep)
    else:
      let lim = if limit <= 0: -1 else: limit
      var state = 0
      var res = ""
      readLoop():
        if rstream.atEof():
          (0, true)
        else:
          var index = 0
          while index < rstream.buffer.dataLen():
            let ch = char(rstream.buffer[index])
            inc(index)

            if sep[state] == ch:
              inc(state)
              if state == len(sep):
                break
            else:
              if state != 0:
                if limit > 0:
                  let missing = min(state, lim - len(res) - 1)
                  res.add(sep[0 ..< missing])
                else:
                  res.add(sep[0 ..< state])
                # Reset separator match state after a mismatch.
                state = 0
              res.add(ch)
              if len(res) == lim:
                break
          (index, (state == len(sep)) or (lim == len(res)))
      return res

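# Usage sketch for `readLine` (hedged: collects header lines terminated by an
# empty line, as in a simple text protocol; `rstream` is assumed working):
#
#   proc readHeaders(rstream: AsyncStreamReader): Future[seq[string]] {.async.} =
#     var headers: seq[string]
#     while true:
#       let line = await rstream.readLine()
#       if len(line) == 0:
#         break               # empty line or EOF ends the block
#       headers.add(line)
#     return headers
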
proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} =
  ## Read all bytes from read-only stream ``rstream``.
  ##
  ## This procedure allocates a new ``seq[byte]`` buffer and returns it as the
  ## result.
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await read(rstream.tsource)
    except CancelledError as exc:
      raise exc
    except TransportLimitError:
      raise newAsyncStreamLimitError()
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await read(rstream.rsource)
    else:
      var res = newSeq[byte]()
      readLoop():
        if rstream.atEof():
          (0, true)
        else:
          let count = rstream.buffer.dataLen()
          res.add(rstream.buffer.buffer.toOpenArray(0, count - 1))
          (count, false)
      return res

proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} =
  ## Read all bytes (n <= 0) or exactly ``n`` bytes from read-only stream
  ## ``rstream``.
  ##
  ## This procedure allocates a new ``seq[byte]`` buffer and returns it as the
  ## result.
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await read(rstream.tsource, n)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await read(rstream.rsource, n)
    else:
      if n <= 0:
        return await read(rstream.rsource)
      else:
        var res = newSeq[byte]()
        readLoop():
          if rstream.atEof():
            (0, true)
          else:
            let count = min(rstream.buffer.dataLen(), n - len(res))
            res.add(rstream.buffer.buffer.toOpenArray(0, count - 1))
            (count, len(res) == n)
        return res

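# Usage sketch for the `read` overloads (hedged: `read(n)` gathers up to `n`
# bytes (fewer only at EOF) and `read()` gathers everything until EOF, so it
# is only appropriate for bounded inputs; sizes are illustrative):
#
#   proc readAll(rstream: AsyncStreamReader) {.async.} =
#     let header = await rstream.read(16)
#     let body = await rstream.read()
#     discard (header, body)
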
proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} =
  ## Consume (discard) all bytes from read-only stream ``rstream``.
  ##
  ## Return number of bytes actually consumed (discarded).
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await consume(rstream.tsource)
    except CancelledError as exc:
      raise exc
    except TransportLimitError:
      raise newAsyncStreamLimitError()
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await consume(rstream.rsource)
    else:
      var res = 0
      readLoop():
        if rstream.atEof():
          (0, true)
        else:
          res += rstream.buffer.dataLen()
          (rstream.buffer.dataLen(), false)
      return res

proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} =
  ## Consume (discard) all bytes (n <= 0) or ``n`` bytes from read-only stream
  ## ``rstream``.
  ##
  ## Return number of bytes actually consumed (discarded).
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      return await consume(rstream.tsource, n)
    except CancelledError as exc:
      raise exc
    except TransportLimitError:
      raise newAsyncStreamLimitError()
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      return await consume(rstream.rsource, n)
    else:
      if n <= 0:
        return await rstream.consume()
      else:
        var res = 0
        readLoop():
          if rstream.atEof():
            (0, true)
          else:
            let count = min(rstream.buffer.dataLen(), n - res)
            res += count
            (count, res == n)
        return res

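# Usage sketch for `consume` (hedged: skips `n` bytes of padding and treats a
# short count as truncated input; `rstream` is assumed to be working):
#
#   proc skipPadding(rstream: AsyncStreamReader, n: int) {.async.} =
#     let dropped = await rstream.consume(n)
#     if dropped != n:
#       raise newAsyncStreamIncompleteError()
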
proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
     async.} =
  ## Read bytes from stream ``rstream`` until the ``predicate`` callback
  ## is satisfied.
  ##
  ## ``predicate`` callback should return tuple ``(consumed, result)``, where
  ## ``consumed`` is the number of bytes processed and ``result`` is a
  ## completion flag (``true`` if readMessage() should stop reading data,
  ## or ``false`` if readMessage() should continue to read data from stream).
  ##
  ## ``predicate`` callback must copy all the data from ``data`` array and
  ## return the number of bytes it is going to consume.
  ## ``predicate`` callback will receive a zero-length openArray if the stream
  ## is at EOF.
  doAssert(not(isNil(pred)), "`predicate` callback should not be `nil`")
  checkStreamClosed(rstream)

  if isNil(rstream.rsource):
    try:
      await readMessage(rstream.tsource, pred)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamReadError(exc)
  else:
    if isNil(rstream.readerLoop):
      await readMessage(rstream.rsource, pred)
    else:
      readLoop():
        let count = rstream.buffer.dataLen()
        if count == 0:
          if rstream.atEof():
            pred([])
          else:
            # Case, when transport's buffer is not yet filled with data.
            (0, false)
        else:
          pred(rstream.buffer.buffer.toOpenArray(0, count - 1))

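# Usage sketch for `readMessage` (hedged: the predicate shape is assumed to
# match `ReadMessagePredicate` from the transports module, i.e. it takes an
# `openArray[byte]` and returns `(consumed, done)`; the 1 KiB threshold is
# illustrative and the call must run inside an async proc):
#
#   var acc: seq[byte]
#   proc predicate(data: openArray[byte]): tuple[consumed: int, done: bool] =
#     if len(data) == 0:
#       return (0, true)            # zero-length data signals EOF
#     for b in data:
#       acc.add(b)
#     (len(data), len(acc) >= 1024)
#   await rstream.readMessage(predicate)
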
proc write*(wstream: AsyncStreamWriter, pbytes: pointer,
            nbytes: int) {.async.} =
  ## Write sequence of bytes pointed to by ``pbytes`` of length ``nbytes`` to
  ## writer stream ``wstream``.
  ##
  ## ``nbytes`` must be greater than zero.
  checkStreamClosed(wstream)
  if nbytes <= 0:
    raiseEmptyMessageDefect()

  if isNil(wstream.wsource):
    var res: int
    try:
      res = await write(wstream.tsource, pbytes, nbytes)
    except CancelledError as exc:
      raise exc
    except AsyncStreamError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamWriteError(exc)
    if res != nbytes:
      raise newAsyncStreamIncompleteError()
    wstream.bytesCount = wstream.bytesCount + uint64(nbytes)
  else:
    if isNil(wstream.writerLoop):
      await write(wstream.wsource, pbytes, nbytes)
      wstream.bytesCount = wstream.bytesCount + uint64(nbytes)
    else:
      var item = WriteItem(kind: Pointer)
      item.dataPtr = pbytes
      item.size = nbytes
      item.future = newFuture[void]("async.stream.write(pointer)")
      try:
        await wstream.queue.put(item)
        await item.future
        wstream.bytesCount = wstream.bytesCount + uint64(item.size)
      except CancelledError as exc:
        raise exc
      except AsyncStreamError as exc:
        raise exc
      except CatchableError as exc:
        raise newAsyncStreamWriteError(exc)

proc write*(wstream: AsyncStreamWriter, sbytes: seq[byte],
            msglen = -1) {.async.} =
  ## Write sequence of bytes ``sbytes`` of length ``msglen`` to writer
  ## stream ``wstream``.
  ##
  ## Sequence of bytes ``sbytes`` must not be zero-length.
  ##
  ## If ``msglen < 0`` the whole sequence ``sbytes`` will be written to the
  ## stream. If ``msglen > len(sbytes)`` only ``len(sbytes)`` bytes will be
  ## written to the stream.
  checkStreamClosed(wstream)
  let length = if msglen <= 0: len(sbytes) else: min(msglen, len(sbytes))
  if length <= 0:
    raiseEmptyMessageDefect()

  if isNil(wstream.wsource):
    var res: int
    try:
      res = await write(wstream.tsource, sbytes, length)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamWriteError(exc)
    if res != length:
      raise newAsyncStreamIncompleteError()
    wstream.bytesCount = wstream.bytesCount + uint64(length)
  else:
    if isNil(wstream.writerLoop):
      await write(wstream.wsource, sbytes, length)
      wstream.bytesCount = wstream.bytesCount + uint64(length)
    else:
      var item = WriteItem(kind: Sequence)
      if not isLiteral(sbytes):
        shallowCopy(item.dataSeq, sbytes)
      else:
        item.dataSeq = sbytes
      item.size = length
      item.future = newFuture[void]("async.stream.write(seq)")
      try:
        await wstream.queue.put(item)
        await item.future
        wstream.bytesCount = wstream.bytesCount + uint64(item.size)
      except CancelledError as exc:
        raise exc
      except AsyncStreamError as exc:
        raise exc
      except CatchableError as exc:
        raise newAsyncStreamWriteError(exc)

proc write*(wstream: AsyncStreamWriter, sbytes: string,
            msglen = -1) {.async.} =
  ## Write string ``sbytes`` of length ``msglen`` to writer stream ``wstream``.
  ##
  ## String ``sbytes`` must not be zero-length.
  ##
  ## If ``msglen < 0`` the whole string ``sbytes`` will be written to stream.
  ## If ``msglen > len(sbytes)`` only ``len(sbytes)`` bytes will be written to
  ## stream.
  checkStreamClosed(wstream)
  let length = if msglen <= 0: len(sbytes) else: min(msglen, len(sbytes))
  if length <= 0:
    raiseEmptyMessageDefect()

  if isNil(wstream.wsource):
    var res: int
    try:
      res = await write(wstream.tsource, sbytes, length)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newAsyncStreamWriteError(exc)
    if res != length:
      raise newAsyncStreamIncompleteError()
    wstream.bytesCount = wstream.bytesCount + uint64(length)
  else:
    if isNil(wstream.writerLoop):
      await write(wstream.wsource, sbytes, length)
      wstream.bytesCount = wstream.bytesCount + uint64(length)
    else:
      var item = WriteItem(kind: String)
      if not isLiteral(sbytes):
        shallowCopy(item.dataStr, sbytes)
      else:
        item.dataStr = sbytes
      item.size = length
      item.future = newFuture[void]("async.stream.write(string)")
      try:
        await wstream.queue.put(item)
        await item.future
        wstream.bytesCount = wstream.bytesCount + uint64(item.size)
      except CancelledError as exc:
        raise exc
      except AsyncStreamError as exc:
        raise exc
      except CatchableError as exc:
        raise newAsyncStreamWriteError(exc)

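# Usage sketch (illustrative only, not part of the module API): exercising
# the write() overloads above on an already constructed AsyncStreamWriter.
# The guard symbol and proc name are assumptions made for this example.
when defined(chronosStreamsExamples):
  proc writeExample(wstream: AsyncStreamWriter) {.async.} =
    # Write a whole string (msglen < 0 writes everything).
    await wstream.write("GET / HTTP/1.1\r\n\r\n")
    # Write only the first 4 bytes of the sequence (msglen = 4).
    await wstream.write(@[0x01'u8, 0x02'u8, 0x03'u8, 0x04'u8, 0x05'u8], 4)
    # Failures of the underlying transport surface as AsyncStreamWriteError,
    # short writes as AsyncStreamIncompleteError (see the bodies above).
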
proc finish*(wstream: AsyncStreamWriter) {.async.} =
  ## Finish write stream ``wstream``.
  checkStreamClosed(wstream)

  if not isNil(wstream.wsource):
    if isNil(wstream.writerLoop):
      await wstream.wsource.finish()
    else:
      var item = WriteItem(kind: Pointer)
      item.size = 0
      item.future = newFuture[void]("async.stream.finish")
      try:
        await wstream.queue.put(item)
        await item.future
      except CancelledError as exc:
        raise exc
      except AsyncStreamError as exc:
        raise exc
      except CatchableError as exc:
        raise newAsyncStreamWriteError(exc)

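# Usage sketch (illustrative only): finish() is called once all payload has
# been queued and signals end-of-stream to the layer below (a zero-sized
# Pointer item for loop-driven writers, see above). Guard symbol and proc
# name are assumptions made for this example.
when defined(chronosStreamsExamples):
  proc finishExample(wstream: AsyncStreamWriter) {.async.} =
    await wstream.write("last chunk")
    await wstream.finish()
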
proc join*(rw: AsyncStreamRW): Future[void] =
  ## Get Future[void] which will be completed when stream becomes finished or
  ## closed.
  when rw is AsyncStreamReader:
    var retFuture = newFuture[void]("async.stream.reader.join")
  else:
    var retFuture = newFuture[void]("async.stream.writer.join")

  proc continuation(udata: pointer) {.gcsafe.} =
    retFuture.complete()

  proc cancellation(udata: pointer) {.gcsafe.} =
    rw.future.removeCallback(continuation, cast[pointer](retFuture))

  if not(rw.future.finished()):
    rw.future.addCallback(continuation, cast[pointer](retFuture))
    rw.future.cancelCallback = cancellation
  else:
    retFuture.complete()

  return retFuture

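# Usage sketch (illustrative only): join() returns a future that can be
# awaited to learn when a reader or writer loop has stopped, e.g. to tie a
# helper task's lifetime to the stream. Guard symbol and proc name are
# assumptions made for this example.
when defined(chronosStreamsExamples):
  proc joinExample(rstream: AsyncStreamReader) {.async.} =
    # Completes once ``rstream`` becomes finished or is closed elsewhere.
    await rstream.join()
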
proc close*(rw: AsyncStreamRW) =
  ## Close and free resources of stream ``rw``.
  ##
  ## Note that the close() procedure does not complete immediately!
  if rw.closed():
    raiseAsyncStreamIncorrectDefect("Stream is already closed!")

  rw.state = AsyncStreamState.Closed

  proc continuation(udata: pointer) {.raises: [Defect].} =
    if not isNil(rw.udata):
      GC_unref(cast[ref int](rw.udata))
    if not(rw.future.finished()):
      rw.future.complete()
    when rw is AsyncStreamReader:
      untrackAsyncStreamReader(rw)
    elif rw is AsyncStreamWriter:
      untrackAsyncStreamWriter(rw)

  when rw is AsyncStreamReader:
    if isNil(rw.rsource) or isNil(rw.readerLoop) or isNil(rw.future):
      callSoon(continuation)
    else:
      if rw.future.finished():
        callSoon(continuation)
      else:
        rw.future.addCallback(continuation)
        rw.future.cancel()
  elif rw is AsyncStreamWriter:
    if isNil(rw.wsource) or isNil(rw.writerLoop) or isNil(rw.future):
      callSoon(continuation)
    else:
      if rw.future.finished():
        callSoon(continuation)
      else:
        rw.future.addCallback(continuation)
        rw.future.cancel()

proc closeWait*(rw: AsyncStreamRW): Future[void] =
  ## Close and free resources of stream ``rw``.
  rw.close()
  rw.join()

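# Usage sketch (illustrative only): the usual teardown order for a writer -
# finish the stream, then wait for it to be fully closed. Guard symbol and
# proc name are assumptions made for this example.
when defined(chronosStreamsExamples):
  proc teardownExample(wstream: AsyncStreamWriter) {.async.} =
    await wstream.finish()     # signal end-of-stream downstream
    await wstream.closeWait()  # close() followed by join()
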
proc startReader(rstream: AsyncStreamReader) =
  rstream.state = Running
  if not isNil(rstream.readerLoop):
    rstream.future = rstream.readerLoop(rstream)
  else:
    rstream.future = newFuture[void]("async.stream.empty.reader")

proc startWriter(wstream: AsyncStreamWriter) =
  wstream.state = Running
  if not isNil(wstream.writerLoop):
    wstream.future = wstream.writerLoop(wstream)
  else:
    wstream.future = newFuture[void]("async.stream.empty.writer")

proc init*(child, wsource: AsyncStreamWriter, loop: StreamWriterLoop,
           queueSize = AsyncStreamDefaultQueueSize) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = loop
  child.wsource = wsource
  child.tsource = wsource.tsource
  child.queue = newAsyncQueue[WriteItem](queueSize)
  trackAsyncStreamWriter(child)
  child.startWriter()

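# The init()/StreamWriterLoop pair above is how layered writers (for example
# chunking or TLS layers) are built: the loop drains ``queue`` and forwards
# the items produced by write()/finish(). Below is a minimal passthrough
# sketch. The guard symbol, the proc names and the decision to reject
# raw-pointer writes are assumptions of this example; a production loop must
# also update stream state and handle cancellation.
when defined(chronosStreamsExamples):
  proc passthroughWriteLoop(stream: AsyncStreamWriter) {.async.} =
    # Runs until the loop's future is cancelled by close().
    while true:
      let item = await stream.queue.get()
      try:
        case item.kind
        of Sequence:
          await stream.wsource.write(item.dataSeq, item.size)
        of String:
          await stream.wsource.write(item.dataStr, item.size)
        of Pointer:
          # finish() enqueues a zero-sized Pointer item as the end-of-stream
          # marker; raw-pointer writes are not supported by this sketch.
          doAssert(item.size == 0)
          await stream.wsource.finish()
        item.future.complete()
      except CatchableError as exc:
        # Report the failure back to the pending write()/finish() call.
        if not(item.future.finished()):
          item.future.fail(exc)

  proc newPassthroughWriter(wsource: AsyncStreamWriter): AsyncStreamWriter =
    # Allocate and wire up a layered writer on top of ``wsource``.
    var res = AsyncStreamWriter()
    res.init(wsource, passthroughWriteLoop)
    res
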
proc init*[T](child, wsource: AsyncStreamWriter, loop: StreamWriterLoop,
              queueSize = AsyncStreamDefaultQueueSize, udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = loop
  child.wsource = wsource
  child.tsource = wsource.tsource
  child.queue = newAsyncQueue[WriteItem](queueSize)
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamWriter(child)
  child.startWriter()

proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop,
           bufferSize = AsyncStreamDefaultBufferSize) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = loop
  child.rsource = rsource
  child.tsource = rsource.tsource
  child.buffer = AsyncBuffer.init(bufferSize)
  trackAsyncStreamReader(child)
  child.startReader()

proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop,
              bufferSize = AsyncStreamDefaultBufferSize,
              udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = loop
  child.rsource = rsource
  child.tsource = rsource.tsource
  child.buffer = AsyncBuffer.init(bufferSize)
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamReader(child)
  child.startReader()

proc init*(child: AsyncStreamWriter, tsource: StreamTransport) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = nil
  child.wsource = nil
  child.tsource = tsource
  trackAsyncStreamWriter(child)
  child.startWriter()

proc init*[T](child: AsyncStreamWriter, tsource: StreamTransport,
              udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = nil
  child.wsource = nil
  child.tsource = tsource
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamWriter(child)
  child.startWriter()

proc init*(child, wsource: AsyncStreamWriter) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = nil
  child.wsource = wsource
  child.tsource = wsource.tsource
  trackAsyncStreamWriter(child)
  child.startWriter()

proc init*[T](child, wsource: AsyncStreamWriter, udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamWriter
  ## parameters.
  child.writerLoop = nil
  child.wsource = wsource
  child.tsource = wsource.tsource
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamWriter(child)
  child.startWriter()

proc init*(child: AsyncStreamReader, tsource: StreamTransport) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = nil
  child.rsource = nil
  child.tsource = tsource
  trackAsyncStreamReader(child)
  child.startReader()

proc init*[T](child: AsyncStreamReader, tsource: StreamTransport,
              udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = nil
  child.rsource = nil
  child.tsource = tsource
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamReader(child)
  child.startReader()

proc init*(child, rsource: AsyncStreamReader) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = nil
  child.rsource = rsource
  child.tsource = rsource.tsource
  trackAsyncStreamReader(child)
  child.startReader()

proc init*[T](child, rsource: AsyncStreamReader, udata: ref T) =
  ## Initialize newly allocated object ``child`` with AsyncStreamReader
  ## parameters.
  child.readerLoop = nil
  child.rsource = rsource
  child.tsource = rsource.tsource
  if not isNil(udata):
    GC_ref(udata)
    child.udata = cast[pointer](udata)
  trackAsyncStreamReader(child)
  child.startReader()

proc newAsyncStreamReader*[T](rsource: AsyncStreamReader,
                              loop: StreamReaderLoop,
                              bufferSize = AsyncStreamDefaultBufferSize,
                              udata: ref T): AsyncStreamReader =
  ## Create new AsyncStreamReader object, which will use other async stream
  ## reader ``rsource`` as source data channel.
  ##
  ## ``loop`` is main reading loop procedure.
  ##
  ## ``bufferSize`` is internal buffer size.
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamReader
  ## object.
  var res = AsyncStreamReader()
  res.init(rsource, loop, bufferSize, udata)
  res

proc newAsyncStreamReader*(rsource: AsyncStreamReader,
                           loop: StreamReaderLoop,
                           bufferSize = AsyncStreamDefaultBufferSize
                          ): AsyncStreamReader =
  ## Create new AsyncStreamReader object, which will use other async stream
  ## reader ``rsource`` as source data channel.
  ##
  ## ``loop`` is main reading loop procedure.
  ##
  ## ``bufferSize`` is internal buffer size.
  var res = AsyncStreamReader()
  res.init(rsource, loop, bufferSize)
  res

proc newAsyncStreamReader*[T](tsource: StreamTransport,
                              udata: ref T): AsyncStreamReader =
  ## Create new AsyncStreamReader object, which will use stream transport
  ## ``tsource`` as source data channel.
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamReader
  ## object.
  var res = AsyncStreamReader()
  res.init(tsource, udata)
  res

proc newAsyncStreamReader*(tsource: StreamTransport): AsyncStreamReader =
  ## Create new AsyncStreamReader object, which will use stream transport
  ## ``tsource`` as source data channel.
  var res = AsyncStreamReader()
  res.init(tsource)
  res

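# Usage sketch (illustrative only): wrapping a connected StreamTransport in
# an AsyncStreamReader. The address, the guard symbol and the proc name are
# assumptions; readLine()/readExactly() are the reader calls declared
# earlier in this module.
when defined(chronosStreamsExamples):
  proc readerExample() {.async.} =
    let transp = await connect(initTAddress("127.0.0.1:8080"))
    let reader = newAsyncStreamReader(transp)
    try:
      # Read a CR/LF terminated line, then a fixed-size header.
      let line = await reader.readLine()
      var header: array[4, byte]
      await reader.readExactly(addr header[0], len(header))
      discard line
    finally:
      await reader.closeWait()
      await transp.closeWait()
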
proc newAsyncStreamWriter*[T](wsource: AsyncStreamWriter,
                              loop: StreamWriterLoop,
                              queueSize = AsyncStreamDefaultQueueSize,
                              udata: ref T): AsyncStreamWriter =
  ## Create new AsyncStreamWriter object which will use other AsyncStreamWriter
  ## object ``wsource`` as data channel.
  ##
  ## ``loop`` is main writing loop procedure.
  ##
  ## ``queueSize`` is writing queue size (default size is unlimited).
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamWriter
  ## object.
  var res = AsyncStreamWriter()
  res.init(wsource, loop, queueSize, udata)
  res

proc newAsyncStreamWriter*(wsource: AsyncStreamWriter,
                           loop: StreamWriterLoop,
                           queueSize = AsyncStreamDefaultQueueSize
                          ): AsyncStreamWriter =
  ## Create new AsyncStreamWriter object which will use other AsyncStreamWriter
  ## object ``wsource`` as data channel.
  ##
  ## ``loop`` is main writing loop procedure.
  ##
  ## ``queueSize`` is writing queue size (default size is unlimited).
  var res = AsyncStreamWriter()
  res.init(wsource, loop, queueSize)
  res

proc newAsyncStreamWriter*[T](tsource: StreamTransport,
                              udata: ref T): AsyncStreamWriter =
  ## Create new AsyncStreamWriter object which will use stream transport
  ## ``tsource`` as data channel.
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamWriter
  ## object.
  var res = AsyncStreamWriter()
  res.init(tsource, udata)
  res

proc newAsyncStreamWriter*(tsource: StreamTransport): AsyncStreamWriter =
  ## Create new AsyncStreamWriter object which will use stream transport
  ## ``tsource`` as data channel.
  var res = AsyncStreamWriter()
  res.init(tsource)
  res

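# Usage sketch (illustrative only): wrapping a connected StreamTransport in
# an AsyncStreamWriter and pushing data through it. The address, the guard
# symbol and the proc name are assumptions made for this example.
when defined(chronosStreamsExamples):
  proc writerExample() {.async.} =
    let transp = await connect(initTAddress("127.0.0.1:8080"))
    let writer = newAsyncStreamWriter(transp)
    try:
      await writer.write("PING\r\n")
      await writer.finish()
    finally:
      await writer.closeWait()
      await transp.closeWait()
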
proc newAsyncStreamWriter*[T](wsource: AsyncStreamWriter,
                              udata: ref T): AsyncStreamWriter =
  ## Create copy of AsyncStreamWriter object ``wsource``.
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamWriter
  ## object.
  var res = AsyncStreamWriter()
  res.init(wsource, udata)
  res

proc newAsyncStreamWriter*(wsource: AsyncStreamWriter): AsyncStreamWriter =
  ## Create copy of AsyncStreamWriter object ``wsource``.
  var res = AsyncStreamWriter()
  res.init(wsource)
  res

proc newAsyncStreamReader*[T](rsource: AsyncStreamReader,
                              udata: ref T): AsyncStreamReader =
  ## Create copy of AsyncStreamReader object ``rsource``.
  ##
  ## ``udata`` - user object which will be associated with new AsyncStreamReader
  ## object.
  var res = AsyncStreamReader()
  res.init(rsource, udata)
  res

proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader =
  ## Create copy of AsyncStreamReader object ``rsource``.
  var res = AsyncStreamReader()
  res.init(rsource)
  res

proc getUserData*[T](rw: AsyncStreamRW): T {.inline.} =
  ## Obtain user data associated with AsyncStreamReader or AsyncStreamWriter
  ## object ``rw``.
  cast[T](rw.udata)
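
# Usage sketch (illustrative only): associating a user object with a stream
# via the [T] constructors and retrieving it with getUserData(). The session
# type, the guard symbol and the proc name are assumptions made for this
# example.
when defined(chronosStreamsExamples):
  type
    ExampleSession = object
      name: string

  proc udataExample(transp: StreamTransport) =
    var session: ref ExampleSession
    new(session)
    session.name = "client-1"
    # The reader keeps a GC reference to ``session`` until it is closed.
    let reader = newAsyncStreamReader(transp, session)
    doAssert(getUserData[ref ExampleSession](reader).name == "client-1")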