prevent http `closeWait` future from being cancelled (#486)
* simplify `closeWait` implementations
* remove redundant cancellation callbacks
* use `noCancel` to avoid forgetting the right future flags
* add a few missing raises trackers
* enforce `OwnCancelSchedule` on manually created futures that don't raise `CancelledError`
* ensure cancellations don't reach internal futures
parent 41f77d261e
commit e15dc3b41f
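The diff below applies this pattern across the http, stream and transport code. As a hedged illustration of the idea (the `MyResource` type and its fields are invented for this sketch and are not part of the diff), a `closeWait` written this way cannot be interrupted by cancellation:

    import chronos

    type MyResource = ref object
      reader, writer: StreamTransport   # hypothetical fields

    proc closeWait*(res: MyResource) {.async: (raises: []).} =
      # Collect the per-transport close futures; none of them raise.
      var pending: seq[Future[void].Raising([])]
      if not isNil(res.reader): pending.add(res.reader.closeWait())
      if not isNil(res.writer): pending.add(res.writer.closeWait())
      # `noCancel` shields the wait: even if the caller of this proc is
      # cancelled, cleanup still runs to completion before control returns.
      await noCancel(allFutures(pending))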
@@ -43,7 +43,7 @@ proc closeWait*(bstream: HttpBodyReader) {.async: (raises: []).} =
   ## Close and free resource allocated by body reader.
   if bstream.bstate == HttpState.Alive:
     bstream.bstate = HttpState.Closing
-    var res = newSeq[Future[void]]()
+    var res = newSeq[Future[void].Raising([])]()
     # We closing streams in reversed order because stream at position [0], uses
     # data from stream at position [1].
     for index in countdown((len(bstream.streams) - 1), 0):
@@ -68,7 +68,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async: (raises: []).} =
   ## Close and free all the resources allocated by body writer.
   if bstream.bstate == HttpState.Alive:
     bstream.bstate = HttpState.Closing
-    var res = newSeq[Future[void]]()
+    var res = newSeq[Future[void].Raising([])]()
     for index in countdown(len(bstream.streams) - 1, 0):
       res.add(bstream.streams[index].closeWait())
     await noCancel(allFutures(res))
@@ -294,7 +294,7 @@ proc new*(t: typedesc[HttpSessionRef],
     if HttpClientFlag.Http11Pipeline in flags:
       sessionWatcher(res)
     else:
-      Future[void].Raising([]).init("session.watcher.placeholder")
+      nil
   res

 proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] =
@@ -607,7 +607,7 @@ proc closeWait(conn: HttpClientConnectionRef) {.async: (raises: []).} =
     conn.state = HttpClientConnectionState.Closing
     let pending =
       block:
-        var res: seq[Future[void]]
+        var res: seq[Future[void].Raising([])]
         if not(isNil(conn.reader)) and not(conn.reader.closed()):
           res.add(conn.reader.closeWait())
         if not(isNil(conn.writer)) and not(conn.writer.closed()):
@@ -847,14 +847,14 @@ proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).} =
       break

 proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} =
-  var pending: seq[FutureBase]
+  var pending: seq[Future[void].Raising([])]
   if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}:
     request.state = HttpReqRespState.Closing
     if not(isNil(request.writer)):
       if not(request.writer.closed()):
-        pending.add(FutureBase(request.writer.closeWait()))
+        pending.add(request.writer.closeWait())
       request.writer = nil
-    pending.add(FutureBase(request.releaseConnection()))
+    pending.add(request.releaseConnection())
     await noCancel(allFutures(pending))
     request.session = nil
     request.error = nil
@@ -862,14 +862,14 @@ proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} =
     untrackCounter(HttpClientRequestTrackerName)

 proc closeWait*(response: HttpClientResponseRef) {.async: (raises: []).} =
-  var pending: seq[FutureBase]
+  var pending: seq[Future[void].Raising([])]
   if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}:
     response.state = HttpReqRespState.Closing
     if not(isNil(response.reader)):
       if not(response.reader.closed()):
-        pending.add(FutureBase(response.reader.closeWait()))
+        pending.add(response.reader.closeWait())
       response.reader = nil
-    pending.add(FutureBase(response.releaseConnection()))
+    pending.add(response.releaseConnection())
     await noCancel(allFutures(pending))
     response.session = nil
     response.error = nil
@@ -523,15 +523,13 @@ proc closeWait*(ab: AsyncEventQueue): Future[void] {.
     {FutureFlag.OwnCancelSchedule})
   proc continuation(udata: pointer) {.gcsafe.} =
     retFuture.complete()
-  proc cancellation(udata: pointer) {.gcsafe.} =
-    # We are not going to change the state of `retFuture` to cancelled, so we
-    # will prevent the entire sequence of Futures from being cancelled.
-    discard
+  # Ignore cancellation requests - we'll complete the future soon enough
+  retFuture.cancelCallback = nil

   ab.close()
   # Schedule `continuation` to be called only after all the `reader`
   # notifications will be scheduled and processed.
-  retFuture.cancelCallback = cancellation
   callSoon(continuation)
   retFuture

@@ -34,6 +34,19 @@ type

   FutureFlag* {.pure.} = enum
     OwnCancelSchedule
+      ## When OwnCancelSchedule is set, the owner of the future is responsible
+      ## for implementing cancellation in one of 3 ways:
+      ##
+      ## * ensure that cancellation requests never reach the future by means of
+      ##   not exposing it to user code, `await` and `tryCancel`
+      ## * set `cancelCallback` to `nil` to stop cancellation propagation - this
+      ##   is appropriate when it is expected that the future will be completed
+      ##   in a regular way "soon"
+      ## * set `cancelCallback` to a handler that implements cancellation in an
+      ##   operation-specific way
+      ##
+      ## If `cancelCallback` is not set and the future gets cancelled, a
+      ## `Defect` will be raised.

   FutureFlags* = set[FutureFlag]

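As a side note on the second option listed in the doc comment above, here is a minimal sketch of an owner that parks the cancel callback because it completes the future itself shortly; the proc name `signalDone` and the future name string are invented for the example:

    import chronos

    proc signalDone(): Future[void] {.async: (raw: true, raises: []).} =
      let retFuture = Future[void].Raising([]).init(
        "example.signal.done", {FutureFlag.OwnCancelSchedule})
      # Drop the defect-raising callback installed by `OwnCancelSchedule`,
      # so cancellation requests are silently ignored...
      retFuture.cancelCallback = nil
      proc continuation(udata: pointer) {.gcsafe.} =
        retFuture.complete()
      # ...because the future is completed on the next event-loop tick anyway.
      callSoon(continuation)
      retFuture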
@@ -104,6 +117,12 @@ proc internalInitFutureBase*(fut: FutureBase, loc: ptr SrcLoc,
   fut.internalState = state
   fut.internalLocation[LocationKind.Create] = loc
   fut.internalFlags = flags
+  if FutureFlag.OwnCancelSchedule in flags:
+    # Owners must replace `cancelCallback` with `nil` if they want to ignore
+    # cancellations
+    fut.internalCancelcb = proc(_: pointer) =
+      raiseAssert "Cancellation request for non-cancellable future"
+
   if state != FutureState.Pending:
     fut.internalLocation[LocationKind.Finish] = loc

@@ -1013,6 +1013,7 @@ proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {.
   if future.finished():
     retFuture.complete()
   else:
+    retFuture.cancelCallback = nil
     cancelSoon(future, continuation, cast[pointer](retFuture), loc)

   retFuture
@@ -1057,6 +1058,7 @@ proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: as
   if future.finished():
     completeFuture()
   else:
+    retFuture.cancelCallback = nil
     future.addCallback(continuation)
   retFuture

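For context, `noCancel` is the wrapper the cleanup paths in this commit rely on: the future it returns ignores cancellation requests and only finishes once the wrapped future does. A small usage sketch, with an arbitrary sleep standing in for real cleanup work:

    import chronos

    proc cleanup() {.async: (raises: []).} =
      # Even if `cleanup` is cancelled while waiting, the inner sleep runs to
      # completion and no CancelledError escapes - hence the empty raises list.
      await noCancel(sleepAsync(100.milliseconds))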
@@ -18,45 +18,6 @@ proc makeNoRaises*(): NimNode {.compileTime.} =

   ident"void"

-macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped =
-  ## Given a Future type instance, return a type storing `{.raises.}`
-  ## information
-  ##
-  ## Note; this type may change in the future
-  E.expectKind(nnkBracket)
-
-  let raises = if E.len == 0:
-    makeNoRaises()
-  else:
-    nnkTupleConstr.newTree(E.mapIt(it))
-  nnkBracketExpr.newTree(
-    ident "InternalRaisesFuture",
-    nnkDotExpr.newTree(F, ident"T"),
-    raises
-  )
-
-template init*[T, E](
-    F: type InternalRaisesFuture[T, E], fromProc: static[string] = ""): F =
-  ## Creates a new pending future.
-  ##
-  ## Specifying ``fromProc``, which is a string specifying the name of the proc
-  ## that this future belongs to, is a good habit as it helps with debugging.
-  let res = F()
-  internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {})
-  res
-
-template init*[T, E](
-    F: type InternalRaisesFuture[T, E], fromProc: static[string] = "",
-    flags: static[FutureFlags]): F =
-  ## Creates a new pending future.
-  ##
-  ## Specifying ``fromProc``, which is a string specifying the name of the proc
-  ## that this future belongs to, is a good habit as it helps with debugging.
-  let res = F()
-  internalInitFutureBase(
-    res, getSrcLocation(fromProc), FutureState.Pending, flags)
-  res
-
 proc dig(n: NimNode): NimNode {.compileTime.} =
   # Dig through the layers of type to find the raises list
   if n.eqIdent("void"):
@@ -87,6 +48,58 @@ proc members(tup: NimNode): seq[NimNode] {.compileTime.} =
     for t in tup.members():
       result.add(t)

+macro hasException(raises: typedesc, ident: static string): bool =
+  newLit(raises.members.anyIt(it.eqIdent(ident)))
+
+macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped =
+  ## Given a Future type instance, return a type storing `{.raises.}`
+  ## information
+  ##
+  ## Note; this type may change in the future
+  E.expectKind(nnkBracket)
+
+  let raises = if E.len == 0:
+    makeNoRaises()
+  else:
+    nnkTupleConstr.newTree(E.mapIt(it))
+  nnkBracketExpr.newTree(
+    ident "InternalRaisesFuture",
+    nnkDotExpr.newTree(F, ident"T"),
+    raises
+  )
+
+template init*[T, E](
+    F: type InternalRaisesFuture[T, E], fromProc: static[string] = ""): F =
+  ## Creates a new pending future.
+  ##
+  ## Specifying ``fromProc``, which is a string specifying the name of the proc
+  ## that this future belongs to, is a good habit as it helps with debugging.
+  when not hasException(type(E), "CancelledError"):
+    static:
+      raiseAssert "Manually created futures must either own cancellation schedule or raise CancelledError"
+
+  let res = F()
+  internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {})
+  res
+
+template init*[T, E](
+    F: type InternalRaisesFuture[T, E], fromProc: static[string] = "",
+    flags: static[FutureFlags]): F =
+  ## Creates a new pending future.
+  ##
+  ## Specifying ``fromProc``, which is a string specifying the name of the proc
+  ## that this future belongs to, is a good habit as it helps with debugging.
+  let res = F()
+  when not hasException(type(E), "CancelledError"):
+    static:
+      doAssert FutureFlag.OwnCancelSchedule in flags,
+        "Manually created futures must either own cancellation schedule or raise CancelledError"
+
+  internalInitFutureBase(
+    res, getSrcLocation(fromProc), FutureState.Pending, flags)
+  res
+
 proc containsSignature(members: openArray[NimNode], typ: NimNode): bool {.compileTime.} =
   let typHash = signatureHash(typ)

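A rough sketch of how the new compile-time check behaves for manually created futures; the future name strings below are illustrative only:

    import chronos

    # Fine: the raises list includes CancelledError, so regular cancellation
    # handling applies and no special flag is needed.
    let cancellable = Future[void].Raising([CancelledError]).init(
      "example.cancellable")

    # Fine: no CancelledError in the raises list, but the owner takes over
    # cancellation handling via OwnCancelSchedule.
    let owned = Future[void].Raising([]).init(
      "example.owned", {FutureFlag.OwnCancelSchedule})

    # Rejected at compile time with "Manually created futures must either own
    # cancellation schedule or raise CancelledError":
    # let bad = Future[void].Raising([]).init("example.bad")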
@@ -77,7 +77,7 @@ type
     udata: pointer
     error*: ref AsyncStreamError
     bytesCount*: uint64
-    future: Future[void]
+    future: Future[void].Raising([])

   AsyncStreamWriter* = ref object of RootRef
     wsource*: AsyncStreamWriter
|
||||||
error*: ref AsyncStreamError
|
error*: ref AsyncStreamError
|
||||||
udata: pointer
|
udata: pointer
|
||||||
bytesCount*: uint64
|
bytesCount*: uint64
|
||||||
future: Future[void]
|
future: Future[void].Raising([])
|
||||||
|
|
||||||
AsyncStream* = object of RootObj
|
AsyncStream* = object of RootObj
|
||||||
reader*: AsyncStreamReader
|
reader*: AsyncStreamReader
|
||||||
|
@@ -897,44 +897,27 @@ proc close*(rw: AsyncStreamRW) =
       rw.future.addCallback(continuation)
       rw.future.cancelSoon()

-proc closeWait*(rw: AsyncStreamRW): Future[void] {.
-    async: (raw: true, raises: []).} =
+proc closeWait*(rw: AsyncStreamRW): Future[void] {.async: (raises: []).} =
   ## Close and frees resources of stream ``rw``.
-  const FutureName =
-    when rw is AsyncStreamReader:
-      "async.stream.reader.closeWait"
-    else:
-      "async.stream.writer.closeWait"
-
-  let retFuture = Future[void].Raising([]).init(FutureName)
-
-  if rw.closed():
-    retFuture.complete()
-    return retFuture
-
-  proc continuation(udata: pointer) {.gcsafe, raises:[].} =
-    retFuture.complete()
-
-  rw.close()
-  if rw.future.finished():
-    retFuture.complete()
-  else:
-    rw.future.addCallback(continuation, cast[pointer](retFuture))
-  retFuture
+  if not rw.closed():
+    rw.close()
+    await noCancel(rw.join())

 proc startReader(rstream: AsyncStreamReader) =
   rstream.state = Running
   if not isNil(rstream.readerLoop):
     rstream.future = rstream.readerLoop(rstream)
   else:
-    rstream.future = newFuture[void]("async.stream.empty.reader")
+    rstream.future = Future[void].Raising([]).init(
+      "async.stream.empty.reader", {FutureFlag.OwnCancelSchedule})

 proc startWriter(wstream: AsyncStreamWriter) =
   wstream.state = Running
   if not isNil(wstream.writerLoop):
     wstream.future = wstream.writerLoop(wstream)
   else:
-    wstream.future = newFuture[void]("async.stream.empty.writer")
+    wstream.future = Future[void].Raising([]).init(
+      "async.stream.empty.writer", {FutureFlag.OwnCancelSchedule})

 proc init*(child, wsource: AsyncStreamWriter, loop: StreamWriterLoop,
            queueSize = AsyncStreamDefaultQueueSize) =
@@ -73,7 +73,7 @@ when defined(windows) or defined(nimdoc):
       udata*: pointer                # User-defined pointer
       flags*: set[ServerFlags]       # Flags
       bufferSize*: int               # Size of internal transports' buffer
-      loopFuture*: Future[void]      # Server's main Future
+      loopFuture*: Future[void].Raising([]) # Server's main Future
       domain*: Domain                # Current server domain (IPv4 or IPv6)
       apending*: bool
       asock*: AsyncFD                # Current AcceptEx() socket
@@ -92,7 +92,7 @@ else:
       udata*: pointer                # User-defined pointer
       flags*: set[ServerFlags]       # Flags
       bufferSize*: int               # Size of internal transports' buffer
-      loopFuture*: Future[void]      # Server's main Future
+      loopFuture*: Future[void].Raising([]) # Server's main Future
       errorCode*: OSErrorCode        # Current error code
       dualstack*: DualStackType      # IPv4/IPv6 dualstack parameters
@@ -44,7 +44,7 @@ type
     remote: TransportAddress         # Remote address
     udata*: pointer                  # User-driven pointer
     function: DatagramCallback       # Receive data callback
-    future: Future[void]             # Transport's life future
+    future: Future[void].Raising([]) # Transport's life future
     raddr: Sockaddr_storage          # Reader address storage
     ralen: SockLen                   # Reader address length
     waddr: Sockaddr_storage          # Writer address storage
@@ -359,7 +359,8 @@ when defined(windows):
         res.queue = initDeque[GramVector]()
       res.udata = udata
       res.state = {ReadPaused, WritePaused}
-      res.future = newFuture[void]("datagram.transport")
+      res.future = Future[void].Raising([]).init(
+        "datagram.transport", {FutureFlag.OwnCancelSchedule})
       res.rovl.data = CompletionData(cb: readDatagramLoop,
                                      udata: cast[pointer](res))
       res.wovl.data = CompletionData(cb: writeDatagramLoop,
|
||||||
res.queue = initDeque[GramVector]()
|
res.queue = initDeque[GramVector]()
|
||||||
res.udata = udata
|
res.udata = udata
|
||||||
res.state = {ReadPaused, WritePaused}
|
res.state = {ReadPaused, WritePaused}
|
||||||
res.future = newFuture[void]("datagram.transport")
|
res.future = Future[void].Raising([]).init(
|
||||||
|
"datagram.transport", {FutureFlag.OwnCancelSchedule})
|
||||||
GC_ref(res)
|
GC_ref(res)
|
||||||
# Start tracking transport
|
# Start tracking transport
|
||||||
trackCounter(DgramTransportTrackerName)
|
trackCounter(DgramTransportTrackerName)
|
||||||
|
@@ -840,31 +842,16 @@ proc join*(transp: DatagramTransport): Future[void] {.

   return retFuture

+proc closed*(transp: DatagramTransport): bool {.inline.} =
+  ## Returns ``true`` if transport in closed state.
+  {ReadClosed, WriteClosed} * transp.state != {}
+
 proc closeWait*(transp: DatagramTransport): Future[void] {.
-    async: (raw: true, raises: []).} =
+    async: (raises: []).} =
   ## Close transport ``transp`` and release all resources.
-  let retFuture = newFuture[void](
-    "datagram.transport.closeWait", {FutureFlag.OwnCancelSchedule})
-
-  if {ReadClosed, WriteClosed} * transp.state != {}:
-    retFuture.complete()
-    return retFuture
-
-  proc continuation(udata: pointer) {.gcsafe.} =
-    retFuture.complete()
-
-  proc cancellation(udata: pointer) {.gcsafe.} =
-    # We are not going to change the state of `retFuture` to cancelled, so we
-    # will prevent the entire sequence of Futures from being cancelled.
-    discard
-
-  transp.close()
-  if transp.future.finished():
-    retFuture.complete()
-  else:
-    transp.future.addCallback(continuation, cast[pointer](retFuture))
-    retFuture.cancelCallback = cancellation
-  retFuture
+  if not transp.closed():
+    transp.close()
+    await noCancel(transp.join())

 proc send*(transp: DatagramTransport, pbytes: pointer,
            nbytes: int): Future[void] {.
@@ -1020,7 +1007,3 @@ proc getMessage*(transp: DatagramTransport): seq[byte] {.
 proc getUserData*[T](transp: DatagramTransport): T {.inline.} =
   ## Obtain user data stored in ``transp`` object.
   cast[T](transp.udata)
-
-proc closed*(transp: DatagramTransport): bool {.inline.} =
-  ## Returns ``true`` if transport in closed state.
-  {ReadClosed, WriteClosed} * transp.state != {}
@@ -76,7 +76,7 @@ when defined(windows):
       offset: int                      # Reading buffer offset
       error: ref TransportError        # Current error
       queue: Deque[StreamVector]       # Writer queue
-      future: Future[void]             # Stream life future
+      future: Future[void].Raising([]) # Stream life future
       # Windows specific part
       rwsabuf: WSABUF                  # Reader WSABUF
       wwsabuf: WSABUF                  # Writer WSABUF
@@ -103,7 +103,7 @@ else:
       offset: int                      # Reading buffer offset
       error: ref TransportError        # Current error
       queue: Deque[StreamVector]       # Writer queue
-      future: Future[void]             # Stream life future
+      future: Future[void].Raising([]) # Stream life future
       case kind*: TransportKind
       of TransportKind.Socket:
         domain: Domain                 # Socket transport domain (IPv4/IPv6)
@@ -598,7 +598,8 @@ when defined(windows):
       transp.buffer = newSeq[byte](bufsize)
     transp.state = {ReadPaused, WritePaused}
     transp.queue = initDeque[StreamVector]()
-    transp.future = newFuture[void]("stream.socket.transport")
+    transp.future = Future[void].Raising([]).init(
+      "stream.socket.transport", {FutureFlag.OwnCancelSchedule})
     GC_ref(transp)
     transp

@@ -619,7 +620,8 @@ when defined(windows):
       transp.flags = flags
     transp.state = {ReadPaused, WritePaused}
     transp.queue = initDeque[StreamVector]()
-    transp.future = newFuture[void]("stream.pipe.transport")
+    transp.future = Future[void].Raising([]).init(
+      "stream.pipe.transport", {FutureFlag.OwnCancelSchedule})
     GC_ref(transp)
     transp

@@ -1457,7 +1459,8 @@ else:
       transp.buffer = newSeq[byte](bufsize)
     transp.state = {ReadPaused, WritePaused}
     transp.queue = initDeque[StreamVector]()
-    transp.future = newFuture[void]("socket.stream.transport")
+    transp.future = Future[void].Raising([]).init(
+      "socket.stream.transport", {FutureFlag.OwnCancelSchedule})
     GC_ref(transp)
     transp

@@ -1473,7 +1476,8 @@ else:
      transp.buffer = newSeq[byte](bufsize)
     transp.state = {ReadPaused, WritePaused}
     transp.queue = initDeque[StreamVector]()
-    transp.future = newFuture[void]("pipe.stream.transport")
+    transp.future = Future[void].Raising([]).init(
+      "pipe.stream.transport", {FutureFlag.OwnCancelSchedule})
     GC_ref(transp)
     transp

@@ -1806,6 +1810,9 @@ proc connect*(address: TransportAddress,
   if TcpNoDelay in flags: mappedFlags.incl(SocketFlags.TcpNoDelay)
   connect(address, bufferSize, child, localAddress, mappedFlags, dualstack)

+proc closed*(server: StreamServer): bool =
+  server.status == ServerStatus.Closed
+
 proc close*(server: StreamServer) =
   ## Release ``server`` resources.
   ##
@@ -1832,22 +1839,11 @@ proc close*(server: StreamServer) =
   else:
     server.sock.closeSocket(continuation)

-proc closeWait*(server: StreamServer): Future[void] {.
-    async: (raw: true, raises: []).} =
+proc closeWait*(server: StreamServer): Future[void] {.async: (raises: []).} =
   ## Close server ``server`` and release all resources.
-  let retFuture = newFuture[void](
-    "stream.server.closeWait", {FutureFlag.OwnCancelSchedule})
-
-  proc continuation(udata: pointer) =
-    retFuture.complete()
-
-  server.close()
-
-  if not(server.loopFuture.finished()):
-    server.loopFuture.addCallback(continuation, cast[pointer](retFuture))
-  else:
-    retFuture.complete()
-  retFuture
+  if not server.closed():
+    server.close()
+    await noCancel(server.join())

 proc getBacklogSize(backlog: int): cint =
   doAssert(backlog >= 0 and backlog <= high(int32))
@@ -2058,7 +2054,9 @@ proc createStreamServer*(host: TransportAddress,
   sres.init = init
   sres.bufferSize = bufferSize
   sres.status = Starting
-  sres.loopFuture = newFuture[void]("stream.transport.server")
+  sres.loopFuture = asyncloop.init(
+    Future[void].Raising([]), "stream.transport.server",
+    {FutureFlag.OwnCancelSchedule})
   sres.udata = udata
   sres.dualstack = dualstack
   if localAddress.family == AddressFamily.None:
@@ -2630,6 +2628,23 @@ proc join*(transp: StreamTransport): Future[void] {.
     retFuture.complete()
   return retFuture

+proc closed*(transp: StreamTransport): bool {.inline.} =
+  ## Returns ``true`` if transport in closed state.
+  ({ReadClosed, WriteClosed} * transp.state != {})
+
+proc finished*(transp: StreamTransport): bool {.inline.} =
+  ## Returns ``true`` if transport in finished (EOF) state.
+  ({ReadEof, WriteEof} * transp.state != {})
+
+proc failed*(transp: StreamTransport): bool {.inline.} =
+  ## Returns ``true`` if transport in error state.
+  ({ReadError, WriteError} * transp.state != {})
+
+proc running*(transp: StreamTransport): bool {.inline.} =
+  ## Returns ``true`` if transport is still pending.
+  ({ReadClosed, ReadEof, ReadError,
+    WriteClosed, WriteEof, WriteError} * transp.state == {})
+
 proc close*(transp: StreamTransport) =
   ## Closes and frees resources of transport ``transp``.
   ##
@@ -2672,31 +2687,11 @@ proc close*(transp: StreamTransport) =
   elif transp.kind == TransportKind.Socket:
     closeSocket(transp.fd, continuation)

-proc closeWait*(transp: StreamTransport): Future[void] {.
-    async: (raw: true, raises: []).} =
+proc closeWait*(transp: StreamTransport): Future[void] {.async: (raises: []).} =
   ## Close and frees resources of transport ``transp``.
-  let retFuture = newFuture[void](
-    "stream.transport.closeWait", {FutureFlag.OwnCancelSchedule})
-
-  if {ReadClosed, WriteClosed} * transp.state != {}:
-    retFuture.complete()
-    return retFuture
-
-  proc continuation(udata: pointer) {.gcsafe.} =
-    retFuture.complete()
-
-  proc cancellation(udata: pointer) {.gcsafe.} =
-    # We are not going to change the state of `retFuture` to cancelled, so we
-    # will prevent the entire sequence of Futures from being cancelled.
-    discard
-
-  transp.close()
-  if transp.future.finished():
-    retFuture.complete()
-  else:
-    transp.future.addCallback(continuation, cast[pointer](retFuture))
-    retFuture.cancelCallback = cancellation
-  retFuture
+  if not transp.closed():
+    transp.close()
+    await noCancel(transp.join())

 proc shutdownWait*(transp: StreamTransport): Future[void] {.
     async: (raw: true, raises: [TransportError, CancelledError]).} =
@@ -2756,23 +2751,6 @@ proc shutdownWait*(transp: StreamTransport): Future[void] {.
   callSoon(continuation, nil)
   retFuture

-proc closed*(transp: StreamTransport): bool {.inline.} =
-  ## Returns ``true`` if transport in closed state.
-  ({ReadClosed, WriteClosed} * transp.state != {})
-
-proc finished*(transp: StreamTransport): bool {.inline.} =
-  ## Returns ``true`` if transport in finished (EOF) state.
-  ({ReadEof, WriteEof} * transp.state != {})
-
-proc failed*(transp: StreamTransport): bool {.inline.} =
-  ## Returns ``true`` if transport in error state.
-  ({ReadError, WriteError} * transp.state != {})
-
-proc running*(transp: StreamTransport): bool {.inline.} =
-  ## Returns ``true`` if transport is still pending.
-  ({ReadClosed, ReadEof, ReadError,
-    WriteClosed, WriteEof, WriteError} * transp.state == {})
-
 proc fromPipe2*(fd: AsyncFD, child: StreamTransport = nil,
                 bufferSize = DefaultStreamBufferSize
                 ): Result[StreamTransport, OSErrorCode] =