From 5dfa3fd7fa0b7c3f6e7e8bc54a233f8b07d9356c Mon Sep 17 00:00:00 2001
From: Etan Kissling
Date: Tue, 5 Mar 2024 13:53:12 +0100
Subject: [PATCH 01/37] fix conversion error with `or` on futures with
 `{.async: (raises: []).}` (#515)

```nim
import chronos

proc f(): Future[void] {.async: (raises: []).} =
  discard

discard f() or f() or f()
```

```
/Users/etan/Documents/Repos/nimbus-eth2/vendor/nim-chronos/chronos/internal/raisesfutures.nim(145, 44) union
/Users/etan/Documents/Repos/nimbus-eth2/vendor/nimbus-build-system/vendor/Nim/lib/core/macros.nim(185, 28) []
/Users/etan/Documents/Repos/nimbus-eth2/test.nim(6, 13) template/generic instantiation of `or` from here
/Users/etan/Documents/Repos/nimbus-eth2/vendor/nim-chronos/chronos/internal/asyncfutures.nim(1668, 39) template/generic instantiation of `union` from here
/Users/etan/Documents/Repos/nimbus-eth2/vendor/nimbus-build-system/vendor/Nim/lib/core/macros.nim(185, 28) Error: illegal conversion from '-1' to '[0..9223372036854775807]'
```

Fix by checking for `void` before trying to access `raises`.
---
 chronos/internal/raisesfutures.nim | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim
index 5b91f41..ed85c03 100644
--- a/chronos/internal/raisesfutures.nim
+++ b/chronos/internal/raisesfutures.nim
@@ -142,7 +142,7 @@ macro union*(tup0: typedesc, tup1: typedesc): typedesc =
     if not found:
       result.add err
 
-  for err2 in getType(getTypeInst(tup1)[1])[1..^1]:
+  for err2 in tup1.members():
     result.add err2
 
   if result.len == 0:
     result = makeNoRaises()
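
The crash above comes from slicing the type node of an empty raises list, which chronos encodes as `void`. A minimal sketch of the guard idea, with an illustrative helper name rather than the exact chronos internals:

```nim
import std/macros

proc memberTypes(n: NimNode): seq[NimNode] {.compileTime.} =
  # An empty `raises: []` list shows up as `void`; check for that before
  # slicing out the exception types, otherwise `[1..^1]` underflows on a
  # node with no members (the '-1' conversion error above).
  let typ = getType(getTypeInst(n)[1])
  if typ.typeKind == ntyVoid:
    return @[]
  typ[1 ..^ 1]
```
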
From 1eb834a2f98c31677e899ffcc80259a10d78cfe7 Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Tue, 5 Mar 2024 18:33:46 +0200
Subject: [PATCH 02/37] Fix `or` deadlock issue. (#517)

* Fix: `or` should not create a future with the OwnCancelSchedule flag set.

* Fix: `CancelledError` missing from the raises list when both futures have
  empty raises lists.

* Fix macros tests.
---
 chronos/internal/asyncfutures.nim |  9 ++++-----
 tests/testbugs.nim                | 13 +++++++++++++
 tests/testmacro.nim               |  2 +-
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim
index 1a2be75..de6deba 100644
--- a/chronos/internal/asyncfutures.nim
+++ b/chronos/internal/asyncfutures.nim
@@ -786,7 +786,7 @@ template orImpl*[T, Y](fut1: Future[T], fut2: Future[Y]): untyped =
       fut2.addCallback(cb)
 
   retFuture.cancelCallback = cancellation
-  return retFuture
+  retFuture
 
 proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =
   ## Returns a future which will complete once either ``fut1`` or ``fut2``
@@ -801,7 +801,7 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =
   ## completed, the result future will also be completed.
   ##
   ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled.
-  var retFuture = newFuture[void]("chronos.or")
+  var retFuture = newFuture[void]("chronos.or()")
 
   orImpl(fut1, fut2)
 
@@ -1665,10 +1665,9 @@ proc `or`*[T, Y, E1, E2](
     fut1: InternalRaisesFuture[T, E1],
     fut2: InternalRaisesFuture[Y, E2]): auto =
   type
-    InternalRaisesFutureRaises = union(E1, E2)
+    InternalRaisesFutureRaises = union(E1, E2).union((CancelledError,))
 
-  let
-    retFuture = newFuture[void]("chronos.wait()", {FutureFlag.OwnCancelSchedule})
+  let retFuture = newFuture[void]("chronos.or()", {})
   orImpl(fut1, fut2)
 
 proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto =
diff --git a/tests/testbugs.nim b/tests/testbugs.nim
index fc4af3a..3f2f4e4 100644
--- a/tests/testbugs.nim
+++ b/tests/testbugs.nim
@@ -135,6 +135,16 @@ suite "Asynchronous issues test suite":
     await server.closeWait()
     return true
 
+  proc testOrDeadlock(): Future[bool] {.async.} =
+    proc f(): Future[void] {.async.} =
+      await sleepAsync(2.seconds) or sleepAsync(1.seconds)
+    let fx = f()
+    try:
+      await fx.cancelAndWait().wait(2.seconds)
+    except AsyncTimeoutError:
+      return false
+    true
+
   test "Issue #6":
     check waitFor(issue6()) == true
 
@@ -152,3 +162,6 @@ suite "Asynchronous issues test suite":
 
   test "IndexError crash test":
     check waitFor(testIndexError()) == true
+
+  test "`or` deadlock [#516] test":
+    check waitFor(testOrDeadlock()) == true
diff --git a/tests/testmacro.nim b/tests/testmacro.nim
index 9b19c68..d646303 100644
--- a/tests/testmacro.nim
+++ b/tests/testmacro.nim
@@ -491,7 +491,7 @@ suite "Exceptions tracking":
     proc testit2 {.async: (raises: [IOError]).} =
      raise (ref IOError)()
 
-    proc test {.async: (raises: [ValueError, IOError]).} =
+    proc test {.async: (raises: [CancelledError, ValueError, IOError]).} =
      await testit() or testit2()
 
    proc noraises() {.raises: [].} =
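
With both fixes in place, the raises list of `a() or b()` always includes `CancelledError`, matching the updated `testmacro` expectation. A hedged sketch of the user-visible effect (assumes chronos with this patch applied):

```nim
import chronos

proc a() {.async: (raises: []).} = discard
proc b() {.async: (raises: []).} = discard

proc c() {.async: (raises: [CancelledError]).} =
  # `or` itself may now be cancelled, so it contributes CancelledError
  # to the union of the operands' (empty) raises lists.
  await a() or b()
```
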
From 4ed0cd6be723c6709a7d1d9a72a5aa5916f6871d Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Tue, 5 Mar 2024 18:34:53 +0200
Subject: [PATCH 03/37] Ensure that `OwnCancelSchedule` flag will not be
 removed from `wait()` and `withTimeout()`. (#519)
---
 chronos/internal/asyncfutures.nim |  8 +++++++-
 tests/testfut.nim                 | 26 ++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim
index de6deba..d084252 100644
--- a/chronos/internal/asyncfutures.nim
+++ b/chronos/internal/asyncfutures.nim
@@ -1410,6 +1410,8 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {.
   var
     retFuture = newFuture[bool]("chronos.withTimeout",
                                 {FutureFlag.OwnCancelSchedule})
+  # We set the `OwnCancelSchedule` flag because we are going to cancel
+  # `retFuture` manually at the proper time.
     moment: Moment
     timer: TimerCallback
     timeouted = false
@@ -1536,6 +1538,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
   ## should return, because it can't be cancelled too.
   var retFuture = newFuture[T]("chronos.wait()",
                                {FutureFlag.OwnCancelSchedule})
+  # We set the `OwnCancelSchedule` flag because we are going to cancel
+  # `retFuture` manually at the proper time.
 
   waitImpl(fut, retFuture, timeout)
 
@@ -1677,6 +1681,8 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto =
     InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError)
 
   let
-    retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule})
+    retFuture = newFuture[T]("chronos.wait()", {OwnCancelSchedule})
+  # We set the `OwnCancelSchedule` flag because we are going to cancel
+  # `retFuture` manually at the proper time.
 
   waitImpl(fut, retFuture, timeout)
diff --git a/tests/testfut.nim b/tests/testfut.nim
index fc2401d..aee3b15 100644
--- a/tests/testfut.nim
+++ b/tests/testfut.nim
@@ -1594,6 +1594,19 @@ suite "Future[T] behavior test suite":
       discard someFut.tryCancel()
       await someFut
 
+  asyncTest "wait() should allow cancellation test (depends on race())":
+    proc testFoo(): Future[bool] {.async.} =
+      let
+        resFut = sleepAsync(2.seconds).wait(3.seconds)
+        timeFut = sleepAsync(1.seconds)
+        cancelFut = cancelAndWait(resFut)
+      discard await race(cancelFut, timeFut)
+      if cancelFut.finished():
+        return (resFut.cancelled() and cancelFut.completed())
+      false
+
+    check (await testFoo()) == true
+
   asyncTest "withTimeout() cancellation undefined behavior test #1":
     proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
          async.} =
@@ -1654,6 +1667,19 @@ suite "Future[T] behavior test suite":
       discard someFut.tryCancel()
       await someFut
 
+  asyncTest "withTimeout() should allow cancellation test (depends on race())":
+    proc testFoo(): Future[bool] {.async.} =
+      let
+        resFut = sleepAsync(2.seconds).withTimeout(3.seconds)
+        timeFut = sleepAsync(1.seconds)
+        cancelFut = cancelAndWait(resFut)
+      discard await race(cancelFut, timeFut)
+      if cancelFut.finished():
+        return (resFut.cancelled() and cancelFut.completed())
+      false
+
+    check (await testFoo()) == true
+
   asyncTest "Cancellation behavior test":
     proc testInnerFoo(fooFut: Future[void]) {.async.} =
       await fooFut
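
Because `wait()` and `withTimeout()` keep `OwnCancelSchedule` set, they schedule cancellation of their returned future themselves; the new tests pin down that external cancellation still completes it. A hedged sketch of that contract (assumes chronos with this patch applied):

```nim
import chronos

proc demo() {.async.} =
  let fut = sleepAsync(2.seconds).wait(3.seconds)
  # Even though `fut` owns its cancellation schedule, cancelAndWait()
  # must finish and leave the future in the cancelled state.
  await cancelAndWait(fut)
  assert fut.cancelled()

waitFor demo()
```
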
From f6c7ecfa0a3af7fdffcabe9101b44125a1c93a6d Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Wed, 6 Mar 2024 01:56:40 +0200
Subject: [PATCH 04/37] Add missing parts of default buffer size increase.
 (#513)
---
 chronos/apps/http/httpserver.nim  |  6 +++---
 chronos/apps/http/multipart.nim   | 10 ++++++----
 chronos/apps/http/shttpserver.nim |  6 +++---
 chronos/config.nim                |  5 +++++
 chronos/streams/boundstream.nim   |  6 +++---
 chronos/streams/chunkstream.nim   |  6 +++---
 chronos/streams/tlsstream.nim     | 10 +++++++---
 7 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim
index 92ed356..c1e5279 100644
--- a/chronos/apps/http/httpserver.nim
+++ b/chronos/apps/http/httpserver.nim
@@ -11,7 +11,7 @@
 import std/[tables, uri, strutils]
 import stew/[base10], httputils, results
-import ../../[asyncloop, asyncsync]
+import ../../[asyncloop, asyncsync, config]
 import ../../streams/[asyncstream, boundstream, chunkstream]
 import "."/[httptable, httpcommon, multipart]
 from ../../transports/common import TransportAddress, ServerFlags, `$`, `==`
@@ -244,7 +244,7 @@ proc new*(
            serverUri = Uri(),
            serverIdent = "",
            maxConnections: int = -1,
-           bufferSize: int = 4096,
+           bufferSize: int = chronosTransportDefaultBufferSize,
            backlogSize: int = DefaultBacklogSize,
            httpHeadersTimeout = 10.seconds,
            maxHeadersSize: int = 8192,
@@ -304,7 +304,7 @@ proc new*(
            serverUri = Uri(),
            serverIdent = "",
            maxConnections: int = -1,
-           bufferSize: int = 4096,
+           bufferSize: int = chronosTransportDefaultBufferSize,
            backlogSize: int = DefaultBacklogSize,
            httpHeadersTimeout = 10.seconds,
            maxHeadersSize: int = 8192,
diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim
index 302d6ef..5c50e46 100644
--- a/chronos/apps/http/multipart.nim
+++ b/chronos/apps/http/multipart.nim
@@ -20,6 +20,7 @@ export asyncloop, httptable, httpcommon, httpbodyrw, asyncstream, httputils
 const
   UnableToReadMultipartBody = "Unable to read multipart message body, reason: "
   UnableToSendMultipartMessage = "Unable to send multipart message, reason: "
+  MaxMultipartHeaderSize = 4096
 
 type
   MultiPartSource* {.pure.} = enum
@@ -142,10 +143,11 @@ proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader],
   MultiPartReader(kind: MultiPartSource.Buffer,
                   buffer: buf, offset: 0, boundary: fboundary)
 
-proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef],
-                    stream: HttpBodyReader,
-                    boundary: openArray[B],
-                    partHeadersMaxSize = 4096): MultiPartReaderRef =
+proc new*[B: BChar](
+    mpt: typedesc[MultiPartReaderRef],
+    stream: HttpBodyReader,
+    boundary: openArray[B],
+    partHeadersMaxSize = MaxMultipartHeaderSize): MultiPartReaderRef =
   ## Create new MultiPartReader instance with `stream` interface.
   ##
   ## ``stream`` is stream used to read data.
diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim
index 6272bb2..532839d 100644
--- a/chronos/apps/http/shttpserver.nim
+++ b/chronos/apps/http/shttpserver.nim
@@ -10,7 +10,7 @@
 {.push raises: [].}
 
 import httpserver
-import ../../asyncloop, ../../asyncsync
+import ../../[asyncloop, asyncsync, config]
 import ../../streams/[asyncstream, tlsstream]
 export asyncloop, asyncsync, httpserver, asyncstream, tlsstream
@@ -91,7 +91,7 @@ proc new*(htype: typedesc[SecureHttpServerRef],
           serverIdent = "",
           secureFlags: set[TLSFlags] = {},
           maxConnections: int = -1,
-          bufferSize: int = 4096,
+          bufferSize: int = chronosTransportDefaultBufferSize,
           backlogSize: int = DefaultBacklogSize,
           httpHeadersTimeout = 10.seconds,
           maxHeadersSize: int = 8192,
@@ -157,7 +157,7 @@ proc new*(htype: typedesc[SecureHttpServerRef],
           serverIdent = "",
           secureFlags: set[TLSFlags] = {},
           maxConnections: int = -1,
-          bufferSize: int = 4096,
+          bufferSize: int = chronosTransportDefaultBufferSize,
           backlogSize: int = DefaultBacklogSize,
           httpHeadersTimeout = 10.seconds,
           maxHeadersSize: int = 8192,
diff --git a/chronos/config.nim b/chronos/config.nim
index cf500db..26d110f 100644
--- a/chronos/config.nim
+++ b/chronos/config.nim
@@ -97,6 +97,9 @@ const
   chronosStreamDefaultBufferSize* {.intdefine.} = 16384
     ## Default size of chronos async stream internal buffer.
 
+  chronosTLSSessionCacheBufferSize* {.intdefine.} = 4096
+    ## Default size of chronos TLS Session cache's internal buffer.
+ when defined(chronosStrictException): {.warning: "-d:chronosStrictException has been deprecated in favor of handleException".} # In chronos v3, this setting was used as the opposite of @@ -123,6 +126,8 @@ when defined(debug) or defined(chronosConfig): chronosTransportDefaultBufferSize) printOption("chronosStreamDefaultBufferSize", chronosStreamDefaultBufferSize) + printOption("chronosTLSSessionCacheBufferSize", + chronosTLSSessionCacheBufferSize) # In nim 1.6, `sink` + local variable + `move` generates the best code for # moving a proc parameter into a closure - this only works for closure diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index ce69571..8d2e52c 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -18,8 +18,8 @@ {.push raises: [].} import results -import ../asyncloop, ../timer -import asyncstream, ../transports/stream, ../transports/common +import ../[asyncloop, timer, config] +import asyncstream, ../transports/[stream, common] export asyncloop, asyncstream, stream, timer, common type @@ -44,7 +44,7 @@ type BoundedStreamRW* = BoundedStreamReader | BoundedStreamWriter const - BoundedBufferSize* = 4096 + BoundedBufferSize* = chronosStreamDefaultBufferSize BoundarySizeDefectMessage = "Boundary must not be empty array" template newBoundedStreamIncompleteError(): ref BoundedStreamError = diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index 7739207..f3e73e0 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -11,13 +11,13 @@ {.push raises: [].} -import ../asyncloop, ../timer -import asyncstream, ../transports/stream, ../transports/common +import ../[asyncloop, timer, config] +import asyncstream, ../transports/[stream, common] import results export asyncloop, asyncstream, stream, timer, common, results const - ChunkBufferSize = 4096 + ChunkBufferSize = chronosStreamDefaultBufferSize MaxChunkHeaderSize = 1024 ChunkHeaderValueSize = 8 # This is limit for chunk size to 8 hexadecimal digits, so maximum diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 12ea6d3..6c019f1 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -16,9 +16,12 @@ import bearssl/[brssl, ec, errors, pem, rsa, ssl, x509], bearssl/certs/cacert import ".."/[asyncloop, asyncsync, config, timer] -import asyncstream, ../transports/stream, ../transports/common +import asyncstream, ../transports/[stream, common] export asyncloop, asyncsync, timer, asyncstream +const + TLSSessionCacheBufferSize* = chronosTLSSessionCacheBufferSize + type TLSStreamKind {.pure.} = enum Client, Server @@ -777,11 +780,12 @@ proc init*(tt: typedesc[TLSCertificate], raiseTLSStreamProtocolError("Could not find any certificates") res -proc init*(tt: typedesc[TLSSessionCache], size: int = 4096): TLSSessionCache = +proc init*(tt: typedesc[TLSSessionCache], + size: int = TLSSessionCacheBufferSize): TLSSessionCache = ## Create new TLS session cache with size ``size``. ## ## One cached item is near 100 bytes size. 
- var rsize = min(size, 4096) + let rsize = min(size, 4096) var res = TLSSessionCache(storage: newSeq[byte](rsize)) sslSessionCacheLruInit(addr res.context, addr res.storage[0], rsize) res From 03d82475d91c09d35faace9077c8f2050a0bfc2e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 6 Mar 2024 06:42:22 +0100 Subject: [PATCH 05/37] Avoid `ValueError` effect in varargs `race`/`one` (#520) We can check at compile-time that at least one parameter is passed * clean up closure environment explicitly in some callbacks to release memory earlier --- chronos/internal/asyncfutures.nim | 174 +++++++++++++++++++----------- tests/testfut.nim | 31 ++++-- 2 files changed, 136 insertions(+), 69 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index d084252..d364dc5 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -734,8 +734,8 @@ proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {. retFuture.fail(fut2.error) else: retFuture.complete() - fut1.callback = cb - fut2.callback = cb + fut1.addCallback(cb) + fut2.addCallback(cb) proc cancellation(udata: pointer) = # On cancel we remove all our callbacks only. @@ -1086,12 +1086,14 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] {. inc(finishedFutures) if finishedFutures == totalFutures: retFuture.complete() + reset(nfuts) proc cancellation(udata: pointer) = # On cancel we remove all our callbacks only. for i in 0.. Date: Thu, 7 Mar 2024 08:07:53 +0100 Subject: [PATCH 06/37] fix circular reference in timer (#510) --- chronos/asyncproc.nim | 12 +++++++----- chronos/internal/asyncfutures.nim | 11 +++++++++-- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index f008776..572e382 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -1010,12 +1010,14 @@ else: retFuture.fail(newException(AsyncProcessError, osErrorMsg(res.error()))) + timer = nil + proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - if not(isNil(timer)): - clearTimer(timer) - # Ignore any errors because of cancellation. - discard removeProcess2(processHandle) + if not(isNil(timer)): + clearTimer(timer) + timer = nil + # Ignore any errors because of cancellation. + discard removeProcess2(processHandle) if timeout != InfiniteDuration: timer = setTimer(Moment.fromNow(timeout), continuation, cast[pointer](2)) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index d364dc5..206f89c 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1386,13 +1386,15 @@ proc sleepAsync*(duration: Duration): Future[void] {. proc completion(data: pointer) {.gcsafe.} = if not(retFuture.finished()): retFuture.complete() + timer = nil # Release circular reference (for gc:arc) proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): + if not isNil(timer): clearTimer(timer) + timer = nil # Release circular reference (for gc:arc) retFuture.cancelCallback = cancellation - timer = setTimer(moment, completion, cast[pointer](retFuture)) + timer = setTimer(moment, completion) return retFuture proc sleepAsync*(ms: int): Future[void] {. @@ -1487,6 +1489,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. 
if not(isNil(timer)): clearTimer(timer) fut.completeFuture() + timer = nil # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 @@ -1497,6 +1500,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. fut.cancelSoon() else: fut.completeFuture() + timer = nil if fut.finished(): retFuture.complete(true) @@ -1549,6 +1553,7 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = if not(isNil(timer)): clearTimer(timer) fut.completeFuture() + timer = nil var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = @@ -1559,6 +1564,8 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = else: fut.completeFuture() + timer = nil + if fut.finished(): fut.completeFuture() else: From 17b7a76c7e40c89f31351cd1a5faf76f177b30ac Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 7 Mar 2024 08:09:16 +0100 Subject: [PATCH 07/37] Ensure `transp.reader` is reset to `nil` on error (#508) In `stream.readLoop`, a finished `Future` was left in `transp.reader` if there was an error in `resumeRead`. Set it to `nil` as well. Co-authored-by: Jacek Sieka --- chronos/transports/stream.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 33a8631..fa3cbac 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2372,7 +2372,7 @@ template readLoop(name, body: untyped): untyped = # resumeRead() could not return any error. raiseOsDefect(errorCode, "readLoop(): Unable to resume reading") else: - transp.reader.complete() + transp.completeReader() if errorCode == oserrno.ESRCH: # ESRCH 3 "No such process" # This error could be happened on pipes only, when process which From 47cc17719f4293bf80a22ebe28e3bfc54b2a59a1 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 8 Mar 2024 14:43:42 +0100 Subject: [PATCH 08/37] print warning when calling failed (#521) `failed` cannot return true for futures that don't forward exceptions --- chronos/internal/raisesfutures.nim | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index ed85c03..2e09a1d 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -2,6 +2,8 @@ import std/[macros, sequtils], ../futures +{.push raises: [].} + type InternalRaisesFuture*[T, E] = ref object of Future[T] ## Future with a tuple of possible exception types @@ -205,13 +207,20 @@ macro checkRaises*[T: CatchableError]( `warning` assert(`runtimeChecker`, `errorMsg`) -proc error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. +func failed*[T](future: InternalRaisesFuture[T, void]): bool {.inline.} = + ## Determines whether ``future`` finished with an error. + static: + warning("No exceptions possible with this operation, `failed` always returns false") + + false + +func error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. raises: [].} = static: warning("No exceptions possible with this operation, `error` always returns nil") nil -proc readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. +func readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. 
raises: [ValueError].} = static: warning("No exceptions possible with this operation, `readError` always raises") From d4f1487b0cd51a90b76c83f633c02dcae4a84610 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 19 Mar 2024 18:28:52 +0200 Subject: [PATCH 09/37] Disable libbacktrace enabled test on X86 platforms. (#523) * Disable libbacktrace enabled test on X86 platforms. * Fix mistype. * Use macos-12 workers from now. --- .github/workflows/ci.yml | 2 +- chronos.nimble | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e64f754..cab8555 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,7 +34,7 @@ jobs: shell: bash - target: os: macos - builder: macos-11 + builder: macos-12 shell: bash - target: os: windows diff --git a/chronos.nimble b/chronos.nimble index 48c7781..c754a5c 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -20,6 +20,7 @@ let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler let verbose = getEnv("V", "") notin ["", "0"] +let platform = getEnv("PLATFORM", "") let testArguments = when defined(windows): [ @@ -60,15 +61,16 @@ task test, "Run all tests": run args, "tests/testall" task test_libbacktrace, "test with libbacktrace": - let allArgs = @[ - "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", - ] + if platform != "x86": + let allArgs = @[ + "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", + ] - for args in allArgs: - if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + for args in allArgs: + if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. + run args & " --mm:refc", "tests/testall" + run args, "tests/testall" task docs, "Generate API documentation": exec "mdbook build docs" From 035288f3f08370d83e83c0ee4b01c1d957240138 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 20 Mar 2024 08:47:59 +0200 Subject: [PATCH 10/37] Remove `sink` and `chronosMoveSink()` usage. (#524) --- chronos/internal/asyncfutures.nim | 6 +++--- chronos/streams/asyncstream.nim | 8 ++++---- chronos/streams/tlsstream.nim | 2 +- chronos/transports/datagram.nim | 18 +++++++++--------- chronos/transports/stream.nim | 8 ++++---- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 206f89c..49c6acd 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -202,14 +202,14 @@ proc finish(fut: FutureBase, state: FutureState) = when chronosFutureTracking: scheduleDestructor(fut) -proc complete[T](future: Future[T], val: sink T, loc: ptr SrcLoc) = +proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) doAssert(isNil(future.internalError)) - future.internalValue = chronosMoveSink(val) + future.internalValue = val future.finish(FutureState.Completed) -template complete*[T](future: Future[T], val: sink T) = +template complete*[T](future: Future[T], val: T) = ## Completes ``future`` with value ``val``. 
complete(future, val, getSrcLocation()) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index e688f28..3d2f858 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -736,7 +736,7 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer, await item.future wstream.bytesCount = wstream.bytesCount + uint64(item.size) -proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], +proc write*(wstream: AsyncStreamWriter, sbytes: seq[byte], msglen = -1) {. async: (raises: [CancelledError, AsyncStreamError]).} = ## Write sequence of bytes ``sbytes`` of length ``msglen`` to writer @@ -771,14 +771,14 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], wstream.bytesCount = wstream.bytesCount + uint64(length) else: let item = WriteItem( - kind: Sequence, dataSeq: move(sbytes), size: length, + kind: Sequence, dataSeq: sbytes, size: length, future: Future[void].Raising([CancelledError, AsyncStreamError]) .init("async.stream.write(seq)")) await wstream.queue.put(item) await item.future wstream.bytesCount = wstream.bytesCount + uint64(item.size) -proc write*(wstream: AsyncStreamWriter, sbytes: sink string, +proc write*(wstream: AsyncStreamWriter, sbytes: string, msglen = -1) {. async: (raises: [CancelledError, AsyncStreamError]).} = ## Write string ``sbytes`` of length ``msglen`` to writer stream ``wstream``. @@ -812,7 +812,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string, wstream.bytesCount = wstream.bytesCount + uint64(length) else: let item = WriteItem( - kind: String, dataStr: move(sbytes), size: length, + kind: String, dataStr: sbytes, size: length, future: Future[void].Raising([CancelledError, AsyncStreamError]) .init("async.stream.write(string)")) await wstream.queue.put(item) diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 6c019f1..86f6d4c 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -161,7 +161,7 @@ proc tlsWriteRec(engine: ptr SslEngineContext, var length = 0'u var buf = sslEngineSendrecBuf(engine[], length) doAssert(length != 0 and not isNil(buf)) - await writer.wsource.write(chronosMoveSink(buf), int(length)) + await writer.wsource.write(buf, int(length)) sslEngineSendrecAck(engine[], length) TLSResult.Success except AsyncStreamError as exc: diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index cd335df..f89a6b4 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -873,7 +873,7 @@ proc send*(transp: DatagramTransport, pbytes: pointer, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc send*(transp: DatagramTransport, msg: sink string, +proc send*(transp: DatagramTransport, msg: string, msglen = -1): Future[void] {. 
async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination @@ -882,7 +882,7 @@ proc send*(transp: DatagramTransport, msg: sink string, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, @@ -896,16 +896,16 @@ proc send*(transp: DatagramTransport, msg: sink string, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc send*[T](transp: DatagramTransport, msg: sink seq[T], +proc send*[T](transp: DatagramTransport, msg: seq[T], msglen = -1): Future[void] {. - async: (raw: true, raises: [TransportError, CancelledError]).} = + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. let retFuture = newFuture[void]("datagram.transport.send(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, @@ -935,7 +935,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*(transp: DatagramTransport, remote: TransportAddress, - msg: sink string, msglen = -1): Future[void] {. + msg: string, msglen = -1): Future[void] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address ``remote``. @@ -943,7 +943,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, @@ -958,14 +958,14 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, - msg: sink seq[T], msglen = -1): Future[void] {. + msg: seq[T], msglen = -1): Future[void] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send sequence ``msg`` using transport ``transp`` to remote destination ## address ``remote``. let retFuture = newFuture[void]("datagram.transport.sendTo(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index fa3cbac..c80d992 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2248,7 +2248,7 @@ proc write*(transp: StreamTransport, pbytes: pointer, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc write*(transp: StreamTransport, msg: sink string, +proc write*(transp: StreamTransport, msg: string, msglen = -1): Future[int] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write data from string ``msg`` using transport ``transp``. 
@@ -2267,7 +2267,7 @@ proc write*(transp: StreamTransport, msg: sink string, let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) @@ -2280,7 +2280,7 @@ proc write*(transp: StreamTransport, msg: sink string, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc write*[T](transp: StreamTransport, msg: sink seq[T], +proc write*[T](transp: StreamTransport, msg: seq[T], msglen = -1): Future[int] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write sequence ``msg`` using transport ``transp``. @@ -2300,7 +2300,7 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) From d5bc90fef22cc32ef5ce4a037a852fd081dd5c0c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 20 Mar 2024 12:08:26 +0100 Subject: [PATCH 11/37] Work around type resolution with empty generic (#522) * Work around type resolution with empty generic * workaround --- chronos/internal/raisesfutures.nim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 2e09a1d..07e3438 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -12,6 +12,12 @@ type ## This type gets injected by `async: (raises: ...)` and similar utilities ## and should not be used manually as the internal exception representation ## is subject to change in future chronos versions. + # TODO https://github.com/nim-lang/Nim/issues/23418 + # TODO https://github.com/nim-lang/Nim/issues/23419 + when E is void: + dummy: E + else: + dummy: array[0, E] proc makeNoRaises*(): NimNode {.compileTime.} = # An empty tuple would have been easier but... 
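
The `dummy` field introduced above makes the otherwise-unused generic parameter `E` participate in type resolution. A hedged, self-contained illustration of the pattern (the `Phantom` name is invented for the example):

```nim
type
  Phantom[E] = ref object
    # `E` carries compile-time information only; without a field that
    # mentions it, Nim 2.0 can mis-resolve the type (nim-lang/Nim#23418,
    # nim-lang/Nim#23419). Both branches below add zero bytes at runtime.
    when E is void:
      dummy: E
    else:
      dummy: array[0, E]

var x: Phantom[(ValueError,)]  # tuple-of-exceptions parameter, as in chronos
```
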

From 0e806d59aea5e49a65524567cfa18de34e37ccca Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Thu, 21 Mar 2024 09:21:51 +0100
Subject: [PATCH 12/37] v4.0.1
---
 chronos.nimble | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chronos.nimble b/chronos.nimble
index c754a5c..5aa51b8 100644
--- a/chronos.nimble
+++ b/chronos.nimble
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose
 
 packageName = "chronos"
-version = "4.0.0"
+version = "4.0.1"
 author = "Status Research & Development GmbH"
 description = "Networking framework with async/await support"
 license = "MIT or Apache License 2.0"
From b8b4e1fc477edeb1ca0b0ae641d0e8fa5c3416ab Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Mon, 25 Mar 2024 10:37:42 +0100
Subject: [PATCH 13/37] make `Raising` compatible with 2.0 (#526)

* make `Raising` compatible with 2.0

See https://github.com/nim-lang/Nim/issues/23432

* Update tests/testfut.nim

* Update tests/testfut.nim
---
 chronos/internal/raisesfutures.nim | 23 +++++++++++++++++++----
 tests/testfut.nim                  | 18 ++++++++++++++++++
 2 files changed, 37 insertions(+), 4 deletions(-)

diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim
index 07e3438..546f3b7 100644
--- a/chronos/internal/raisesfutures.nim
+++ b/chronos/internal/raisesfutures.nim
@@ -59,17 +59,32 @@ proc members(tup: NimNode): seq[NimNode] {.compileTime.} =
 macro hasException(raises: typedesc, ident: static string): bool =
   newLit(raises.members.anyIt(it.eqIdent(ident)))
 
-macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped =
+macro Raising*[T](F: typedesc[Future[T]], E: typed): untyped =
   ## Given a Future type instance, return a type storing `{.raises.}`
   ## information
   ##
   ## Note; this type may change in the future
-  E.expectKind(nnkBracket)
 
-  let raises = if E.len == 0:
+  # An earlier version used `E: varargs[typedesc]` here but this is buggy/no
+  # longer supported in 2.0 in certain cases:
+  # https://github.com/nim-lang/Nim/issues/23432
+  let
+    e =
+      case E.getTypeInst().typeKind()
+      of ntyTypeDesc: @[E]
+      of ntyArray:
+        for x in E:
+          if x.getTypeInst().typeKind != ntyTypeDesc:
+            error("Expected typedesc, got " & repr(x), x)
+        E.mapIt(it)
+      else:
+        error("Expected typedesc, got " & repr(E), E)
+        @[]
+
+  let raises = if e.len == 0:
     makeNoRaises()
   else:
-    nnkTupleConstr.newTree(E.mapIt(it))
+    nnkTupleConstr.newTree(e)
   nnkBracketExpr.newTree(
     ident "InternalRaisesFuture",
     nnkDotExpr.newTree(F, ident"T"),
diff --git a/tests/testfut.nim b/tests/testfut.nim
index 8c08293..c2231f1 100644
--- a/tests/testfut.nim
+++ b/tests/testfut.nim
@@ -2047,9 +2047,27 @@ suite "Future[T] behavior test suite":
     check:
       future1.cancelled() == true
       future2.cancelled() == true
+
   test "Sink with literals": # https://github.com/nim-lang/Nim/issues/22175
     let fut = newFuture[string]()
     fut.complete("test")
     check:
       fut.value() == "test"
+
+  test "Raising type matching":
+    type X[E] = Future[void].Raising(E)
+
+    proc f(x: X) = discard
+
+    var v: Future[void].Raising([ValueError])
+    f(v)
+
+    type Object = object
+      # TODO cannot use X[[ValueError]] here..
+      field: Future[void].Raising([ValueError])
+    discard Object(field: v)
+
+    check:
+      not compiles(Future[void].Raising([42]))
+      not compiles(Future[void].Raising(42))
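
As the new test shows, `Raising` now accepts either a single typedesc or an array of them. A hedged usage sketch (assumes chronos at this patch level):

```nim
import chronos

proc work(): Future[void] {.async: (raises: [ValueError]).} =
  discard

# The annotation and the proc's declared raises list expand to the same
# InternalRaisesFuture instantiation, so this assignment type-checks.
let fut: Future[void].Raising([ValueError]) = work()
```
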
From ef1b077adfdc803fcce880e81a5740b964bac0bc Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Mon, 25 Mar 2024 10:38:17 +0100
Subject: [PATCH 14/37] v4.0.2
---
 chronos.nimble | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chronos.nimble b/chronos.nimble
index 5aa51b8..ba92ac6 100644
--- a/chronos.nimble
+++ b/chronos.nimble
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose
 
 packageName = "chronos"
-version = "4.0.1"
+version = "4.0.2"
 author = "Status Research & Development GmbH"
 description = "Networking framework with async/await support"
 license = "MIT or Apache License 2.0"

From 402914f4cfb82a35a44511230bf8a9ab06aa3a8f Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Tue, 26 Mar 2024 22:33:19 +0200
Subject: [PATCH 15/37] Add custom ring buffer into chronos streams and
 transports. (#485)

* Add custom ring buffer into chronos stream transport.

* Rename BipBuffer.decommit() to BipBuffer.consume().
  Make asyncstreams use BipBuffer.

* Address review comments part 1.

* Address review comments part 2.

* Address review comments.

* Remove unused import results.

* Address review comments.
---
 chronos/bipbuffer.nim           | 140 ++++++++++++++++++++
 chronos/streams/asyncstream.nim | 221 ++++++++++++++++----------------
 chronos/streams/boundstream.nim |  10 +-
 chronos/streams/chunkstream.nim |  12 +-
 chronos/streams/tlsstream.nim   |   2 +-
 chronos/transports/datagram.nim |   2 +-
 chronos/transports/stream.nim   | 209 +++++++++++++++++------------
 7 files changed, 378 insertions(+), 218 deletions(-)
 create mode 100644 chronos/bipbuffer.nim

diff --git a/chronos/bipbuffer.nim b/chronos/bipbuffer.nim
new file mode 100644
index 0000000..5aa34c4
--- /dev/null
+++ b/chronos/bipbuffer.nim
@@ -0,0 +1,140 @@
#
# Chronos
#
# (c) Copyright 2018-Present Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)

## This module implements Bip Buffer (bi-partite circular buffer) by Simone
## Cooke.
##
## The Bip-Buffer is like a circular buffer, but slightly different. Instead of
## keeping one head and tail pointer to the data in the buffer, it maintains two
## revolving regions, allowing for fast data access without having to worry
## about wrapping at the end of the buffer. Buffer allocations are always
## maintained as contiguous blocks, allowing the buffer to be used in a highly
## efficient manner with API calls, and also reducing the amount of copying
## which needs to be performed to put data into the buffer. Finally, a two-phase
## allocation system allows the user to pessimistically reserve an area of
## buffer space, and then trim back the buffer to commit to only the space which
## was used.
##
## https://www.codeproject.com/Articles/3479/The-Bip-Buffer-The-Circular-Buffer-with-a-Twist

{.push raises: [].}

type
  BipPos = object
    start: Natural
    finish: Natural

  BipBuffer* = object
    a, b, r: BipPos
    data: seq[byte]

proc init*(t: typedesc[BipBuffer], size: int): BipBuffer =
  ## Creates a new Bip Buffer of `size` bytes.
  BipBuffer(data: newSeq[byte](size))

template len(pos: BipPos): Natural =
  pos.finish - pos.start

template reset(pos: var BipPos) =
  pos = BipPos()

func init(t: typedesc[BipPos], start, finish: Natural): BipPos =
  BipPos(start: start, finish: finish)

func calcReserve(bp: BipBuffer): tuple[space: Natural, start: Natural] =
  if len(bp.b) > 0:
    (Natural(bp.a.start - bp.b.finish), bp.b.finish)
  else:
    let spaceAfterA = Natural(len(bp.data) - bp.a.finish)
    if spaceAfterA >= bp.a.start:
      (spaceAfterA, bp.a.finish)
    else:
      (bp.a.start, Natural(0))

func availSpace*(bp: BipBuffer): Natural =
  ## Returns the amount of space available for reservation in buffer `bp`.
  let (res, _) = bp.calcReserve()
  res

func len*(bp: BipBuffer): Natural =
  ## Returns the amount of used space in buffer `bp`.
  len(bp.b) + len(bp.a)

proc reserve*(bp: var BipBuffer,
              size: Natural = 0): tuple[data: ptr byte, size: Natural] =
  ## Reserve `size` bytes in the buffer.
  ##
  ## If `size == 0` (the default), reserve all available space in the buffer.
  ##
  ## If there is not enough space in the buffer for the reservation, a
  ## Defect is raised.
  ##
  ## Returns the reserved range as a `(data, size)` tuple.
  const ErrorMessage = "Not enough space available"
  doAssert(size <= len(bp.data))
  let (availableSpace, reserveStart) = bp.calcReserve()
  if availableSpace == 0:
    raiseAssert ErrorMessage
  let reserveLength =
    if size == 0:
      availableSpace
    else:
      # Requesting more than is contiguously available cannot be satisfied.
      if size > availableSpace:
        raiseAssert ErrorMessage
      size
  bp.r = BipPos.init(reserveStart, Natural(reserveStart + reserveLength))
  (addr bp.data[bp.r.start], len(bp.r))

proc commit*(bp: var BipBuffer, size: Natural) =
  ## Updates the structure's pointers after new data has been inserted into
  ## the buffer.
  doAssert(len(bp.r) >= size,
           "Committed size cannot be larger than the previously reserved one")
  if size == 0:
    bp.r.reset()
    return

  let toCommit = min(size, len(bp.r))
  if len(bp.a) == 0 and len(bp.b) == 0:
    bp.a.start = bp.r.start
    bp.a.finish = bp.r.start + toCommit
  elif bp.r.start == bp.a.finish:
    bp.a.finish += toCommit
  else:
    bp.b.finish += toCommit
  bp.r.reset()

proc consume*(bp: var BipBuffer, size: Natural) =
  ## The procedure removes/frees `size` bytes from the buffer ``bp``.
  var currentSize = size
  if currentSize >= len(bp.a):
    currentSize -= len(bp.a)
    bp.a = bp.b
    bp.b.reset()
    if currentSize >= len(bp.a):
      currentSize -= len(bp.a)
      bp.a.reset()
    else:
      bp.a.start += currentSize
  else:
    bp.a.start += currentSize

iterator items*(bp: BipBuffer): byte =
  ## Iterates over all the bytes in the buffer.
  for index in bp.a.start ..< bp.a.finish:
    yield bp.data[index]
  for index in bp.b.start ..< bp.b.finish:
    yield bp.data[index]

iterator regions*(bp: var BipBuffer): tuple[data: ptr byte, size: Natural] =
  ## Iterates over all the regions (`a` and `b`) in the buffer.
+ if len(bp.a) > 0: + yield (addr bp.data[bp.a.start], len(bp.a)) + if len(bp.b) > 0: + yield (addr bp.data[bp.b.start], len(bp.b)) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 3d2f858..473cc38 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -9,7 +9,7 @@ {.push raises: [].} -import ../[config, asyncloop, asyncsync] +import ../[config, asyncloop, asyncsync, bipbuffer] import ../transports/[common, stream] export asyncloop, asyncsync, stream, common @@ -34,10 +34,11 @@ type AsyncStreamWriteEOFError* = object of AsyncStreamWriteError AsyncBuffer* = object - offset*: int - buffer*: seq[byte] + backend*: BipBuffer events*: array[2, AsyncEvent] + AsyncBufferRef* = ref AsyncBuffer + WriteType* = enum Pointer, Sequence, String @@ -73,7 +74,7 @@ type tsource*: StreamTransport readerLoop*: StreamReaderLoop state*: AsyncStreamState - buffer*: AsyncBuffer + buffer*: AsyncBufferRef udata: pointer error*: ref AsyncStreamError bytesCount*: uint64 @@ -96,85 +97,51 @@ type AsyncStreamRW* = AsyncStreamReader | AsyncStreamWriter -proc init*(t: typedesc[AsyncBuffer], size: int): AsyncBuffer = - AsyncBuffer( - buffer: newSeq[byte](size), - events: [newAsyncEvent(), newAsyncEvent()], - offset: 0 +proc new*(t: typedesc[AsyncBufferRef], size: int): AsyncBufferRef = + AsyncBufferRef( + backend: BipBuffer.init(size), + events: [newAsyncEvent(), newAsyncEvent()] ) -proc getBuffer*(sb: AsyncBuffer): pointer {.inline.} = - unsafeAddr sb.buffer[sb.offset] - -proc bufferLen*(sb: AsyncBuffer): int {.inline.} = - len(sb.buffer) - sb.offset - -proc getData*(sb: AsyncBuffer): pointer {.inline.} = - unsafeAddr sb.buffer[0] - -template dataLen*(sb: AsyncBuffer): int = - sb.offset - -proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} = - doAssert(index < sb.offset) - sb.buffer[index] - -proc update*(sb: var AsyncBuffer, size: int) {.inline.} = - sb.offset += size - -template wait*(sb: var AsyncBuffer): untyped = +template wait*(sb: AsyncBufferRef): untyped = sb.events[0].clear() sb.events[1].fire() sb.events[0].wait() -template transfer*(sb: var AsyncBuffer): untyped = +template transfer*(sb: AsyncBufferRef): untyped = sb.events[1].clear() sb.events[0].fire() sb.events[1].wait() -proc forget*(sb: var AsyncBuffer) {.inline.} = +proc forget*(sb: AsyncBufferRef) {.inline.} = sb.events[1].clear() sb.events[0].fire() -proc shift*(sb: var AsyncBuffer, size: int) {.inline.} = - if sb.offset > size: - moveMem(addr sb.buffer[0], addr sb.buffer[size], sb.offset - size) - sb.offset = sb.offset - size - else: - sb.offset = 0 - -proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} = - copyMem(cast[pointer](cast[uint](dest) + cast[uint](offset)), - unsafeAddr sb.buffer[0], length) - -proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte, +proc upload*(sb: AsyncBufferRef, pbytes: ptr byte, nbytes: int): Future[void] {. async: (raises: [CancelledError]).} = ## You can upload any amount of bytes to the buffer. If size of internal ## buffer is not enough to fit all the data at once, data will be uploaded ## via chunks of size up to internal buffer size. - var length = nbytes - var srcBuffer = cast[ptr UncheckedArray[byte]](pbytes) - var srcOffset = 0 + var + length = nbytes + srcBuffer = pbytes.toUnchecked() + offset = 0 + while length > 0: - let size = min(length, sb[].bufferLen()) + let size = min(length, sb.backend.availSpace()) if size == 0: - # Internal buffer is full, we need to transfer data to consumer. 
- await sb[].transfer() + # Internal buffer is full, we need to notify consumer. + await sb.transfer() else: + let (data, _) = sb.backend.reserve() # Copy data from `pbytes` to internal buffer. - copyMem(addr sb[].buffer[sb.offset], addr srcBuffer[srcOffset], size) - sb[].offset = sb[].offset + size - srcOffset = srcOffset + size + copyMem(data, addr srcBuffer[offset], size) + sb.backend.commit(size) + offset = offset + size length = length - size # We notify consumers that new data is available. - sb[].forget() - -template toDataOpenArray*(sb: AsyncBuffer): auto = - toOpenArray(sb.buffer, 0, sb.offset - 1) - -template toBufferOpenArray*(sb: AsyncBuffer): auto = - toOpenArray(sb.buffer, sb.offset, len(sb.buffer) - 1) + sb.forget() template copyOut*(dest: pointer, item: WriteItem, length: int) = if item.kind == Pointer: @@ -243,7 +210,7 @@ proc atEof*(rstream: AsyncStreamReader): bool = rstream.rsource.atEof() else: (rstream.state != AsyncStreamState.Running) and - (rstream.buffer.dataLen() == 0) + (len(rstream.buffer.backend) == 0) proc atEof*(wstream: AsyncStreamWriter): bool = ## Returns ``true`` is writing stream ``wstream`` closed or finished. @@ -331,12 +298,12 @@ template checkStreamFinished*(t: untyped) = template readLoop(body: untyped): untyped = while true: - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: if rstream.state == AsyncStreamState.Error: raise rstream.error let (consumed, done) = body - rstream.buffer.shift(consumed) + rstream.buffer.backend.consume(consumed) rstream.bytesCount = rstream.bytesCount + uint64(consumed) if done: break @@ -373,17 +340,23 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, if isNil(rstream.readerLoop): await readExactly(rstream.rsource, pbytes, nbytes) else: - var index = 0 - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var + index = 0 + pbuffer = pbytes.toUnchecked() readLoop(): - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: if rstream.atEof(): raise newAsyncStreamIncompleteError() - let count = min(nbytes - index, rstream.buffer.dataLen()) - if count > 0: - rstream.buffer.copyData(addr pbuffer[index], 0, count) - index += count - (consumed: count, done: index == nbytes) + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + let count = min(nbytes - index, rsize) + readed += count + if count > 0: + copyMem(addr pbuffer[index], region, count) + index += count + if index == nbytes: + break + (consumed: readed, done: index == nbytes) proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int): Future[int] {. @@ -407,15 +380,21 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, if isNil(rstream.readerLoop): return await readOnce(rstream.rsource, pbytes, nbytes) else: - var count = 0 + var + pbuffer = pbytes.toUnchecked() + index = 0 readLoop(): - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: (0, rstream.atEof()) else: - count = min(rstream.buffer.dataLen(), nbytes) - rstream.buffer.copyData(pbytes, 0, count) - (count, true) - return count + for (region, rsize) in rstream.buffer.backend.regions(): + let size = min(rsize, nbytes - index) + copyMem(addr pbuffer[index], region, size) + index += size + if index >= nbytes: + break + (index, true) + index proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, sep: seq[byte]): Future[int] {. 
@@ -456,28 +435,32 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, if isNil(rstream.readerLoop): return await readUntil(rstream.rsource, pbytes, nbytes, sep) else: - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) - var state = 0 - var k = 0 + var + pbuffer = pbytes.toUnchecked() + state = 0 + k = 0 readLoop(): if rstream.atEof(): raise newAsyncStreamIncompleteError() + var index = 0 - while index < rstream.buffer.dataLen(): + for ch in rstream.buffer.backend: if k >= nbytes: raise newAsyncStreamLimitError() - let ch = rstream.buffer[index] + inc(index) pbuffer[k] = ch inc(k) + if sep[state] == ch: inc(state) if state == len(sep): break else: state = 0 + (index, state == len(sep)) - return k + k proc readLine*(rstream: AsyncStreamReader, limit = 0, sep = "\r\n"): Future[string] {. @@ -507,18 +490,19 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, return await readLine(rstream.rsource, limit, sep) else: let lim = if limit <= 0: -1 else: limit - var state = 0 - var res = "" + var + state = 0 + res = "" + readLoop(): if rstream.atEof(): (0, true) else: var index = 0 - while index < rstream.buffer.dataLen(): - let ch = char(rstream.buffer[index]) + for ch in rstream.buffer.backend: inc(index) - if sep[state] == ch: + if sep[state] == char(ch): inc(state) if state == len(sep): break @@ -529,11 +513,14 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, res.add(sep[0 ..< missing]) else: res.add(sep[0 ..< state]) - res.add(ch) + state = 0 + + res.add(char(ch)) if len(res) == lim: break + (index, (state == len(sep)) or (lim == len(res))) - return res + res proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -555,15 +542,17 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. if isNil(rstream.readerLoop): return await read(rstream.rsource) else: - var res = newSeq[byte]() + var res: seq[byte] readLoop(): if rstream.atEof(): (0, true) else: - let count = rstream.buffer.dataLen() - res.add(rstream.buffer.buffer.toOpenArray(0, count - 1)) - (count, false) - return res + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + readed += rsize + res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) + (readed, false) + res proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -592,10 +581,13 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - let count = min(rstream.buffer.dataLen(), n - len(res)) - res.add(rstream.buffer.buffer.toOpenArray(0, count - 1)) - (count, len(res) == n) - return res + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + let count = min(rsize, n - len(res)) + readed += count + res.add(region.toUnchecked().toOpenArray(0, count - 1)) + (readed, len(res) == n) + res proc consume*(rstream: AsyncStreamReader): Future[int] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -622,9 +614,10 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {. if rstream.atEof(): (0, true) else: - res += rstream.buffer.dataLen() - (rstream.buffer.dataLen(), false) - return res + let used = len(rstream.buffer.backend) + res += used + (used, false) + res proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -652,13 +645,12 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {. 
else: var res = 0 readLoop(): - if rstream.atEof(): - (0, true) - else: - let count = min(rstream.buffer.dataLen(), n - res) - res += count - (count, res == n) - return res + let + used = len(rstream.buffer.backend) + count = min(used, n - res) + res += count + (count, res == n) + res proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -689,15 +681,18 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. await readMessage(rstream.rsource, pred) else: readLoop(): - let count = rstream.buffer.dataLen() - if count == 0: + if len(rstream.buffer.backend) == 0: if rstream.atEof(): pred([]) else: # Case, when transport's buffer is not yet filled with data. (0, false) else: - pred(rstream.buffer.buffer.toOpenArray(0, count - 1)) + var res: tuple[consumed: int, done: bool] + for (region, rsize) in rstream.buffer.backend.regions(): + res = pred(region.toUnchecked().toOpenArray(0, rsize - 1)) + break + res proc write*(wstream: AsyncStreamWriter, pbytes: pointer, nbytes: int) {. @@ -951,7 +946,7 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBuffer.init(bufferSize) + child.buffer = AsyncBufferRef.new(bufferSize) trackCounter(AsyncStreamReaderTrackerName) child.startReader() @@ -963,7 +958,7 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBuffer.init(bufferSize) + child.buffer = AsyncBufferRef.new(bufferSize) if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index 8d2e52c..0f7eba1 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -18,7 +18,7 @@ {.push raises: [].} import results -import ../[asyncloop, timer, config] +import ../[asyncloop, timer, bipbuffer, config] import asyncstream, ../transports/[stream, common] export asyncloop, asyncstream, stream, timer, common @@ -103,7 +103,7 @@ func endsWith(s, suffix: openArray[byte]): bool = proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var rstream = BoundedStreamReader(stream) rstream.state = AsyncStreamState.Running - var buffer = newSeq[byte](rstream.buffer.bufferLen()) + var buffer = newSeq[byte](rstream.buffer.backend.availSpace()) while true: let toRead = if rstream.boundSize.isNone(): @@ -127,7 +127,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. - await upload(addr rstream.buffer, addr buffer[0], length) + await upload(rstream.buffer, addr buffer[0], length) if rstream.state == AsyncStreamState.Running: rstream.state = AsyncStreamState.Finished else: @@ -135,7 +135,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. 
- await upload(addr rstream.buffer, addr buffer[0], res) + await upload(rstream.buffer, addr buffer[0], res) if (res < toRead) and rstream.rsource.atEof(): case rstream.cmpop @@ -151,7 +151,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. - await upload(addr rstream.buffer, addr buffer[0], res) + await upload(rstream.buffer, addr buffer[0], res) if (res < toRead) and rstream.rsource.atEof(): case rstream.cmpop diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index f3e73e0..b9475d5 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -11,7 +11,7 @@ {.push raises: [].} -import ../[asyncloop, timer, config] +import ../[asyncloop, timer, bipbuffer, config] import asyncstream, ../transports/[stream, common] import results export asyncloop, asyncstream, stream, timer, common, results @@ -118,11 +118,11 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var chunksize = cres.get() if chunksize > 0'u64: while chunksize > 0'u64: - let toRead = int(min(chunksize, - uint64(rstream.buffer.bufferLen()))) - await rstream.rsource.readExactly(rstream.buffer.getBuffer(), - toRead) - rstream.buffer.update(toRead) + let + (data, rsize) = rstream.buffer.backend.reserve() + toRead = int(min(chunksize, uint64(rsize))) + await rstream.rsource.readExactly(data, toRead) + rstream.buffer.backend.commit(toRead) await rstream.buffer.transfer() chunksize = chunksize - uint64(toRead) diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 86f6d4c..9d90ab7 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -242,7 +242,7 @@ proc tlsReadApp(engine: ptr SslEngineContext, try: var length = 0'u var buf = sslEngineRecvappBuf(engine[], length) - await upload(addr reader.buffer, buf, int(length)) + await upload(reader.buffer, buf, int(length)) sslEngineRecvappAck(engine[], length) TLSResult.Success except CancelledError: diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index f89a6b4..d639121 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -11,7 +11,7 @@ import std/deques when not(defined(windows)): import ".."/selectors2 -import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles] +import ".."/[asyncloop, osdefs, oserrno, osutils, handles] import "."/common import stew/ptrops diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index c80d992..b81a512 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -11,7 +11,7 @@ import std/deques import stew/ptrops -import ".."/[asyncloop, config, handles, osdefs, osutils, oserrno] +import ".."/[asyncloop, config, handles, bipbuffer, osdefs, osutils, oserrno] import ./common type @@ -72,8 +72,7 @@ when defined(windows): fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state reader: ReaderFuture # Current reader Future - buffer: seq[byte] # Reading buffer - offset: int # Reading buffer offset + buffer: BipBuffer # Reading buffer error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void].Raising([]) # Stream life future @@ -82,7 +81,6 @@ when defined(windows): wwsabuf: WSABUF # Writer WSABUF rovl: CustomOverlapped # Reader OVERLAPPED structure wovl: CustomOverlapped # Writer OVERLAPPED 
structure - roffset: int # Pending reading offset flags: set[TransportFlags] # Internal flags case kind*: TransportKind of TransportKind.Socket: @@ -99,8 +97,7 @@ else: fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state reader: ReaderFuture # Current reader Future - buffer: seq[byte] # Reading buffer - offset: int # Reading buffer offset + buffer: BipBuffer # Reading buffer error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void].Raising([]) # Stream life future @@ -184,14 +181,6 @@ template checkPending(t: untyped) = if not(isNil((t).reader)): raise newException(TransportError, "Read operation already pending!") -template shiftBuffer(t, c: untyped) = - if (t).offset > c: - if c > 0: - moveMem(addr((t).buffer[0]), addr((t).buffer[(c)]), (t).offset - (c)) - (t).offset = (t).offset - (c) - else: - (t).offset = 0 - template shiftVectorBuffer(v: var StreamVector, o: untyped) = (v).buf = cast[pointer](cast[uint]((v).buf) + uint(o)) (v).buflen -= int(o) @@ -228,6 +217,9 @@ proc clean(transp: StreamTransport) {.inline.} = transp.future.complete() GC_unref(transp) +template toUnchecked*(a: untyped): untyped = + cast[ptr UncheckedArray[byte]](a) + when defined(windows): template zeroOvelappedOffset(t: untyped) = @@ -245,9 +237,9 @@ when defined(windows): cast[HANDLE]((v).buflen) template setReaderWSABuffer(t: untyped) = - (t).rwsabuf.buf = cast[cstring]( - cast[uint](addr t.buffer[0]) + uint((t).roffset)) - (t).rwsabuf.len = ULONG(len((t).buffer) - (t).roffset) + let res = (t).buffer.reserve() + (t).rwsabuf.buf = cast[cstring](res.data) + (t).rwsabuf.len = uint32(res.size) template setWriterWSABuffer(t, v: untyped) = (t).wwsabuf.buf = cast[cstring](v.buf) @@ -381,8 +373,9 @@ when defined(windows): else: transp.queue.addFirst(vector) else: - let loop = getThreadDispatcher() - let size = min(uint32(getFileSize(vector)), 2_147_483_646'u32) + let + loop = getThreadDispatcher() + size = min(uint32(getFileSize(vector)), 2_147_483_646'u32) transp.wovl.setOverlappedOffset(vector.offset) var ret = loop.transmitFile(sock, getFileHandle(vector), size, @@ -481,29 +474,28 @@ when defined(windows): if bytesCount == 0: transp.state.incl({ReadEof, ReadPaused}) else: - if transp.offset != transp.roffset: - moveMem(addr transp.buffer[transp.offset], - addr transp.buffer[transp.roffset], - bytesCount) - transp.offset += int(bytesCount) - transp.roffset = transp.offset - if transp.offset == len(transp.buffer): + transp.buffer.commit(bytesCount) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) of ERROR_OPERATION_ABORTED, ERROR_CONNECTION_ABORTED, ERROR_BROKEN_PIPE: # CancelIO() interrupt or closeSocket() call. 
+ transp.buffer.commit(0) transp.state.incl(ReadPaused) of ERROR_NETNAME_DELETED, WSAECONNABORTED: + transp.buffer.commit(0) if transp.kind == TransportKind.Socket: transp.state.incl({ReadEof, ReadPaused}) else: transp.setReadError(err) of ERROR_PIPE_NOT_CONNECTED: + transp.buffer.commit(0) if transp.kind == TransportKind.Pipe: transp.state.incl({ReadEof, ReadPaused}) else: transp.setReadError(err) else: + transp.buffer.commit(0) transp.setReadError(err) transp.completeReader() @@ -524,7 +516,6 @@ when defined(windows): transp.state.incl(ReadPending) if transp.kind == TransportKind.Socket: let sock = SocketHandle(transp.fd) - transp.roffset = transp.offset transp.setReaderWSABuffer() let ret = wsaRecv(sock, addr transp.rwsabuf, 1, addr bytesCount, addr flags, @@ -549,7 +540,6 @@ when defined(windows): transp.completeReader() elif transp.kind == TransportKind.Pipe: let pipe = HANDLE(transp.fd) - transp.roffset = transp.offset transp.setReaderWSABuffer() let ret = readFile(pipe, cast[pointer](transp.rwsabuf.buf), DWORD(transp.rwsabuf.len), addr bytesCount, @@ -595,7 +585,7 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -616,7 +606,7 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.flags = flags transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() @@ -1390,11 +1380,12 @@ else: else: if transp.kind == TransportKind.Socket: while true: - let res = handleEintr( - osdefs.recv(fd, addr transp.buffer[transp.offset], - len(transp.buffer) - transp.offset, cint(0))) + let + (data, size) = transp.buffer.reserve() + res = handleEintr(osdefs.recv(fd, data, size, cint(0))) if res < 0: let err = osLastError() + transp.buffer.commit(0) case err of oserrno.ECONNRESET: transp.state.incl({ReadEof, ReadPaused}) @@ -1408,13 +1399,14 @@ else: discard removeReader2(transp.fd) elif res == 0: transp.state.incl({ReadEof, ReadPaused}) + transp.buffer.commit(0) let rres = removeReader2(transp.fd) if rres.isErr(): transp.state.incl(ReadError) transp.setReadError(rres.error()) else: - transp.offset += res - if transp.offset == len(transp.buffer): + transp.buffer.commit(res) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) let rres = removeReader2(transp.fd) if rres.isErr(): @@ -1424,23 +1416,25 @@ else: break elif transp.kind == TransportKind.Pipe: while true: - let res = handleEintr( - osdefs.read(cint(fd), addr transp.buffer[transp.offset], - len(transp.buffer) - transp.offset)) + let + (data, size) = transp.buffer.reserve() + res = handleEintr(osdefs.read(cint(fd), data, size)) if res < 0: let err = osLastError() + transp.buffer.commit(0) transp.state.incl(ReadPaused) transp.setReadError(err) discard removeReader2(transp.fd) elif res == 0: transp.state.incl({ReadEof, ReadPaused}) + transp.buffer.commit(0) let rres = removeReader2(transp.fd) if rres.isErr(): transp.state.incl(ReadError) transp.setReadError(rres.error()) else: - transp.offset += res - if transp.offset == len(transp.buffer): + transp.buffer.commit(res) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) let rres 
= removeReader2(transp.fd) if rres.isErr(): @@ -1458,7 +1452,7 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -1475,7 +1469,7 @@ else: transp = StreamTransport(kind: TransportKind.Pipe) transp.fd = fd - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -2339,7 +2333,7 @@ proc writeFile*(transp: StreamTransport, handle: int, proc atEof*(transp: StreamTransport): bool {.inline.} = ## Returns ``true`` if ``transp`` is at EOF. - (transp.offset == 0) and (ReadEof in transp.state) and + (len(transp.buffer) == 0) and (ReadEof in transp.state) and (ReadPaused in transp.state) template readLoop(name, body: untyped): untyped = @@ -2351,16 +2345,17 @@ template readLoop(name, body: untyped): untyped = if ReadClosed in transp.state: raise newException(TransportUseClosedError, "Attempt to read data from closed stream") - if transp.offset == 0: + if len(transp.buffer) == 0: # We going to raise an error, only if transport buffer is empty. if ReadError in transp.state: raise transp.getError() let (consumed, done) = body - transp.shiftBuffer(consumed) + transp.buffer.consume(consumed) if done: break - else: + + if len(transp.buffer) == 0: checkPending(transp) let fut = ReaderFuture.init(name) transp.reader = fut @@ -2403,17 +2398,23 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, if nbytes == 0: return - var index = 0 - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var + index = 0 + pbuffer = pbytes.toUnchecked() readLoop("stream.transport.readExactly"): - if transp.offset == 0: + if len(transp.buffer) == 0: if transp.atEof(): raise newException(TransportIncompleteError, "Data incomplete!") - let count = min(nbytes - index, transp.offset) - if count > 0: - copyMem(addr pbuffer[index], addr(transp.buffer[0]), count) - index += count - (consumed: count, done: index == nbytes) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + let count = min(nbytes - index, rsize) + readed += count + if count > 0: + copyMem(addr pbuffer[index], region, count) + index += count + if index == nbytes: + break + (consumed: readed, done: index == nbytes) proc readOnce*(transp: StreamTransport, pbytes: pointer, nbytes: int): Future[int] {. @@ -2425,15 +2426,21 @@ proc readOnce*(transp: StreamTransport, pbytes: pointer, doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes > 0, "nbytes must be positive integer") - var count = 0 + var + pbuffer = pbytes.toUnchecked() + index = 0 readLoop("stream.transport.readOnce"): - if transp.offset == 0: + if len(transp.buffer) == 0: (0, transp.atEof()) else: - count = min(transp.offset, nbytes) - copyMem(pbytes, addr(transp.buffer[0]), count) - (count, true) - return count + for (region, rsize) in transp.buffer.regions(): + let size = min(rsize, nbytes - index) + copyMem(addr pbuffer[index], region, size) + index += size + if index >= nbytes: + break + (index, true) + index proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, sep: seq[byte]): Future[int] {. 
@@ -2457,7 +2464,7 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, if nbytes == 0: raise newException(TransportLimitError, "Limit reached!") - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var pbuffer = pbytes.toUnchecked() var state = 0 var k = 0 @@ -2466,14 +2473,11 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, raise newException(TransportIncompleteError, "Data incomplete!") var index = 0 - - while index < transp.offset: + for ch in transp.buffer: if k >= nbytes: raise newException(TransportLimitError, "Limit reached!") - let ch = transp.buffer[index] inc(index) - pbuffer[k] = ch inc(k) @@ -2485,8 +2489,7 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, state = 0 (index, state == len(sep)) - - return k + k proc readLine*(transp: StreamTransport, limit = 0, sep = "\r\n"): Future[string] {. @@ -2503,46 +2506,52 @@ proc readLine*(transp: StreamTransport, limit = 0, ## If ``limit`` more then 0, then read is limited to ``limit`` bytes. let lim = if limit <= 0: -1 else: limit var state = 0 + var res: string readLoop("stream.transport.readLine"): if transp.atEof(): (0, true) else: var index = 0 - while index < transp.offset: - let ch = char(transp.buffer[index]) - index += 1 + for ch in transp.buffer: + inc(index) - if sep[state] == ch: + if sep[state] == char(ch): inc(state) if state == len(sep): break else: if state != 0: if limit > 0: - let missing = min(state, lim - len(result) - 1) - result.add(sep[0 ..< missing]) + let missing = min(state, lim - len(res) - 1) + res.add(sep[0 ..< missing]) else: - result.add(sep[0 ..< state]) + res.add(sep[0 ..< state]) state = 0 - result.add(ch) - if len(result) == lim: + res.add(char(ch)) + if len(res) == lim: break - (index, (state == len(sep)) or (lim == len(result))) + (index, (state == len(sep)) or (lim == len(res))) + res proc read*(transp: StreamTransport): Future[seq[byte]] {. async: (raises: [TransportError, CancelledError]).} = ## Read all bytes from transport ``transp``. ## ## This procedure allocates buffer seq[byte] and return it as result. + var res: seq[byte] readLoop("stream.transport.read"): if transp.atEof(): (0, true) else: - result.add(transp.buffer.toOpenArray(0, transp.offset - 1)) - (transp.offset, false) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + readed += rsize + res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) + (readed, false) + res proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. async: (raises: [TransportError, CancelledError]).} = @@ -2550,27 +2559,35 @@ proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. ## ## This procedure allocates buffer seq[byte] and return it as result. if n <= 0: - return await transp.read() + await transp.read() else: + var res: seq[byte] readLoop("stream.transport.read"): if transp.atEof(): (0, true) else: - let count = min(transp.offset, n - len(result)) - result.add(transp.buffer.toOpenArray(0, count - 1)) - (count, len(result) == n) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + let count = min(rsize, n - len(res)) + readed += count + res.add(region.toUnchecked().toOpenArray(0, count - 1)) + (readed, len(res) == n) + res proc consume*(transp: StreamTransport): Future[int] {. async: (raises: [TransportError, CancelledError]).} = ## Consume all bytes from transport ``transp`` and discard it. ## ## Return number of bytes actually consumed and discarded. 
+ var res = 0 readLoop("stream.transport.consume"): if transp.atEof(): (0, true) else: - result += transp.offset - (transp.offset, false) + let used = len(transp.buffer) + res += used + (used, false) + res proc consume*(transp: StreamTransport, n: int): Future[int] {. async: (raises: [TransportError, CancelledError]).} = @@ -2579,15 +2596,19 @@ proc consume*(transp: StreamTransport, n: int): Future[int] {. ## ## Return number of bytes actually consumed and discarded. if n <= 0: - return await transp.consume() + await transp.consume() else: + var res = 0 readLoop("stream.transport.consume"): if transp.atEof(): (0, true) else: - let count = min(transp.offset, n - result) - result += count - (count, result == n) + let + used = len(transp.buffer) + count = min(used, n - res) + res += count + (count, res == n) + res proc readMessage*(transp: StreamTransport, predicate: ReadMessagePredicate) {. @@ -2605,14 +2626,18 @@ proc readMessage*(transp: StreamTransport, ## ``predicate`` callback will receive (zero-length) openArray, if transport ## is at EOF. readLoop("stream.transport.readMessage"): - if transp.offset == 0: + if len(transp.buffer) == 0: if transp.atEof(): predicate([]) else: # Case, when transport's buffer is not yet filled with data. (0, false) else: - predicate(transp.buffer.toOpenArray(0, transp.offset - 1)) + var res: tuple[consumed: int, done: bool] + for (region, rsize) in transp.buffer.regions(): + res = predicate(region.toUnchecked().toOpenArray(0, rsize - 1)) + break + res proc join*(transp: StreamTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = @@ -2630,7 +2655,7 @@ proc join*(transp: StreamTransport): Future[void] {. retFuture.cancelCallback = cancel else: retFuture.complete() - return retFuture + retFuture proc closed*(transp: StreamTransport): bool {.inline.} = ## Returns ``true`` if transport in closed state. From 2d85229dce6a2c0229d5c1985c6dce211ed9e8ee Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 4 Apr 2024 00:30:01 +0300 Subject: [PATCH 16/37] Add `join()` operation to wait for future completion. (#525) * Add `join()` operation to wait for future completion without cancelling it when `join()` got cancelled. * Start using join() operation. --- chronos/apps/http/httpserver.nim | 18 +---- chronos/internal/asyncfutures.nim | 33 ++++++++++ chronos/streams/asyncstream.nim | 19 +----- chronos/transports/datagram.nim | 16 +---- chronos/transports/stream.nim | 15 +---- tests/testfut.nim | 106 ++++++++++++++++++++++++++++++ 6 files changed, 143 insertions(+), 64 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index c1e5279..1adb8fc 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -1187,23 +1187,7 @@ proc closeWait*(server: HttpServerRef) {.async: (raises: []).} = proc join*(server: HttpServerRef): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until HTTP server will not be closed. 
- var retFuture = newFuture[void]("http.server.join") - - proc continuation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - server.lifetime.removeCallback(continuation, cast[pointer](retFuture)) - - if server.state == ServerClosed: - retFuture.complete() - else: - server.lifetime.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - - retFuture + server.lifetime.join() proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] = ## Create new MultiPartReader interface for specific request. diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 49c6acd..7f93b0e 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1607,6 +1607,39 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. else: wait(fut, timeout.milliseconds()) +proc join*(future: FutureBase): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = + ## Returns a future which will complete once future ``future`` completes. + ## + ## This primitive helps to carefully monitor ``future`` state, in case of + ## cancellation ``join`` operation it will not going to cancel ``future``. + ## + ## If ``future`` is already completed - ``join`` will return completed + ## future immediately. + let retFuture = newFuture[void]("chronos.join()") + + proc continuation(udata: pointer) {.gcsafe.} = + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe.} = + future.removeCallback(continuation, cast[pointer](retFuture)) + + if not(future.finished()): + future.addCallback(continuation, cast[pointer](retFuture)) + retFuture.cancelCallback = cancellation + else: + retFuture.complete() + + retFuture + +proc join*(future: SomeFuture): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = + ## Returns a future which will complete once future ``future`` completes. + ## + ## This primitive helps to carefully monitor ``future`` state, in case of + ## cancellation ``join`` operation it will not going to cancel ``future``. + join(FutureBase(future)) + when defined(windows): import ../osdefs diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 473cc38..bb878db 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -836,24 +836,7 @@ proc join*(rw: AsyncStreamRW): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Get Future[void] which will be completed when stream become finished or ## closed. - when rw is AsyncStreamReader: - var retFuture = newFuture[void]("async.stream.reader.join") - else: - var retFuture = newFuture[void]("async.stream.writer.join") - - proc continuation(udata: pointer) {.gcsafe, raises:[].} = - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe, raises:[].} = - rw.future.removeCallback(continuation, cast[pointer](retFuture)) - - if not(rw.future.finished()): - rw.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - else: - retFuture.complete() - - return retFuture + rw.future.join() proc close*(rw: AsyncStreamRW) = ## Close and frees resources of stream ``rw``. 
diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index d639121..7f47142 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -827,21 +827,7 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, proc join*(transp: DatagramTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until the transport ``transp`` will be closed. - let retFuture = newFuture[void]("datagram.transport.join") - - proc continuation(udata: pointer) {.gcsafe.} = - retFuture.complete() - - proc cancel(udata: pointer) {.gcsafe.} = - transp.future.removeCallback(continuation, cast[pointer](retFuture)) - - if not(transp.future.finished()): - transp.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancel - else: - retFuture.complete() - - return retFuture + transp.future.join() proc closed*(transp: DatagramTransport): bool {.inline.} = ## Returns ``true`` if transport in closed state. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index b81a512..7b5925b 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1780,20 +1780,7 @@ proc stop*(server: StreamServer) {.raises: [TransportOsError].} = proc join*(server: StreamServer): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Waits until ``server`` is not closed. - var retFuture = newFuture[void]("stream.transport.server.join") - - proc continuation(udata: pointer) = - retFuture.complete() - - proc cancel(udata: pointer) = - server.loopFuture.removeCallback(continuation, cast[pointer](retFuture)) - - if not(server.loopFuture.finished()): - server.loopFuture.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancel - else: - retFuture.complete() - return retFuture + server.loopFuture.join() proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, diff --git a/tests/testfut.nim b/tests/testfut.nim index c2231f1..1cf0aed 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -2048,6 +2048,112 @@ suite "Future[T] behavior test suite": future1.cancelled() == true future2.cancelled() == true + asyncTest "join() test": + proc joinFoo0(future: FutureBase) {.async.} = + await join(future) + + proc joinFoo1(future: Future[void]) {.async.} = + await join(future) + + proc joinFoo2(future: Future[void]) {. + async: (raises: [CancelledError]).} = + await join(future) + + let + future0 = newFuture[void]() + future1 = newFuture[void]() + future2 = Future[void].Raising([CancelledError]).init() + + let + resfut0 = joinFoo0(future0) + resfut1 = joinFoo1(future1) + resfut2 = joinFoo2(future2) + + check: + resfut0.finished() == false + resfut1.finished() == false + resfut2.finished() == false + + future0.complete() + future1.complete() + future2.complete() + + let res = + try: + await noCancel allFutures(resfut0, resfut1, resfut2).wait(1.seconds) + true + except AsyncTimeoutError: + false + + check: + res == true + resfut0.finished() == true + resfut1.finished() == true + resfut2.finished() == true + future0.finished() == true + future1.finished() == true + future2.finished() == true + + asyncTest "join() cancellation test": + proc joinFoo0(future: FutureBase) {.async.} = + await join(future) + + proc joinFoo1(future: Future[void]) {.async.} = + await join(future) + + proc joinFoo2(future: Future[void]) {. 
+        async: (raises: [CancelledError]).} =
+      await join(future)
+
+    let
+      future0 = newFuture[void]()
+      future1 = newFuture[void]()
+      future2 = Future[void].Raising([CancelledError]).init()
+
+    let
+      resfut0 = joinFoo0(future0)
+      resfut1 = joinFoo1(future1)
+      resfut2 = joinFoo2(future2)
+
+    check:
+      resfut0.finished() == false
+      resfut1.finished() == false
+      resfut2.finished() == false
+
+    let
+      cancelfut0 = cancelAndWait(resfut0)
+      cancelfut1 = cancelAndWait(resfut1)
+      cancelfut2 = cancelAndWait(resfut2)
+
+    let res =
+      try:
+        await noCancel allFutures(cancelfut0, cancelfut1,
+                                  cancelfut2).wait(1.seconds)
+        true
+      except AsyncTimeoutError:
+        false
+
+    check:
+      res == true
+      cancelfut0.finished() == true
+      cancelfut1.finished() == true
+      cancelfut2.finished() == true
+      resfut0.cancelled() == true
+      resfut1.cancelled() == true
+      resfut2.cancelled() == true
+      future0.finished() == false
+      future1.finished() == false
+      future2.finished() == false
+
+    future0.complete()
+    future1.complete()
+    future2.complete()
+
+    check:
+      future0.finished() == true
+      future1.finished() == true
+      future2.finished() == true
+
   test "Sink with literals":
     # https://github.com/nim-lang/Nim/issues/22175
     let fut = newFuture[string]()

From 8e49df14007e27370cd1ce77edb2843783b45e6d Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Sun, 7 Apr 2024 07:03:12 +0300
Subject: [PATCH 17/37] Ensure that all buffers used inside the HTTP client
 follow the original buffer size. (#530)

Ensure that the buffer size cannot be lower than the default size.
---
 chronos/apps/http/httpclient.nim | 18 ++++++++++++------
 chronos/streams/asyncstream.nim  |  6 ++++--
 chronos/transports/stream.nim    | 12 ++++++++----
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim
index 33a6b7f..414b1d3 100644
--- a/chronos/apps/http/httpclient.nim
+++ b/chronos/apps/http/httpclient.nim
@@ -567,7 +567,8 @@ proc new(
       tls =
         try:
           newTLSClientAsyncStream(treader, twriter, ha.hostname,
-                                  flags = session.flags.getTLSFlags())
+                                  flags = session.flags.getTLSFlags(),
+                                  bufferSize = session.connectionBufferSize)
         except TLSStreamInitError as exc:
           return err(exc.msg)
 
@@ -1327,13 +1328,18 @@ proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader {.
let reader = case response.bodyFlag of HttpClientBodyFlag.Sized: - let bstream = newBoundedStreamReader(response.connection.reader, - response.contentLength) - newHttpBodyReader(bstream) + newHttpBodyReader( + newBoundedStreamReader( + response.connection.reader, response.contentLength, + bufferSize = response.session.connectionBufferSize)) of HttpClientBodyFlag.Chunked: - newHttpBodyReader(newChunkedStreamReader(response.connection.reader)) + newHttpBodyReader( + newChunkedStreamReader( + response.connection.reader, + bufferSize = response.session.connectionBufferSize)) of HttpClientBodyFlag.Custom: - newHttpBodyReader(newAsyncStreamReader(response.connection.reader)) + newHttpBodyReader( + newAsyncStreamReader(response.connection.reader)) response.connection.state = HttpClientConnectionState.ResponseBodyReceiving response.reader = reader response.reader diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index bb878db..301b086 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -929,7 +929,8 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBufferRef.new(bufferSize) + let size = max(AsyncStreamDefaultBufferSize, bufferSize) + child.buffer = AsyncBufferRef.new(size) trackCounter(AsyncStreamReaderTrackerName) child.startReader() @@ -941,7 +942,8 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBufferRef.new(bufferSize) + let size = max(AsyncStreamDefaultBufferSize, bufferSize) + child.buffer = AsyncBufferRef.new(size) if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 7b5925b..0f006b8 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -585,7 +585,8 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -606,7 +607,8 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.flags = flags transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() @@ -1452,7 +1454,8 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -1469,7 +1472,8 @@ else: transp = StreamTransport(kind: TransportKind.Pipe) transp.fd = fd - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( From 
0d050d582306e8c521c3a4a6f6dcb3c83c93a90f Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Sat, 13 Apr 2024 03:04:42 +0300
Subject: [PATCH 18/37] Add automatic constructors for TCP and UDP transports.
 (#512)

* Add automatic constructors for TCP and UDP transports.
* Add port number argument. Add some documentation comments. Fix tests.
* Make datagram test use request/response scheme.
* Add helper.
* Fix issue with non-zero port setups. Add test.
* Fix tests to probe ports.
* Attempt to fix MacOS issue.
* Add Opt[IpAddress]. Make IPv4 mapping to IPv6 space automatic.
* Add tests.
* Add stream capabilities.
* Fix Linux issues.
* Make getTransportFlags() available for all OSes.
* Fix one more compilation issue.
* Work around a weird compiler bug.
* Fix forgotten typed version of constructor.
* Make single source for address calculation.
* Add one more check into tests.
* Fix flags not being set in transport constructor.
* Fix post-rebase issues with flags not being set.
* Address review comments.
---
 chronos/transports/common.nim   |  36 +-
 chronos/transports/datagram.nim | 287 ++++++++++++---
 chronos/transports/stream.nim   | 595 +++++++++++++++++++++-----------
 tests/testdatagram.nim          | 342 ++++++++++++++++++
 tests/teststream.nim            | 254 ++++++++++++++
 5 files changed, 1257 insertions(+), 257 deletions(-)

diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim
index 8fcf0eb..6419b00 100644
--- a/chronos/transports/common.nim
+++ b/chronos/transports/common.nim
@@ -10,6 +10,7 @@
 {.push raises: [].}
 
 import std/[strutils]
+import results
 import stew/[base10, byteutils]
 import ".."/[config, asyncloop, osdefs, oserrno, handles]
 
@@ -18,7 +19,7 @@ from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress,
 from std/nativesockets import toInt, `$`
 
 export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType,
-       Protocol, Port, toInt, `$`
+       Protocol, Port, toInt, `$`, results
 
 const
   DefaultStreamBufferSize* = chronosTransportDefaultBufferSize
@@ -29,7 +30,7 @@ type
   ServerFlags* = enum
     ## Server's flags
     ReuseAddr, ReusePort, TcpNoDelay, NoAutoRead, GCUserData, FirstPipe,
-    NoPipeFlash, Broadcast
+    NoPipeFlash, Broadcast, V4Mapped
 
   DualStackType* {.pure.} = enum
     Auto, Enabled, Disabled, Default
@@ -200,6 +201,15 @@ proc `$`*(address: TransportAddress): string =
   of AddressFamily.None:
     "None"
 
+proc toIpAddress*(address: TransportAddress): IpAddress =
+  case address.family
+  of AddressFamily.IPv4:
+    IpAddress(family: IpAddressFamily.IPv4, address_v4: address.address_v4)
+  of AddressFamily.IPv6:
+    IpAddress(family: IpAddressFamily.IPv6, address_v6: address.address_v6)
+  else:
+    raiseAssert "IpAddress does not support address family " & $address.family
+
 proc toHex*(address: TransportAddress): string =
   ## Returns hexadecimal representation of ``address``.
   case address.family
@@ -783,3 +793,25 @@ proc setDualstack*(socket: AsyncFD,
     else:
       ?
getDomain(socket) setDualstack(socket, family, flag) + +proc getAutoAddress*(port: Port): TransportAddress = + var res = + if isAvailable(AddressFamily.IPv6): + AnyAddress6 + else: + AnyAddress + res.port = port + res + +proc getAutoAddresses*( + localPort: Port, + remotePort: Port +): tuple[local: TransportAddress, remote: TransportAddress] = + var (local, remote) = + if isAvailable(AddressFamily.IPv6): + (AnyAddress6, AnyAddress6) + else: + (AnyAddress, AnyAddress) + local.port = localPort + remote.port = remotePort + (local, remote) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 7f47142..fdb406b 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -10,11 +10,14 @@ {.push raises: [].} import std/deques +import results when not(defined(windows)): import ".."/selectors2 import ".."/[asyncloop, osdefs, oserrno, osutils, handles] -import "."/common +import "."/[common, ipnet] import stew/ptrops +export results + type VectorKind = enum WithoutAddress, WithAddress @@ -60,29 +63,78 @@ type const DgramTransportTrackerName* = "datagram.transport" +proc getRemoteAddress(transp: DatagramTransport, + address: Sockaddr_storage, length: SockLen, + ): TransportAddress = + var raddr: TransportAddress + fromSAddr(unsafeAddr address, length, raddr) + if ServerFlags.V4Mapped in transp.flags: + if raddr.isV4Mapped(): raddr.toIPv4() else: raddr + else: + raddr + +proc getRemoteAddress(transp: DatagramTransport): TransportAddress = + transp.getRemoteAddress(transp.raddr, transp.ralen) + +proc setRemoteAddress(transp: DatagramTransport, + address: TransportAddress): TransportAddress = + let + fixedAddress = + when defined(windows): + windowsAnyAddressFix(address) + else: + address + remoteAddress = + if ServerFlags.V4Mapped in transp.flags: + if address.family == AddressFamily.IPv4: + fixedAddress.toIPv6() + else: + fixedAddress + else: + fixedAddress + toSAddr(remoteAddress, transp.waddr, transp.walen) + remoteAddress + +proc remoteAddress2*( + transp: DatagramTransport +): Result[TransportAddress, OSErrorCode] = + ## Returns ``transp`` remote socket address. + if transp.remote.family == AddressFamily.None: + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) + if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), + addr slen) != 0: + return err(osLastError()) + transp.remote = transp.getRemoteAddress(saddr, slen) + ok(transp.remote) + +proc localAddress2*( + transp: DatagramTransport +): Result[TransportAddress, OSErrorCode] = + ## Returns ``transp`` local socket address. + if transp.local.family == AddressFamily.None: + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) + if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), + addr slen) != 0: + return err(osLastError()) + fromSAddr(addr saddr, slen, transp.local) + ok(transp.local) + +func toException(v: OSErrorCode): ref TransportOsError = + getTransportOsError(v) + proc remoteAddress*(transp: DatagramTransport): TransportAddress {. raises: [TransportOsError].} = ## Returns ``transp`` remote socket address. - if transp.remote.family == AddressFamily.None: - var saddr: Sockaddr_storage - var slen = SockLen(sizeof(saddr)) - if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), - addr slen) != 0: - raiseTransportOsError(osLastError()) - fromSAddr(addr saddr, slen, transp.remote) - transp.remote + remoteAddress2(transp).tryGet() proc localAddress*(transp: DatagramTransport): TransportAddress {. 
    raises: [TransportOsError].} =
-  ## Returns ``transp`` local socket address.
-  if transp.local.family == AddressFamily.None:
-    var saddr: Sockaddr_storage
-    var slen = SockLen(sizeof(saddr))
-    if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
-                   addr slen) != 0:
-      raiseTransportOsError(osLastError())
-    fromSAddr(addr saddr, slen, transp.local)
-  transp.local
+  ## Returns ``transp`` local socket address.
+  localAddress2(transp).tryGet()
 
 template setReadError(t, e: untyped) =
   (t).state.incl(ReadError)
@@ -124,8 +176,8 @@ when defined(windows):
         transp.setWriterWSABuffer(vector)
         let ret =
           if vector.kind == WithAddress:
-            var fixedAddress = windowsAnyAddressFix(vector.address)
-            toSAddr(fixedAddress, transp.waddr, transp.walen)
+            # We only need `Sockaddr_storage` data here, so result discarded.
+            discard transp.setRemoteAddress(vector.address)
             wsaSendTo(fd, addr transp.wwsabuf, DWORD(1), addr bytesCount,
                       DWORD(0), cast[ptr SockAddr](addr transp.waddr),
                       cint(transp.walen),
@@ -159,22 +211,24 @@ when defined(windows):
 
   proc readDatagramLoop(udata: pointer) =
     var
       bytesCount: uint32
-      raddr: TransportAddress
-    var ovl = cast[PtrCustomOverlapped](udata)
-    var transp = cast[DatagramTransport](ovl.data.udata)
+      ovl = cast[PtrCustomOverlapped](udata)
+
+    let transp = cast[DatagramTransport](ovl.data.udata)
+
     while true:
       if ReadPending in transp.state:
         ## Continuation
         transp.state.excl(ReadPending)
-        let err = transp.rovl.data.errCode
+        let
+          err = transp.rovl.data.errCode
+          remoteAddress = transp.getRemoteAddress()
         case err
         of OSErrorCode(-1):
           let bytesCount = transp.rovl.data.bytesCount
           if bytesCount == 0:
             transp.state.incl({ReadEof, ReadPaused})
-          fromSAddr(addr transp.raddr, transp.ralen, raddr)
           transp.buflen = int(bytesCount)
-          asyncSpawn transp.function(transp, raddr)
+          asyncSpawn transp.function(transp, remoteAddress)
         of ERROR_OPERATION_ABORTED:
           # CancelIO() interrupt or closeSocket() call.
           transp.state.incl(ReadPaused)
@@ -189,7 +243,7 @@ when defined(windows):
           transp.setReadError(err)
           transp.state.incl(ReadPaused)
           transp.buflen = 0
-          asyncSpawn transp.function(transp, raddr)
+          asyncSpawn transp.function(transp, remoteAddress)
       else:
         ## Initiation
         if transp.state * {ReadEof, ReadClosed, ReadError} == {}:
@@ -220,7 +274,7 @@
             transp.state.incl(ReadPaused)
             transp.setReadError(err)
             transp.buflen = 0
-            asyncSpawn transp.function(transp, raddr)
+            asyncSpawn transp.function(transp, transp.getRemoteAddress())
       else:
         # Transport closure happens in callback, and we not started new
         # WSARecvFrom session.
@@ -341,18 +395,25 @@ when defined(windows):
         closeSocket(localSock)
       raiseTransportOsError(err)
 
+    res.flags =
+      block:
+        # Add `V4Mapped` flag when `::` address is used and dualstack is
+        # set to enabled or auto.
+ var res = flags + if (local.family == AddressFamily.IPv6) and local.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(ServerFlags.V4Mapped) + res + if remote.port != Port(0): - var fixedAddress = windowsAnyAddressFix(remote) - var saddr: Sockaddr_storage - var slen: SockLen - toSAddr(fixedAddress, saddr, slen) - if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr), - slen) != 0: + let remoteAddress = res.setRemoteAddress(remote) + if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr), + res.walen) != 0: let err = osLastError() if sock == asyncInvalidSocket: closeSocket(localSock) raiseTransportOsError(err) - res.remote = fixedAddress + res.remote = remoteAddress res.fd = localSock res.function = cbproc @@ -362,12 +423,12 @@ when defined(windows): res.state = {ReadPaused, WritePaused} res.future = Future[void].Raising([]).init( "datagram.transport", {FutureFlag.OwnCancelSchedule}) - res.rovl.data = CompletionData(cb: readDatagramLoop, - udata: cast[pointer](res)) - res.wovl.data = CompletionData(cb: writeDatagramLoop, - udata: cast[pointer](res)) - res.rwsabuf = WSABUF(buf: cast[cstring](baseAddr res.buffer), - len: ULONG(len(res.buffer))) + res.rovl.data = CompletionData( + cb: readDatagramLoop, udata: cast[pointer](res)) + res.wovl.data = CompletionData( + cb: writeDatagramLoop, udata: cast[pointer](res)) + res.rwsabuf = WSABUF( + buf: cast[cstring](baseAddr res.buffer), len: ULONG(len(res.buffer))) GC_ref(res) # Start tracking transport trackCounter(DgramTransportTrackerName) @@ -380,10 +441,10 @@ else: # Linux/BSD/MacOS part proc readDatagramLoop(udata: pointer) {.raises: [].}= - var raddr: TransportAddress doAssert(not isNil(udata)) - let transp = cast[DatagramTransport](udata) - let fd = SocketHandle(transp.fd) + let + transp = cast[DatagramTransport](udata) + fd = SocketHandle(transp.fd) if int(fd) == 0: ## This situation can be happen, when there events present ## after transport was closed. @@ -398,9 +459,8 @@ else: cast[ptr SockAddr](addr transp.raddr), addr transp.ralen) if res >= 0: - fromSAddr(addr transp.raddr, transp.ralen, raddr) transp.buflen = res - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, transp.getRemoteAddress()) else: let err = osLastError() case err @@ -409,14 +469,15 @@ else: else: transp.buflen = 0 transp.setReadError(err) - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, transp.getRemoteAddress()) break proc writeDatagramLoop(udata: pointer) = var res: int doAssert(not isNil(udata)) - var transp = cast[DatagramTransport](udata) - let fd = SocketHandle(transp.fd) + let + transp = cast[DatagramTransport](udata) + fd = SocketHandle(transp.fd) if int(fd) == 0: ## This situation can be happen, when there events present ## after transport was closed. @@ -428,7 +489,8 @@ else: let vector = transp.queue.popFirst() while true: if vector.kind == WithAddress: - toSAddr(vector.address, transp.waddr, transp.walen) + # We only need `Sockaddr_storage` data here, so result discarded. + discard transp.setRemoteAddress(vector.address) res = osdefs.sendto(fd, vector.buf, vector.buflen, MSG_NOSIGNAL, cast[ptr SockAddr](addr transp.waddr), transp.walen) @@ -551,21 +613,28 @@ else: closeSocket(localSock) raiseTransportOsError(err) + res.flags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res = flags + if (local.family == AddressFamily.IPv6) and local.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(ServerFlags.V4Mapped) + res + if remote.port != Port(0): - var saddr: Sockaddr_storage - var slen: SockLen - toSAddr(remote, saddr, slen) - if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr), - slen) != 0: + let remoteAddress = res.setRemoteAddress(remote) + if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr), + res.walen) != 0: let err = osLastError() if sock == asyncInvalidSocket: closeSocket(localSock) raiseTransportOsError(err) - res.remote = remote + res.remote = remoteAddress res.fd = localSock res.function = cbproc - res.flags = flags res.buffer = newSeq[byte](bufferSize) res.queue = initDeque[GramVector]() res.udata = udata @@ -605,6 +674,24 @@ proc close*(transp: DatagramTransport) = transp.state.incl({WriteClosed, ReadClosed}) closeSocket(transp.fd, continuation) +proc getTransportAddresses( + local, remote: Opt[IpAddress], + localPort, remotePort: Port +): tuple[local: TransportAddress, remote: TransportAddress] = + let + (localAuto, remoteAuto) = getAutoAddresses(localPort, remotePort) + lres = + if local.isSome(): + initTAddress(local.get(), localPort) + else: + localAuto + rres = + if remote.isSome(): + initTAddress(remote.get(), remotePort) + else: + remoteAuto + (lres, rres) + proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback, remote: TransportAddress, local: TransportAddress, @@ -824,6 +911,92 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, cast[pointer](udata), child, bufSize, ttl, dualstack) +proc newDatagramTransport*(cbproc: DatagramCallback, + localPort: Port, + remotePort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + remote: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError].} = + ## Create new UDP datagram transport (IPv6) and bind it to ANY_ADDRESS. + ## Depending on OS settings procedure perform an attempt to create transport + ## using IPv6 ANY_ADDRESS, if its not available it will try to bind transport + ## to IPv4 ANY_ADDRESS. + ## + ## ``cbproc`` - callback which will be called, when new datagram received. + ## ``localPort`` - local peer's port number. + ## ``remotePort`` - remote peer's port number. + ## ``local`` - optional local peer's IPv4/IPv6 address. + ## ``remote`` - optional remote peer's IPv4/IPv6 address. + ## ``sock`` - application-driven socket to use. + ## ``flags`` - flags that will be applied to socket. + ## ``udata`` - custom argument which will be passed to ``cbproc``. + ## ``bufSize`` - size of internal buffer. + ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has + ## ``Broadcast`` option). + let + (localHost, remoteHost) = + getTransportAddresses(local, remote, localPort, remotePort) + newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket, + flags, cast[pointer](udata), child, bufSize, + ttl, dualstack) + +proc newDatagramTransport*(cbproc: DatagramCallback, + localPort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. 
+    raises: [TransportOsError].} =
+  ## Create new UDP datagram transport (IPv6) and bind it to ANY_ADDRESS.
+  ## Depending on OS settings, the procedure attempts to create the transport
+  ## using the IPv6 ANY_ADDRESS; if that is not available, it falls back to
+  ## binding the transport to the IPv4 ANY_ADDRESS.
+  ##
+  ## ``cbproc`` - callback which will be called when a new datagram is
+  ## received.
+  ## ``localPort`` - local peer's port number.
+  ## ``remotePort`` - remote peer's port number.
+  ## ``local`` - optional local peer's IPv4/IPv6 address.
+  ## ``remote`` - optional remote peer's IPv4/IPv6 address.
+  ## ``flags`` - flags that will be applied to the socket.
+  ## ``udata`` - custom argument which will be passed to ``cbproc``.
+  ## ``bufSize`` - size of internal buffer.
+  ## ``ttl`` - TTL for UDP datagram packet (only usable when ``flags``
+  ## includes the ``Broadcast`` option).
+  let
+    (localHost, remoteHost) =
+      getTransportAddresses(local, remote, localPort, remotePort)
+  newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket,
+                             flags, cast[pointer](udata), child, bufSize,
+                             ttl, dualstack)
+
+proc newDatagramTransport*(cbproc: DatagramCallback,
+                           localPort: Port,
+                           local: Opt[IpAddress] = Opt.none(IpAddress),
+                           flags: set[ServerFlags] = {},
+                           udata: pointer = nil,
+                           child: DatagramTransport = nil,
+                           bufSize: int = DefaultDatagramBufferSize,
+                           ttl: int = 0,
+                           dualstack = DualStackType.Auto
+                          ): DatagramTransport {.
+    raises: [TransportOsError].} =
+  newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress),
+                       flags, udata, child, bufSize, ttl, dualstack)
+
+proc newDatagramTransport*[T](cbproc: DatagramCallback,
+                              localPort: Port,
+                              remotePort: Port,
+                              local: Opt[IpAddress] = Opt.none(IpAddress),
+                              remote: Opt[IpAddress] = Opt.none(IpAddress),
+                              flags: set[ServerFlags] = {},
+                              udata: ref T,
+                              child: DatagramTransport = nil,
+                              bufSize: int = DefaultDatagramBufferSize,
+                              ttl: int = 0,
+                              dualstack = DualStackType.Auto
+                             ): DatagramTransport {.
+    raises: [TransportOsError].} =
+  let
+    (localHost, remoteHost) =
+      getTransportAddresses(local, remote, localPort, remotePort)
+    fflags = flags + {GCUserData}
+  GC_ref(udata)
+  newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket,
+                             fflags, cast[pointer](udata), child, bufSize, ttl,
+                             dualstack)
+
+proc newDatagramTransport*[T](cbproc: DatagramCallback,
+                              localPort: Port,
+                              local: Opt[IpAddress] = Opt.none(IpAddress),
+                              flags: set[ServerFlags] = {},
+                              udata: ref T,
+                              child: DatagramTransport = nil,
+                              bufSize: int = DefaultDatagramBufferSize,
+                              ttl: int = 0,
+                              dualstack = DualStackType.Auto
+                             ): DatagramTransport {.
+    raises: [TransportOsError].} =
+  newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress),
+                       flags, udata, child, bufSize, ttl, dualstack)
+
 proc join*(transp: DatagramTransport): Future[void] {.
     async: (raw: true, raises: [CancelledError]).} =
   ## Wait until the transport ``transp`` will be closed.
diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim
index 0f006b8..f4ef1ad 100644
--- a/chronos/transports/stream.nim
+++ b/chronos/transports/stream.nim
@@ -11,8 +11,11 @@
 
 import std/deques
 import stew/ptrops
+import results
 import ".."/[asyncloop, config, handles, bipbuffer, osdefs, osutils, oserrno]
-import ./common
+import ./[common, ipnet]
+
+export results
 
 type
   VectorKind = enum
@@ -48,7 +51,8 @@
     # get stuck on transport `close()`.
    # Please use this flag only if you are making both client and server in
    # the same thread.
-    TcpNoDelay # deprecated: Use SocketFlags.TcpNoDelay
+    TcpNoDelay, # deprecated: Use SocketFlags.TcpNoDelay
+    V4Mapped
 
   SocketFlags* {.pure.} = enum
     TcpNoDelay,
@@ -101,6 +105,7 @@ else:
       error: ref TransportError         # Current error
       queue: Deque[StreamVector]        # Writer queue
       future: Future[void].Raising([])  # Stream life future
+      flags: set[TransportFlags]        # Internal flags
       case kind*: TransportKind
       of TransportKind.Socket:
         domain: Domain                  # Socket transport domain (IPv4/IPv6)
@@ -138,31 +143,59 @@ type
     init*: TransportInitCallback # callback which will be called before
                                  # transport for new client
 
+proc getRemoteAddress(transp: StreamTransport,
+                      address: Sockaddr_storage, length: SockLen,
+                     ): TransportAddress =
+  var raddr: TransportAddress
+  fromSAddr(unsafeAddr address, length, raddr)
+  if TransportFlags.V4Mapped in transp.flags:
+    if raddr.isV4Mapped(): raddr.toIPv4() else: raddr
+  else:
+    raddr
+
+proc remoteAddress2*(
+    transp: StreamTransport
+): Result[TransportAddress, OSErrorCode] =
+  ## Returns ``transp`` remote socket address.
+  if transp.remote.family == AddressFamily.None:
+    var
+      saddr: Sockaddr_storage
+      slen = SockLen(sizeof(saddr))
+    if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
+                   addr slen) != 0:
+      return err(osLastError())
+    transp.remote = transp.getRemoteAddress(saddr, slen)
+  ok(transp.remote)
+
+proc localAddress2*(
+    transp: StreamTransport
+): Result[TransportAddress, OSErrorCode] =
+  ## Returns ``transp`` local socket address.
+  if transp.local.family == AddressFamily.None:
+    var
+      saddr: Sockaddr_storage
+      slen = SockLen(sizeof(saddr))
+    if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
+                   addr slen) != 0:
+      return err(osLastError())
+    fromSAddr(addr saddr, slen, transp.local)
+  ok(transp.local)
+
+# TODO(cheatfate): This function should not be public, but for some weird
+# reason, making it non-public causes the compiler to emit
+# Hint: 'toException' is declared but not used [XDeclaredButNotUsed]
+func toException*(v: OSErrorCode): ref TransportOsError =
+  getTransportOsError(v)
+
 proc remoteAddress*(transp: StreamTransport): TransportAddress {.
     raises: [TransportOsError].} =
   ## Returns ``transp`` remote socket address.
-  doAssert(transp.kind == TransportKind.Socket, "Socket transport required!")
-  if transp.remote.family == AddressFamily.None:
-    var saddr: Sockaddr_storage
-    var slen = SockLen(sizeof(saddr))
-    if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
-                   addr slen) != 0:
-      raiseTransportOsError(osLastError())
-    fromSAddr(addr saddr, slen, transp.remote)
-  transp.remote
+  remoteAddress2(transp).tryGet()
 
 proc localAddress*(transp: StreamTransport): TransportAddress {.
     raises: [TransportOsError].} =
-  ## Returns ``transp`` local socket address.
-  doAssert(transp.kind == TransportKind.Socket, "Socket transport required!")
-  if transp.local.family == AddressFamily.None:
-    var saddr: Sockaddr_storage
-    var slen = SockLen(sizeof(saddr))
-    if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
-                   addr slen) != 0:
-      raiseTransportOsError(osLastError())
-    fromSAddr(addr saddr, slen, transp.local)
-  transp.local
+  ## Returns ``transp`` local socket address.
+  localAddress2(transp).tryGet()
 
 proc localAddress*(server: StreamServer): TransportAddress =
   ## Returns ``server`` bound local socket address.
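A short sketch of how the `Result`-based accessors above are meant to be used (the `describe` helper is illustrative): errors surface as `OSErrorCode` values instead of the `TransportOsError` that the raising variants throw.

```nim
import chronos

proc describe(transp: StreamTransport): string =
  # No exceptions to catch: inspect the Result instead. `res.error()`
  # carries the OSErrorCode from getpeername() on failure.
  let res = transp.remoteAddress2()
  if res.isOk():
    "peer: " & $res.get()
  else:
    "peer address unavailable"
```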
@@ -220,6 +253,12 @@ proc clean(transp: StreamTransport) {.inline.} = template toUnchecked*(a: untyped): untyped = cast[ptr UncheckedArray[byte]](a) +func getTransportFlags(server: StreamServer): set[TransportFlags] = + if ServerFlags.V4Mapped in server.flags: + {TransportFlags.V4Mapped} + else: + {} + when defined(windows): template zeroOvelappedOffset(t: untyped) = @@ -574,13 +613,15 @@ when defined(windows): break proc newStreamSocketTransport(sock: AsyncFD, bufsize: int, - child: StreamTransport): StreamTransport = + child: StreamTransport, + flags: set[TransportFlags]): StreamTransport = var transp: StreamTransport if not(isNil(child)): transp = child else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock + transp.flags = flags transp.rovl.data = CompletionData(cb: readStreamLoop, udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, @@ -617,25 +658,27 @@ when defined(windows): GC_ref(transp) transp - proc bindToDomain(handle: AsyncFD, domain: Domain): bool = - if domain == Domain.AF_INET6: + proc bindToDomain(handle: AsyncFD, + family: AddressFamily): Result[void, OSErrorCode] = + case family + of AddressFamily.IPv6: var saddr: Sockaddr_in6 saddr.sin6_family = type(saddr.sin6_family)(osdefs.AF_INET6) if osdefs.bindSocket(SocketHandle(handle), cast[ptr SockAddr](addr(saddr)), sizeof(saddr).SockLen) != 0'i32: - return false - true - elif domain == Domain.AF_INET: + return err(osLastError()) + ok() + of AddressFamily.IPv4: var saddr: Sockaddr_in saddr.sin_family = type(saddr.sin_family)(osdefs.AF_INET) if osdefs.bindSocket(SocketHandle(handle), cast[ptr SockAddr](addr(saddr)), sizeof(saddr).SockLen) != 0'i32: - return false - true + return err(osLastError()) + ok() else: - raiseAssert "Unsupported domain" + raiseAssert "Unsupported family" proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, @@ -691,26 +734,36 @@ when defined(windows): retFuture.fail(getTransportOsError(error)) return retFuture - if localAddress != TransportAddress(): - if localAddress.family != address.family: - sock.closeSocket() - retFuture.fail(newException(TransportOsError, - "connect local address domain is not equal to target address domain")) - return retFuture + let transportFlags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res: set[TransportFlags] + if (localAddress.family == AddressFamily.IPv6) and + localAddress.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(TransportFlags.V4Mapped) + res + + case localAddress.family + of AddressFamily.IPv4, AddressFamily.IPv6: var - localAddr: Sockaddr_storage - localAddrLen: SockLen - localAddress.toSAddr(localAddr, localAddrLen) + saddr: Sockaddr_storage + slen: SockLen + toSAddr(localAddress, saddr, slen) if bindSocket(SocketHandle(sock), - cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + cast[ptr SockAddr](addr saddr), slen) != 0: sock.closeSocket() retFuture.fail(getTransportOsError(osLastError())) return retFuture - elif not(bindToDomain(sock, raddress.getDomain())): - let err = wsaGetLastError() - sock.closeSocket() - retFuture.fail(getTransportOsError(err)) - return retFuture + of AddressFamily.Unix: + raiseAssert "Unsupported local address family" + of AddressFamily.None: + let res = bindToDomain(sock, raddress.family) + if res.isErr(): + sock.closeSocket() + retFuture.fail(getTransportOsError(res.error)) + return retFuture proc socketContinuation(udata: pointer) {.gcsafe.} = var ovl = cast[RefCustomOverlapped](udata) @@ -723,7 +776,8 @@ when defined(windows): sock.closeSocket() retFuture.fail(getTransportOsError(err)) else: - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -949,10 +1003,12 @@ when defined(windows): let transp = server.init(server, server.asock) ntransp = newStreamSocketTransport(server.asock, server.bufferSize, - transp) + transp, + server.getTransportFlags()) else: ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, nil) + server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) @@ -1090,10 +1146,12 @@ when defined(windows): let transp = server.init(server, server.asock) ntransp = newStreamSocketTransport(server.asock, server.bufferSize, - transp) + transp, + server.getTransportFlags()) else: ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, nil) + server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) @@ -1446,7 +1504,8 @@ else: break proc newStreamSocketTransport(sock: AsyncFD, bufsize: int, - child: StreamTransport): StreamTransport = + child: StreamTransport, + flags: set[TransportFlags]): StreamTransport = var transp: StreamTransport if not(isNil(child)): transp = child @@ -1454,6 +1513,7 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock + transp.flags = flags let size = max(bufsize, DefaultStreamBufferSize) transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} @@ -1535,21 +1595,30 @@ else: retFuture.fail(getTransportOsError(error)) return retFuture - if localAddress != TransportAddress(): - if localAddress.family != address.family: - sock.closeSocket() - retFuture.fail(newException(TransportOsError, - "connect local address domain is not equal to target address domain")) - return retFuture + let transportFlags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res: set[TransportFlags] + if (localAddress.family == AddressFamily.IPv6) and + localAddress.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(TransportFlags.V4Mapped) + res + + case localAddress.family + of AddressFamily.IPv4, AddressFamily.IPv6, AddressFamily.Unix: var - localAddr: Sockaddr_storage - localAddrLen: SockLen - localAddress.toSAddr(localAddr, localAddrLen) + lsaddr: Sockaddr_storage + lslen: SockLen + toSAddr(localAddress, lsaddr, lslen) if bindSocket(SocketHandle(sock), - cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + cast[ptr SockAddr](addr lsaddr), lslen) != 0: sock.closeSocket() retFuture.fail(getTransportOsError(osLastError())) return retFuture + of AddressFamily.None: + discard proc continuation(udata: pointer) = if not(retFuture.finished()): @@ -1568,7 +1637,8 @@ else: retFuture.fail(getTransportOsError(OSErrorCode(err))) return - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -1581,7 +1651,8 @@ else: let res = osdefs.connect(SocketHandle(sock), cast[ptr SockAddr](addr saddr), slen) if res == 0: - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -1634,9 +1705,11 @@ else: let ntransp = if not(isNil(server.init)): let transp = server.init(server, sock) - newStreamSocketTransport(sock, server.bufferSize, transp) + newStreamSocketTransport(sock, server.bufferSize, transp, + server.getTransportFlags()) else: - newStreamSocketTransport(sock, server.bufferSize, nil) + newStreamSocketTransport(sock, server.bufferSize, nil, + server.getTransportFlags()) trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) else: @@ -1724,9 +1797,11 @@ else: let ntransp = if not(isNil(server.init)): let transp = server.init(server, sock) - newStreamSocketTransport(sock, server.bufferSize, transp) + newStreamSocketTransport(sock, server.bufferSize, transp, + server.getTransportFlags()) else: - newStreamSocketTransport(sock, server.bufferSize, nil) + newStreamSocketTransport(sock, server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) @@ -1879,166 +1954,196 @@ proc createStreamServer*(host: TransportAddress, ## ``child`` - existing object ``StreamServer``object to initialize, can be ## used to initalize ``StreamServer`` inherited objects. ## ``udata`` - user-defined pointer. - var - saddr: Sockaddr_storage - slen: SockLen - serverSocket: AsyncFD - localAddress: TransportAddress + let (serverSocket, localAddress, serverFlags) = + when defined(windows): + # Windows + if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: + var + saddr: Sockaddr_storage + slen: SockLen + laddress: TransportAddress - when defined(nimdoc): - discard - elif defined(windows): - # Windows - if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - serverSocket = + let sockres = + if sock == asyncInvalidSocket: + # TODO (cheatfate): `valueOr` generates weird compile error. 
+ let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, + Protocol.IPPROTO_TCP) + if res.isErr(): + raiseTransportOsError(res.error()) + res.get() + else: + setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock + # SO_REUSEADDR + if ServerFlags.ReuseAddr in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # SO_REUSEPORT + if ServerFlags.ReusePort in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # TCP_NODELAY + if ServerFlags.TcpNoDelay in flags: + setSockOpt2(sockres, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # IPV6_V6ONLY. if sock == asyncInvalidSocket: + setDualstack(sockres, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + else: + setDualstack(sockres, dualstack).isOkOr: + raiseTransportOsError(error) + + let flagres = + block: + var res = flags + if (host.family == AddressFamily.IPv6) and host.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(ServerFlags.V4Mapped) + res + + host.toSAddr(saddr, slen) + + if bindSocket(SocketHandle(sockres), + cast[ptr SockAddr](addr saddr), slen) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + slen = SockLen(sizeof(saddr)) + + if getsockname(SocketHandle(sockres), cast[ptr SockAddr](addr saddr), + addr slen) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + fromSAddr(addr saddr, slen, laddress) + + if listen(SocketHandle(sockres), getBacklogSize(backlog)) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + (sockres, laddress, flagres) + elif host.family == AddressFamily.Unix: + (AsyncFD(0), host, flags) + else: + raiseAssert "Incorrect host address family" + else: + # Posix + var + saddr: Sockaddr_storage + slen: SockLen + laddress: TransportAddress + + let sockres = + if sock == asyncInvalidSocket: + let proto = if host.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_TCP # TODO (cheatfate): `valueOr` generates weird compile error. 
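+          # (Assuming the compiler issue gets resolved, the intended shape
+          # would roughly be:
+          #   createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM,
+          #                      proto).valueOr:
+          #     raiseTransportOsError(error))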
let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, - Protocol.IPPROTO_TCP) + proto) if res.isErr(): raiseTransportOsError(res.error()) res.get() else: - setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + setDescriptorFlags(cint(sock), true, true).isOkOr: raiseTransportOsError(error) register2(sock).isOkOr: raiseTransportOsError(error) sock - # SO_REUSEADDR - if ServerFlags.ReuseAddr in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # SO_REUSEPORT - if ServerFlags.ReusePort in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # TCP_NODELAY - if ServerFlags.TcpNoDelay in flags: - setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # IPV6_V6ONLY. - if sock == asyncInvalidSocket: - setDualstack(serverSocket, host.family, dualstack).isOkOr: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - else: - setDualstack(serverSocket, dualstack).isOkOr: - raiseTransportOsError(error) + + if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: + # SO_REUSEADDR + if ServerFlags.ReuseAddr in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # SO_REUSEPORT + if ServerFlags.ReusePort in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # TCP_NODELAY + if ServerFlags.TcpNoDelay in flags: + setSockOpt2(sockres, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # IPV6_V6ONLY + if sock == asyncInvalidSocket: + setDualstack(sockres, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + else: + setDualstack(sockres, dualstack).isOkOr: + raiseTransportOsError(error) + + elif host.family in {AddressFamily.Unix}: + # We do not care about result here, because if file cannot be removed, + # `bindSocket` will return EADDRINUSE. 
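+      # (A stale socket file is typically left behind when a previous server
+      # process terminated without closing the transport.)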
+ discard osdefs.unlink(cast[cstring](baseAddr host.address_un)) + + let flagres = + block: + var res = flags + if (host.family == AddressFamily.IPv6) and host.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(ServerFlags.V4Mapped) + res host.toSAddr(saddr, slen) - if bindSocket(SocketHandle(serverSocket), - cast[ptr SockAddr](addr saddr), slen) != 0: + + if osdefs.bindSocket(SocketHandle(sockres), + cast[ptr SockAddr](addr saddr), slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) + # Obtain real address slen = SockLen(sizeof(saddr)) - if getsockname(SocketHandle(serverSocket), cast[ptr SockAddr](addr saddr), + if getsockname(SocketHandle(sockres), cast[ptr SockAddr](addr saddr), addr slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) - fromSAddr(addr saddr, slen, localAddress) - if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: + fromSAddr(addr saddr, slen, laddress) + + if listen(SocketHandle(sockres), getBacklogSize(backlog)) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) - elif host.family == AddressFamily.Unix: - serverSocket = AsyncFD(0) - else: - # Posix - serverSocket = - if sock == asyncInvalidSocket: - let proto = if host.family == AddressFamily.Unix: - Protocol.IPPROTO_IP - else: - Protocol.IPPROTO_TCP - # TODO (cheatfate): `valueOr` generates weird compile error. - let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, - proto) - if res.isErr(): - raiseTransportOsError(res.error()) - res.get() - else: - setDescriptorFlags(cint(sock), true, true).isOkOr: - raiseTransportOsError(error) - register2(sock).isOkOr: - raiseTransportOsError(error) - sock - if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - # SO_REUSEADDR - if ServerFlags.ReuseAddr in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # SO_REUSEPORT - if ServerFlags.ReusePort in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # TCP_NODELAY - if ServerFlags.TcpNoDelay in flags: - setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # IPV6_V6ONLY - if sock == asyncInvalidSocket: - setDualstack(serverSocket, host.family, dualstack).isOkOr: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - else: - setDualstack(serverSocket, dualstack).isOkOr: - raiseTransportOsError(error) - - elif host.family in {AddressFamily.Unix}: - # We do not care about result here, because if file cannot be removed, - # `bindSocket` will return EADDRINUSE. 
- discard osdefs.unlink(cast[cstring](baseAddr host.address_un)) - - host.toSAddr(saddr, slen) - if osdefs.bindSocket(SocketHandle(serverSocket), - cast[ptr SockAddr](addr saddr), slen) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) - - # Obtain real address - slen = SockLen(sizeof(saddr)) - if getsockname(SocketHandle(serverSocket), cast[ptr SockAddr](addr saddr), - addr slen) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) - fromSAddr(addr saddr, slen, localAddress) - - if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) + (sockres, laddress, flagres) var sres = if not(isNil(child)): child else: StreamServer() sres.sock = serverSocket - sres.flags = flags + sres.flags = serverFlags sres.function = cbproc sres.init = init sres.bufferSize = bufferSize @@ -2048,9 +2153,7 @@ proc createStreamServer*(host: TransportAddress, {FutureFlag.OwnCancelSchedule}) sres.udata = udata sres.dualstack = dualstack - if localAddress.family == AddressFamily.None: - sres.local = host - else: + if localAddress.family != AddressFamily.None: sres.local = localAddress when defined(windows): @@ -2115,6 +2218,52 @@ proc createStreamServer*(host: TransportAddress, createStreamServer(host, StreamCallback2(nil), flags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) +proc createStreamServer*(port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, StreamCallback2(nil), flags, sock, + backlog, bufferSize, child, init, cast[pointer](udata), + dualstack) + +proc createStreamServer*(cbproc: StreamCallback2, + port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. 
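+  ##
+  ## Usage sketch (callback name and port are illustrative):
+  ##   `let server = createStreamServer(processClient, Port(8080))`
+  ##   `server.start()`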
+ let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, cbproc, flags, sock, backlog, + bufferSize, child, init, cast[pointer](udata), dualstack) + proc createStreamServer*[T](host: TransportAddress, cbproc: StreamCallback2, flags: set[ServerFlags] = {}, @@ -2163,6 +2312,56 @@ proc createStreamServer*[T](host: TransportAddress, createStreamServer(host, StreamCallback2(nil), fflags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) +proc createStreamServer*[T](cbproc: StreamCallback2, + port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let fflags = flags + {GCUserData} + GC_ref(udata) + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, cbproc, fflags, sock, backlog, + bufferSize, child, init, cast[pointer](udata), dualstack) + +proc createStreamServer*[T](port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let fflags = flags + {GCUserData} + GC_ref(udata) + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, StreamCallback2(nil), fflags, sock, + backlog, bufferSize, child, init, cast[pointer](udata), + dualstack) + proc getUserData*[T](server: StreamServer): T {.inline.} = ## Obtain user data stored in ``server`` object. cast[T](server.udata) diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 7b27c34..38c10ac 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -32,6 +32,10 @@ suite "Datagram Transport test suite": m8 = "Bounded multiple clients with messages (" & $ClientsCount & " clients x " & $MessagesCount & " messages)" + type + DatagramSocketType {.pure.} = enum + Bound, Unbound + proc client1(transp: DatagramTransport, raddr: TransportAddress): Future[void] {.async: (raises: []).} = try: @@ -628,6 +632,243 @@ suite "Datagram Transport test suite": await allFutures(sdgram.closeWait(), cdgram.closeWait()) res == 1 + proc performAutoAddressTest(port: Port, + family: AddressFamily): Future[bool] {.async.} = + var + expectRequest1 = "AUTO REQUEST1" + expectRequest2 = "AUTO REQUEST2" + expectResponse = "AUTO RESPONSE" + mappedResponse = "MAPPED RESPONSE" + event = newAsyncEvent() + event2 = newAsyncEvent() + res = 0 + + proc process1(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. 
+ async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectRequest1: + inc(res) + await noCancel transp.sendTo( + raddr, addr expectResponse[0], len(expectResponse)) + elif smsg == expectRequest2: + inc(res) + await noCancel transp.sendTo( + raddr, addr mappedResponse[0], len(mappedResponse)) + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process2(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectResponse: + inc(res) + event.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process3(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == mappedResponse: + inc(res) + event2.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + let sdgram = + block: + var res: DatagramTransport + var currentPort = port + for i in 0 ..< 10: + res = + try: + newDatagramTransport(process1, currentPort, + flags = {ServerFlags.ReusePort}) + except TransportOsError: + echo "Unable to create transport on port ", currentPort + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create transport, giving up") + res + + var + address = + case family + of AddressFamily.IPv4: + initTAddress("127.0.0.1:0") + of AddressFamily.IPv6: + initTAddress("::1:0") + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + let + cdgram = + case family + of AddressFamily.IPv4: + newDatagramTransport(process2, local = address) + of AddressFamily.IPv6: + newDatagramTransport6(process2, local = address) + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + address.port = sdgram.localAddress().port + + try: + await noCancel cdgram.sendTo( + address, addr expectRequest1[0], len(expectRequest1)) + except TransportError: + discard + + if family == AddressFamily.IPv6: + var remote = initTAddress("127.0.0.1:0") + remote.port = sdgram.localAddress().port + let wtransp = + newDatagramTransport(process3, local = initTAddress("0.0.0.0:0")) + try: + await noCancel wtransp.sendTo( + remote, addr expectRequest2[0], len(expectRequest2)) + except TransportError as exc: + raiseAssert "Got transport error, reason = " & $exc.msg + + try: + await event2.wait().wait(1.seconds) + except CatchableError: + discard + + await wtransp.closeWait() + + try: + await event.wait().wait(1.seconds) + except CatchableError: + discard + + await allFutures(sdgram.closeWait(), cdgram.closeWait()) + + if family == AddressFamily.IPv4: + res == 2 + else: + res == 4 + + proc performAutoAddressTest2( + address1: Opt[IpAddress], + address2: Opt[IpAddress], + port: Port, + sendType: AddressFamily, + boundType: DatagramSocketType + ): Future[bool] {.async.} = + let + expectRequest = "TEST REQUEST" + expectResponse = "TEST RESPONSE" + event = newAsyncEvent() + var res = 0 + + proc process1(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. 
+ async: (raises: []).} = + if raddr.family != sendType: + raiseAssert "Incorrect address family received [" & $raddr & + "], expected [" & $sendType & "]" + try: + let + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectRequest: + inc(res) + await noCancel transp.sendTo( + raddr, unsafeAddr expectResponse[0], len(expectResponse)) + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process2(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + if raddr.family != sendType: + raiseAssert "Incorrect address family received [" & $raddr & + "], expected [" & $sendType & "]" + try: + let + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectResponse: + inc(res) + event.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + let + serverFlags = {ServerFlags.ReuseAddr} + server = newDatagramTransport(process1, flags = serverFlags, + local = address1, localPort = port) + serverAddr = server.localAddress() + serverPort = serverAddr.port + remoteAddress = + case sendType + of AddressFamily.IPv4: + var res = initTAddress("127.0.0.1:0") + res.port = serverPort + res + of AddressFamily.IPv6: + var res = initTAddress("[::1]:0") + res.port = serverPort + res + else: + raiseAssert "Incorrect sending type" + remoteIpAddress = Opt.some(remoteAddress.toIpAddress()) + client = + case boundType + of DatagramSocketType.Bound: + newDatagramTransport(process2, + localPort = Port(0), remotePort = serverPort, + local = address2, remote = remoteIpAddress) + of DatagramSocketType.Unbound: + newDatagramTransport(process2, + localPort = Port(0), remotePort = Port(0), + local = address2) + + try: + case boundType + of DatagramSocketType.Bound: + await noCancel client.send( + unsafeAddr expectRequest[0], len(expectRequest)) + of DatagramSocketType.Unbound: + await noCancel client.sendTo(remoteAddress, + unsafeAddr expectRequest[0], len(expectRequest)) + except TransportError as exc: + raiseAssert "Could not send datagram to remote peer, reason = " & $exc.msg + + try: + await event.wait().wait(1.seconds) + except CatchableError: + discard + + await allFutures(server.closeWait(), client.closeWait()) + + res == 2 + test "close(transport) test": check waitFor(testTransportClose()) == true test m1: @@ -730,3 +971,104 @@ suite "Datagram Transport test suite": DualStackType.Auto, initTAddress("[::1]:0"))) == true else: skip() + asyncTest "[IP] Auto-address constructor test (*:0)": + if isAvailable(AddressFamily.IPv6): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv6)) == true + # If IPv6 is available newAutoDatagramTransport should bind to `::` - this + # means that we should be able to connect to it via IPV4_MAPPED address, + # but only when IPv4 is also available. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true + else: + # If IPv6 is not available newAutoDatagramTransport should bind to + # `0.0.0.0` - this means we should be able to connect to it via IPv4 + # address. 
+ if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true + asyncTest "[IP] Auto-address constructor test (*:30231)": + if isAvailable(AddressFamily.IPv6): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv6)) == true + # If IPv6 is available newAutoDatagramTransport should bind to `::` - this + # means that we should be able to connect to it via IPV4_MAPPED address, + # but only when IPv4 is also available. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) == + true + else: + # If IPv6 is not available newAutoDatagramTransport should bind to + # `0.0.0.0` - this means we should be able to connect to it via IPv4 + # address. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) == + true + + for socketType in DatagramSocketType: + for portNumber in [Port(0), Port(30231)]: + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.none(IpAddress) + + check: + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv4, socketType)) + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-ipv6:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.some(initTAddress("[::1]:0").toIpAddress()) + check: + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-ipv4:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress()) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv4, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/ipv6-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.some(initTAddress("[::1]:0").toIpAddress()) + address2 = Opt.none(IpAddress) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/ipv4-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress()) + address2 = Opt.none(IpAddress) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv4, socketType)) + else: + skip() diff --git a/tests/teststream.nim b/tests/teststream.nim index 340575c..25278f4 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1486,6 +1486,170 @@ suite "Stream Transport test suite": await server.closeWait() testResult + proc performAutoAddressTest(port: Port, + family: AddressFamily): Future[bool] {. 
+ async: (raises: []).} = + let server = + block: + var currentPort = port + var res: StreamServer + for i in 0 ..< 10: + res = + try: + createStreamServer(port, flags = {ServerFlags.ReuseAddr}) + except TransportOsError as exc: + echo "Unable to create server on port ", currentPort, + " with error: ", exc.msg + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create server, giving up") + res + + var + address = + case family + of AddressFamily.IPv4: + try: + initTAddress("127.0.0.1:0") + except TransportAddressError as exc: + raiseAssert exc.msg + of AddressFamily.IPv6: + try: + initTAddress("::1:0") + except TransportAddressError as exc: + raiseAssert exc.msg + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + address.port = server.localAddress().port + var acceptFut = server.accept() + let + clientTransp = + try: + let res = await connect(address).wait(2.seconds) + Opt.some(res) + except CatchableError: + Opt.none(StreamTransport) + serverTransp = + if clientTransp.isSome(): + let res = + try: + await noCancel acceptFut + except TransportError as exc: + raiseAssert exc.msg + Opt.some(res) + else: + Opt.none(StreamTransport) + + let testResult = clientTransp.isSome() and serverTransp.isSome() + var pending: seq[FutureBase] + if clientTransp.isSome(): + pending.add(closeWait(clientTransp.get())) + if serverTransp.isSome(): + pending.add(closeWait(serverTransp.get())) + else: + pending.add(cancelAndWait(acceptFut)) + await noCancel allFutures(pending) + try: + server.stop() + except TransportError as exc: + raiseAssert exc.msg + await server.closeWait() + testResult + + proc performAutoAddressTest2( + address1: Opt[IpAddress], + address2: Opt[IpAddress], + port: Port, + sendType: AddressFamily + ): Future[bool] {.async: (raises: []).} = + let + server = + block: + var + currentPort = port + res: StreamServer + for i in 0 ..< 10: + res = + try: + createStreamServer(port, host = address1, + flags = {ServerFlags.ReuseAddr}) + except TransportOsError as exc: + echo "Unable to create server on port ", currentPort, + " with error: ", exc.msg + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create server, giving up") + res + serverAddr = server.localAddress() + serverPort = serverAddr.port + remoteAddress = + try: + case sendType + of AddressFamily.IPv4: + var res = initTAddress("127.0.0.1:0") + res.port = serverPort + res + of AddressFamily.IPv6: + var res = initTAddress("[::1]:0") + res.port = serverPort + res + else: + raiseAssert "Incorrect sending type" + except TransportAddressError as exc: + raiseAssert "Unable to initialize transport address, " & + "reason = " & exc.msg + acceptFut = server.accept() + + let + clientTransp = + try: + if address2.isSome(): + let + laddr = initTAddress(address2.get(), Port(0)) + res = await connect(remoteAddress, localAddress = laddr). 
+ wait(2.seconds) + Opt.some(res) + + else: + let res = await connect(remoteAddress).wait(2.seconds) + Opt.some(res) + except CatchableError: + Opt.none(StreamTransport) + serverTransp = + if clientTransp.isSome(): + let res = + try: + await noCancel acceptFut + except TransportError as exc: + raiseAssert exc.msg + Opt.some(res) + else: + Opt.none(StreamTransport) + testResult = + clientTransp.isSome() and serverTransp.isSome() and + (serverTransp.get().remoteAddress2().get().family == sendType) and + (clientTransp.get().remoteAddress2().get().family == sendType) + var pending: seq[FutureBase] + if clientTransp.isSome(): + pending.add(closeWait(clientTransp.get())) + if serverTransp.isSome(): + pending.add(closeWait(serverTransp.get())) + else: + pending.add(cancelAndWait(acceptFut)) + await noCancel allFutures(pending) + try: + server.stop() + except TransportError as exc: + raiseAssert exc.msg + await server.closeWait() + + testResult + markFD = getCurrentFD() for i in 0.. Date: Wed, 17 Apr 2024 17:27:14 +0300 Subject: [PATCH 19/37] Fix inability to change httpclient's internal buffer size. (#531) Add test. Address #529. --- chronos/apps/http/httpclient.nim | 66 +++++++++++++++++++++----------- tests/testhttpclient.nim | 60 +++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 23 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 414b1d3..3b4844d 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -159,6 +159,7 @@ type redirectCount: int timestamp*: Moment duration*: Duration + headersBuffer: seq[byte] HttpClientRequestRef* = ref HttpClientRequest @@ -859,6 +860,7 @@ proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} = await noCancel(allFutures(pending)) request.session = nil request.error = nil + request.headersBuffer.reset() request.state = HttpReqRespState.Closed untrackCounter(HttpClientRequestTrackerName) @@ -992,14 +994,14 @@ proc prepareResponse( proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. async: (raises: [CancelledError, HttpError]).} = - var buffer: array[HttpMaxHeadersSize, byte] let timestamp = Moment.now() req.connection.setTimestamp(timestamp) let bytesRead = try: - await req.connection.reader.readUntil(addr buffer[0], - len(buffer), HeadersMark).wait( + await req.connection.reader.readUntil(addr req.headersBuffer[0], + len(req.headersBuffer), + HeadersMark).wait( req.session.headersTimeout) except AsyncTimeoutError: raiseHttpReadError("Reading response headers timed out") @@ -1007,23 +1009,25 @@ proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. 
raiseHttpReadError( "Could not read response headers, reason: " & $exc.msg) - let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1)) - if response.isErr(): - raiseHttpProtocolError(response.error()) - let res = response.get() - res.setTimestamp(timestamp) - return res + let response = + prepareResponse(req, + req.headersBuffer.toOpenArray(0, bytesRead - 1)).valueOr: + raiseHttpProtocolError(error) + response.setTimestamp(timestamp) + response proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, meth: HttpMethod = MethodGet, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef = let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), - address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body + address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body, + headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize)) ) trackCounter(HttpClientRequestTrackerName) res @@ -1032,13 +1036,15 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, meth: HttpMethod = MethodGet, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpResult[HttpClientRequestRef] = let address = ? session.getAddress(parseUri(url)) let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), - address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body + address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body, + headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize)) ) trackCounter(HttpClientRequestTrackerName) ok(res) @@ -1046,48 +1052,58 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [] ): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers) + HttpClientRequestRef.new(session, url, MethodGet, version, flags, + maxResponseHeadersSize, headers) proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [] ): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers) + HttpClientRequestRef.new(session, ha, MethodGet, version, flags, + maxResponseHeadersSize, headers) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = [] ): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, - body) + HttpClientRequestRef.new(session, 
url, MethodPost, version, flags, + maxResponseHeadersSize, headers, body) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, + HttpClientRequestRef.new(session, url, MethodPost, version, flags, + maxResponseHeadersSize, headers, body.toOpenArrayByte(0, len(body) - 1)) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, - body) + HttpClientRequestRef.new(session, ha, MethodPost, version, flags, + maxResponseHeadersSize, headers, body) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, + HttpClientRequestRef.new(session, ha, MethodPost, version, flags, + maxResponseHeadersSize, headers, body.toOpenArrayByte(0, len(body) - 1)) proc prepareRequest(request: HttpClientRequestRef): string = @@ -1454,8 +1470,10 @@ proc redirect*(request: HttpClientRequestRef, var res = request.headers res.set(HostHeader, ha.hostname) res - var res = HttpClientRequestRef.new(request.session, ha, request.meth, - request.version, request.flags, headers.toList(), request.buffer) + var res = + HttpClientRequestRef.new(request.session, ha, request.meth, + request.version, request.flags, headers = headers.toList(), + body = request.buffer) res.redirectCount = redirectCount ok(res) @@ -1478,8 +1496,10 @@ proc redirect*(request: HttpClientRequestRef, var res = request.headers res.set(HostHeader, address.hostname) res - var res = HttpClientRequestRef.new(request.session, address, request.meth, - request.version, request.flags, headers.toList(), request.buffer) + var res = + HttpClientRequestRef.new(request.session, address, request.meth, + request.version, request.flags, headers = headers.toList(), + body = request.buffer) res.redirectCount = redirectCount ok(res) diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index a468aae..e298d5a 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -1518,3 +1518,63 @@ suite "HTTP client testing suite": res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed res.error.isRecoverableError() not(res.error.isCriticalError()) + + asyncTest "HTTPS response headers buffer size test": + const HeadersSize = HttpMaxHeadersSize + let expectValue = + string.fromBytes(createBigMessage("HEADERSTEST", HeadersSize)) + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + if r.isOk(): + let request = r.get() + try: + case request.uri.path + of "/test": + let headers = HttpTable.init([("big-header", expectValue)]) + await request.respond(Http200, "ok", headers) + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) + else: + defaultResponse() + + var server = createServer(initTAddress("127.0.0.1:0"), process, false) + server.start() + let + address = server.instance.localAddress() + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + session = HttpSessionRef.new() + let + req1 = HttpClientRequestRef.new(session, ha) + req2 = + HttpClientRequestRef.new(session, ha, + maxResponseHeadersSize = HttpMaxHeadersSize * 2) + res1 = + try: + let res {.used.} = await send(req1) + await closeWait(req1) + await closeWait(res) + false + except HttpReadError: + true + except HttpError: + await closeWait(req1) + false + except CancelledError: + await closeWait(req1) + false + + res2 = await send(req2) + + check: + res1 == true + res2.status == 200 + res2.headers.getString("big-header") == expectValue + + await req1.closeWait() + await req2.closeWait() + await res2.closeWait() + await session.closeWait() + await server.stop() + await server.closeWait() From bd7d84fbcb738ee06755aa9c0b0ebd94e93f3a62 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 17 Apr 2024 17:41:36 +0300 Subject: [PATCH 20/37] Fix AsyncStreamReader constructor declaration mistypes. (#533) --- chronos/streams/asyncstream.nim | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 301b086..0ff9f4b 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -1082,6 +1082,22 @@ proc newAsyncStreamReader*(tsource: StreamTransport): AsyncStreamReader = res.init(tsource) res +proc newAsyncStreamReader*[T](rsource: AsyncStreamReader, + udata: ref T): AsyncStreamReader = + ## Create copy of AsyncStreamReader object ``rsource``. + ## + ## ``udata`` - user object which will be associated with new AsyncStreamReader + ## object. + var res = AsyncStreamReader() + res.init(rsource, udata) + res + +proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader = + ## Create copy of AsyncStreamReader object ``rsource``. + var res = AsyncStreamReader() + res.init(rsource) + res + proc newAsyncStreamWriter*[T](wsource: AsyncStreamWriter, loop: StreamWriterLoop, queueSize = AsyncStreamDefaultQueueSize, @@ -1147,22 +1163,6 @@ proc newAsyncStreamWriter*(wsource: AsyncStreamWriter): AsyncStreamWriter = res.init(wsource) res -proc newAsyncStreamReader*[T](rsource: AsyncStreamWriter, - udata: ref T): AsyncStreamWriter = - ## Create copy of AsyncStreamReader object ``rsource``. - ## - ## ``udata`` - user object which will be associated with new AsyncStreamReader - ## object. - var res = AsyncStreamReader() - res.init(rsource, udata) - res - -proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader = - ## Create copy of AsyncStreamReader object ``rsource``. - var res = AsyncStreamReader() - res.init(rsource) - res - proc getUserData*[T](rw: AsyncStreamRW): T {.inline.} = ## Obtain user data associated with AsyncStreamReader or AsyncStreamWriter ## object ``rw``. 
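Taken together, the two patches above change what callers can write. A minimal
sketch follows (the server URL, the names and the doubled size are
illustrative, and error handling is elided) of sizing the response-header
buffer per request and of copying an `AsyncStreamReader`:

```nim
import chronos
import chronos/apps/http/httpclient
import chronos/streams/asyncstream

proc fetchWithLargeHeaders() {.async.} =
  let session = HttpSessionRef.new()
  # Allow response headers up to twice the default limit; `get` returns an
  # `HttpResult`, unwrapped here with `get()` for brevity.
  let req = HttpClientRequestRef.get(
    session, "http://127.0.0.1:8080/test",
    maxResponseHeadersSize = HttpMaxHeadersSize * 2).get()
  let resp = await req.send()
  echo resp.status
  await resp.closeWait()
  await req.closeWait()
  await session.closeWait()

proc copyReader(transp: StreamTransport) =
  # The copy constructors now really take (and return) an AsyncStreamReader.
  let source = newAsyncStreamReader(transp)
  let plain = newAsyncStreamReader(source)
  var tag = new(string)
  tag[] = "upstream"
  # Copy carrying user data (any `ref T`).
  let tagged = newAsyncStreamReader(source, tag)
  doAssert not(isNil(plain)) and not(isNil(tagged))
```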
From 7a3eaffa4f4bde711510b0aef4d2b1f18abbb53c Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 18 Apr 2024 02:08:19 +0300 Subject: [PATCH 21/37] Fix English spelling for `readed` variable. (#534) --- chronos/streams/asyncstream.nim | 20 ++++++++++---------- chronos/transports/stream.nim | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 0ff9f4b..bf6daa0 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -317,7 +317,7 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, ## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store ## it to ``pbytes``. ## - ## If EOF is received and ``nbytes`` is not yet readed, the procedure + ## If EOF is received and ``nbytes`` is not yet read, the procedure ## will raise ``AsyncStreamIncompleteError``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes >= 0, "nbytes must be non-negative integer") @@ -347,16 +347,16 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, if len(rstream.buffer.backend) == 0: if rstream.atEof(): raise newAsyncStreamIncompleteError() - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): let count = min(nbytes - index, rsize) - readed += count + bytesRead += count if count > 0: copyMem(addr pbuffer[index], region, count) index += count if index == nbytes: break - (consumed: readed, done: index == nbytes) + (consumed: bytesRead, done: index == nbytes) proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int): Future[int] {. @@ -547,11 +547,11 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): - readed += rsize + bytesRead += rsize res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) - (readed, false) + (bytesRead, false) res proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. @@ -581,12 +581,12 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): let count = min(rsize, n - len(res)) - readed += count + bytesRead += count res.add(region.toUnchecked().toOpenArray(0, count - 1)) - (readed, len(res) == n) + (bytesRead, len(res) == n) res proc consume*(rstream: AsyncStreamReader): Future[int] {. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index f4ef1ad..9992543 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2579,7 +2579,7 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, ## ## If ``nbytes == 0`` this operation will return immediately. ## - ## If EOF is received and ``nbytes`` is not yet readed, the procedure + ## If EOF is received and ``nbytes`` is not yet read, the procedure ## will raise ``TransportIncompleteError``, potentially with some bytes ## already written. 
doAssert(not(isNil(pbytes)), "pbytes must not be nil") @@ -2595,16 +2595,16 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, if len(transp.buffer) == 0: if transp.atEof(): raise newException(TransportIncompleteError, "Data incomplete!") - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): let count = min(nbytes - index, rsize) - readed += count + bytesRead += count if count > 0: copyMem(addr pbuffer[index], region, count) index += count if index == nbytes: break - (consumed: readed, done: index == nbytes) + (consumed: bytesRead, done: index == nbytes) proc readOnce*(transp: StreamTransport, pbytes: pointer, nbytes: int): Future[int] {. @@ -2736,11 +2736,11 @@ proc read*(transp: StreamTransport): Future[seq[byte]] {. if transp.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): - readed += rsize + bytesRead += rsize res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) - (readed, false) + (bytesRead, false) res proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. @@ -2756,12 +2756,12 @@ proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. if transp.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): let count = min(rsize, n - len(res)) - readed += count + bytesRead += count res.add(region.toUnchecked().toOpenArray(0, count - 1)) - (readed, len(res) == n) + (bytesRead, len(res) == n) res proc consume*(transp: StreamTransport): Future[int] {. From d184a92227e8b9ccaa10e8f1b7547caf81770225 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 19 Apr 2024 16:43:34 +0300 Subject: [PATCH 22/37] Fix rare cancellation race issue on timeout for wait/withTimeout. (#536) Add tests. --- chronos/internal/asyncfutures.nim | 36 +++++--- tests/testfut.nim | 138 ++++++++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 11 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 7f93b0e..c3396bf 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1466,18 +1466,25 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. timer: TimerCallback timeouted = false - template completeFuture(fut: untyped): untyped = + template completeFuture(fut: untyped, timeout: bool): untyped = if fut.failed() or fut.completed(): retFuture.complete(true) else: - retFuture.cancelAndSchedule() + if timeout: + retFuture.complete(false) + else: + retFuture.cancelAndSchedule() # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 proc continuation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): if timeouted: - retFuture.complete(false) + # We should not unconditionally complete result future with `false`. + # Initiated by timeout handler cancellation could fail, in this case + # we could get `fut` in complete or in failed state, so we should + # complete result future with `true` instead of `false` here. + fut.completeFuture(timeouted) return if not(fut.finished()): # Timer exceeded first, we going to cancel `fut` and wait until it @@ -1488,7 +1495,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. # Future `fut` completed/failed/cancelled first. 
if not(isNil(timer)): clearTimer(timer) - fut.completeFuture() + fut.completeFuture(false) timer = nil # TODO: raises annotation shouldn't be needed, but likely similar issue as @@ -1499,7 +1506,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. clearTimer(timer) fut.cancelSoon() else: - fut.completeFuture() + fut.completeFuture(false) timer = nil if fut.finished(): @@ -1528,11 +1535,14 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = timer: TimerCallback timeouted = false - template completeFuture(fut: untyped): untyped = + template completeFuture(fut: untyped, timeout: bool): untyped = if fut.failed(): retFuture.fail(fut.error(), warn = false) elif fut.cancelled(): - retFuture.cancelAndSchedule() + if timeout: + retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + else: + retFuture.cancelAndSchedule() else: when type(fut).T is void: retFuture.complete() @@ -1542,7 +1552,11 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = proc continuation(udata: pointer) {.raises: [].} = if not(retFuture.finished()): if timeouted: - retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + # We should not unconditionally fail `retFuture` with + # `AsyncTimeoutError`. Initiated by timeout handler cancellation + # could fail, in this case we could get `fut` in complete or in failed + # state, so we should return error/value instead of `AsyncTimeoutError`. + fut.completeFuture(timeouted) return if not(fut.finished()): # Timer exceeded first. @@ -1552,7 +1566,7 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = # Future `fut` completed/failed/cancelled first. if not(isNil(timer)): clearTimer(timer) - fut.completeFuture() + fut.completeFuture(false) timer = nil var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} @@ -1562,12 +1576,12 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = clearTimer(timer) fut.cancelSoon() else: - fut.completeFuture() + fut.completeFuture(false) timer = nil if fut.finished(): - fut.completeFuture() + fut.completeFuture(false) else: if timeout.isZero(): retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) diff --git a/tests/testfut.nim b/tests/testfut.nim index 1cf0aed..46e9c2a 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -2177,3 +2177,141 @@ suite "Future[T] behavior test suite": check: not compiles(Future[void].Raising([42])) not compiles(Future[void].Raising(42)) + + asyncTest "Timeout/cancellation race wait() test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]("monitor", + {FutureFlag.OwnCancelSchedule}) + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc activation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + monitorFuture.cancelCallback = activation + let + testFut = raceProc2(monitorFuture) + waitFut = wait(testFut, 10.milliseconds) + + when T is void: + let waitRes = + try: + await waitFut + if itype == 0: + true + else: + false + except CancelledError: + false + except CatchableError: + if 
itype != 0: + true + else: + false + check waitRes == true + elif T is int: + let waitRes = + try: + let res = await waitFut + if itype == 0: + (true, res) + else: + (false, -1) + except CancelledError: + (false, -1) + except CatchableError: + if itype != 0: + (true, 0) + else: + (false, -1) + if itype == 0: + check: + waitRes[0] == true + waitRes[1] == 100 + else: + check: + waitRes[0] == true + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) + + asyncTest "Timeout/cancellation race withTimeout() test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]("monitor", + {FutureFlag.OwnCancelSchedule}) + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc activation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + monitorFuture.cancelCallback = activation + let + testFut = raceProc2(monitorFuture) + waitFut = withTimeout(testFut, 10.milliseconds) + + when T is void: + let waitRes = + try: + await waitFut + except CancelledError: + false + except CatchableError: + false + if itype == 0: + check waitRes == true + elif itype == 1: + check waitRes == true + else: + check waitRes == false + elif T is int: + let waitRes = + try: + await waitFut + except CancelledError: + false + except CatchableError: + false + if itype == 0: + check waitRes == true + elif itype == 1: + check waitRes == true + else: + check waitRes == false + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) From 0f0ed1d654aa2f2bdd792eb9ab55b227156fa544 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 20 Apr 2024 03:49:07 +0300 Subject: [PATCH 23/37] Add wait(deadline future) implementation. (#535) * Add waitUntil(deadline) implementation. * Add one more test. * Fix rare race condition and tests for it. * Rename waitUntil() to wait(). --- chronos/internal/asyncfutures.nim | 94 ++++++- tests/testfut.nim | 397 +++++++++++++++++++++++++++++- 2 files changed, 481 insertions(+), 10 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index c3396bf..2b92e74 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1529,6 +1529,60 @@ proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {. inline, deprecated: "Use withTimeout(Future[T], Duration)".} = withTimeout(fut, timeout.milliseconds()) +proc waitUntilImpl[F: SomeFuture](fut: F, retFuture: auto, + deadline: auto): auto = + var timeouted = false + + template completeFuture(fut: untyped, timeout: bool): untyped = + if fut.failed(): + retFuture.fail(fut.error(), warn = false) + elif fut.cancelled(): + if timeout: + # Its possible that `future` could be cancelled in some other place. In + # such case we can't detect if it was our cancellation due to timeout, + # or some other cancellation. 
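+        # We opt to report the timeout, as that is the cancellation this
+        # proc itself initiated.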
+        retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
+      else:
+        retFuture.cancelAndSchedule()
+    else:
+      when type(fut).T is void:
+        retFuture.complete()
+      else:
+        retFuture.complete(fut.value)
+
+  proc continuation(udata: pointer) {.raises: [].} =
+    if not(retFuture.finished()):
+      if timeouted:
+        # When timeout is exceeded and we cancelled future via cancelSoon(),
+        # it's possible that the future at this moment already has a value
+        # and/or error.
+        fut.completeFuture(timeouted)
+        return
+      if not(fut.finished()):
+        timeouted = true
+        fut.cancelSoon()
+      else:
+        fut.completeFuture(false)
+
+  var cancellation: proc(udata: pointer) {.gcsafe, raises: [].}
+  cancellation = proc(udata: pointer) {.gcsafe, raises: [].} =
+    deadline.removeCallback(continuation)
+    if not(fut.finished()):
+      fut.cancelSoon()
+    else:
+      fut.completeFuture(false)
+
+  if fut.finished():
+    fut.completeFuture(false)
+  else:
+    if deadline.finished():
+      retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
+    else:
+      retFuture.cancelCallback = cancellation
+      fut.addCallback(continuation)
+      deadline.addCallback(continuation)
+  retFuture
+
 proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
   var
     moment: Moment
@@ -1606,7 +1660,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
   ## TODO: In case when ``fut`` got cancelled, what result Future[T]
   ## should return, because it can't be cancelled too.
   var
-    retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule})
+    retFuture = newFuture[T]("chronos.wait(duration)",
+                             {FutureFlag.OwnCancelSchedule})
     # We set `OwnCancelSchedule` flag, because we are going to cancel
     # `retFuture` manually at the proper time.
@@ -1621,6 +1676,28 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {.
   else:
     wait(fut, timeout.milliseconds())
 
+proc wait*[T](fut: Future[T], deadline: SomeFuture): Future[T] =
+  ## Returns a future which will complete once future ``fut`` completes
+  ## or once the ``deadline`` future completes.
+  ##
+  ## If the `deadline` future completes before future `fut`,
+  ## an `AsyncTimeoutError` exception will be raised.
+  ##
+  ## Note: the `deadline` future will not be cancelled and/or failed.
+  ##
+  ## Note: While a `wait(fut, deadline)` operation is pending, please avoid
+  ## any attempts to cancel future `fut`. If that happens,
+  ## `wait(fut, deadline)` could exhibit undefined behavior - it could raise
+  ## `CancelledError` or `AsyncTimeoutError`.
+  ##
+  ## If you need to cancel `fut` - cancel `wait(fut, deadline)` instead.
+  var
+    retFuture = newFuture[T]("chronos.wait(future)",
+                             {FutureFlag.OwnCancelSchedule})
+  # We set `OwnCancelSchedule` flag, because we are going to cancel
+  # `retFuture` manually at the proper time.
+  waitUntilImpl(fut, retFuture, deadline)
+
 proc join*(future: FutureBase): Future[void] {.
   async: (raw: true, raises: [CancelledError]).} =
   ## Returns a future which will complete once future ``future`` completes.
@@ -1783,8 +1860,21 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto =
     InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError)
 
   let
-    retFuture = newFuture[T]("chronos.wait()", {OwnCancelSchedule})
+    retFuture = newFuture[T]("chronos.wait(duration)", {OwnCancelSchedule})
     # We set `OwnCancelSchedule` flag, because we are going to cancel
    # `retFuture` manually at the proper time.
waitImpl(fut, retFuture, timeout) + +proc wait*(fut: InternalRaisesFuture, deadline: InternalRaisesFuture): auto = + type + T = type(fut).T + E = type(fut).E + InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError) + + let + retFuture = newFuture[T]("chronos.wait(future)", {OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. + + waitUntilImpl(fut, retFuture, deadline) diff --git a/tests/testfut.nim b/tests/testfut.nim index 46e9c2a..9737439 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -83,7 +83,7 @@ suite "Future[T] behavior test suite": fut.finished testResult == "1245" - asyncTest "wait[T]() test": + asyncTest "wait(duration) test": block: ## Test for not immediately completed future and timeout = -1 let res = @@ -146,6 +146,183 @@ suite "Future[T] behavior test suite": false check res + asyncTest "wait(future) test": + block: + ## Test for not immediately completed future and deadline which is not + ## going to be finished + let + deadline = newFuture[void]() + future1 = testFuture1() + let res = + try: + discard await wait(future1, deadline) + true + except CatchableError: + false + check: + deadline.finished() == false + future1.finished() == true + res == true + + await deadline.cancelAndWait() + + check deadline.finished() == true + block: + ## Test for immediately completed future and timeout = -1 + let + deadline = newFuture[void]() + future2 = testFuture2() + let res = + try: + discard await wait(future2, deadline) + true + except CatchableError: + false + check: + deadline.finished() == false + future2.finished() == true + res + + await deadline.cancelAndWait() + + check deadline.finished() == true + block: + ## Test for not immediately completed future and timeout = 0 + let + deadline = newFuture[void]() + future1 = testFuture1() + deadline.complete() + let res = + try: + discard await wait(future1, deadline) + false + except AsyncTimeoutError: + true + except CatchableError: + false + check: + future1.finished() == false + deadline.finished() == true + res + + block: + ## Test for immediately completed future and timeout = 0 + let + deadline = newFuture[void]() + future2 = testFuture2() + deadline.complete() + let (res1, res2) = + try: + let res = await wait(future2, deadline) + (true, res) + except CatchableError: + (false, -1) + check: + future2.finished() == true + deadline.finished() == true + res1 == true + res2 == 1 + + block: + ## Test for future which cannot be completed in timeout period + let + deadline = sleepAsync(50.milliseconds) + future100 = testFuture100() + let res = + try: + discard await wait(future100, deadline) + false + except AsyncTimeoutError: + true + except CatchableError: + false + check: + deadline.finished() == true + res + await future100.cancelAndWait() + check: + future100.finished() == true + + block: + ## Test for future which will be completed before timeout exceeded. 
+ let + deadline = sleepAsync(500.milliseconds) + future100 = testFuture100() + let (res1, res2) = + try: + let res = await wait(future100, deadline) + (true, res) + except CatchableError: + (false, -1) + check: + future100.finished() == true + deadline.finished() == false + res1 == true + res2 == 0 + await deadline.cancelAndWait() + check: + deadline.finished() == true + + asyncTest "wait(future) cancellation behavior test": + proc deepTest3(future: Future[void]) {.async.} = + await future + + proc deepTest2(future: Future[void]) {.async.} = + await deepTest3(future) + + proc deepTest1(future: Future[void]) {.async.} = + await deepTest2(future) + + let + + deadlineFuture = newFuture[void]() + + block: + # Cancellation should affect `testFuture` because it is in pending state. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + await cancelAndWait(waitFut) + check: + monitorFuture.cancelled() == true + testFuture.cancelled() == true + waitFut.cancelled() == true + deadlineFuture.finished() == false + + block: + # Cancellation should not affect `testFuture` because it is completed. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + monitorFuture.complete() + await cancelAndWait(waitFut) + check: + monitorFuture.completed() == true + monitorFuture.cancelled() == false + testFuture.completed() == true + waitFut.completed() == true + deadlineFuture.finished() == false + + block: + # Cancellation should not affect `testFuture` because it is failed. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + monitorFuture.fail(newException(ValueError, "TEST")) + await cancelAndWait(waitFut) + check: + monitorFuture.failed() == true + monitorFuture.cancelled() == false + testFuture.failed() == true + testFuture.cancelled() == false + waitFut.failed() == true + testFuture.cancelled() == false + deadlineFuture.finished() == false + + await cancelAndWait(deadlineFuture) + + check deadlineFuture.finished() == true + asyncTest "Discarded result Future[T] test": var completedFutures = 0 @@ -1082,7 +1259,7 @@ suite "Future[T] behavior test suite": completed == 0 cancelled == 1 - asyncTest "Cancellation wait() test": + asyncTest "Cancellation wait(duration) test": var neverFlag1, neverFlag2, neverFlag3: bool var waitProc1, waitProc2: bool proc neverEndingProc(): Future[void] = @@ -1143,7 +1320,39 @@ suite "Future[T] behavior test suite": fut.state == FutureState.Completed neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 - asyncTest "Cancellation race test": + asyncTest "Cancellation wait(future) test": + var neverFlag1, neverFlag2, neverFlag3: bool + var waitProc1, waitProc2: bool + proc neverEndingProc(): Future[void] = + var res = newFuture[void]() + proc continuation(udata: pointer) {.gcsafe.} = + neverFlag2 = true + proc cancellation(udata: pointer) {.gcsafe.} = + neverFlag3 = true + res.addCallback(continuation) + res.cancelCallback = cancellation + result = res + neverFlag1 = true + + proc waitProc() {.async.} = + let deadline = sleepAsync(100.milliseconds) + try: + await wait(neverEndingProc(), deadline) + except CancelledError: + waitProc1 = true + except CatchableError: + doAssert(false) + finally: + await cancelAndWait(deadline) + waitProc2 = true + + var fut = waitProc() + await cancelAndWait(fut) + check: + fut.state == 
FutureState.Completed + neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 + + asyncTest "Cancellation race() test": var someFut = newFuture[void]() proc raceProc(): Future[void] {.async.} = @@ -1298,7 +1507,7 @@ suite "Future[T] behavior test suite": false check res - asyncTest "wait(fut) should wait cancellation test": + asyncTest "wait(future) should wait cancellation test": proc futureNeverEnds(): Future[void] = newFuture[void]("neverending.future") @@ -1322,6 +1531,29 @@ suite "Future[T] behavior test suite": check res + asyncTest "wait(future) should wait cancellation test": + proc futureNeverEnds(): Future[void] = + newFuture[void]("neverending.future") + + proc futureOneLevelMore() {.async.} = + await futureNeverEnds() + + var fut = futureOneLevelMore() + let res = + try: + await wait(fut, sleepAsync(100.milliseconds)) + false + except AsyncTimeoutError: + # Because `fut` is never-ending Future[T], `wait` should raise + # `AsyncTimeoutError`, but only after `fut` is cancelled. + if fut.cancelled(): + true + else: + false + except CatchableError: + false + check res + test "race(zero) test": var tseq = newSeq[FutureBase]() var fut1 = race(tseq) @@ -1563,7 +1795,7 @@ suite "Future[T] behavior test suite": v1_u == 0'u v2_u + 1'u == 0'u - asyncTest "wait() cancellation undefined behavior test #1": + asyncTest "wait(duration) cancellation undefined behavior test #1": proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. async.} = await fooFut @@ -1586,7 +1818,7 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut - asyncTest "wait() cancellation undefined behavior test #2": + asyncTest "wait(duration) cancellation undefined behavior test #2": proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. async.} = await fooFut @@ -1613,7 +1845,7 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut - asyncTest "wait() should allow cancellation test (depends on race())": + asyncTest "wait(duration) should allow cancellation test (depends on race())": proc testFoo(): Future[bool] {.async.} = let resFut = sleepAsync(2.seconds).wait(3.seconds) @@ -1699,6 +1931,78 @@ suite "Future[T] behavior test suite": check (await testFoo()) == true + asyncTest "wait(future) cancellation undefined behavior test #1": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testFoo(fooFut: Future[void]) {.async.} = + let deadline = sleepAsync(10.seconds) + let connection = + try: + let res = await testInnerFoo(fooFut).wait(deadline) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + finally: + await deadline.cancelAndWait() + + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "wait(future) cancellation undefined behavior test #2": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {. 
+ async.} = + await testInnerFoo(fooFut) + + proc testFoo(fooFut: Future[void]) {.async.} = + let deadline = sleepAsync(10.seconds) + let connection = + try: + let res = await testMiddleFoo(fooFut).wait(deadline) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + finally: + await deadline.cancelAndWait() + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "wait(future) should allow cancellation test (depends on race())": + proc testFoo(): Future[bool] {.async.} = + let + deadline = sleepAsync(3.seconds) + resFut = sleepAsync(2.seconds).wait(deadline) + timeFut = sleepAsync(1.seconds) + cancelFut = cancelAndWait(resFut) + discard await race(cancelFut, timeFut) + await deadline.cancelAndWait() + if cancelFut.finished(): + return (resFut.cancelled() and cancelFut.completed()) + false + + check (await testFoo()) == true + asyncTest "Cancellation behavior test": proc testInnerFoo(fooFut: Future[void]) {.async.} = await fooFut @@ -2178,7 +2482,7 @@ suite "Future[T] behavior test suite": not compiles(Future[void].Raising([42])) not compiles(Future[void].Raising(42)) - asyncTest "Timeout/cancellation race wait() test": + asyncTest "Timeout/cancellation race wait(duration) test": proc raceTest(T: typedesc, itype: int) {.async.} = let monitorFuture = newFuture[T]("monitor", {FutureFlag.OwnCancelSchedule}) @@ -2252,6 +2556,83 @@ suite "Future[T] behavior test suite": await raceTest(int, 1) await raceTest(int, 2) + asyncTest "Timeout/cancellation race wait(future) test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]() + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc continuation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + let deadlineFuture = newFuture[void]() + deadlineFuture.addCallback continuation + + let + testFut = raceProc2(monitorFuture) + waitFut = wait(testFut, deadlineFuture) + + deadlineFuture.complete() + + when T is void: + let waitRes = + try: + await waitFut + if itype == 0: + true + else: + false + except CancelledError: + false + except CatchableError: + if itype != 0: + true + else: + false + check waitRes == true + elif T is int: + let waitRes = + try: + let res = await waitFut + if itype == 0: + (true, res) + else: + (false, -1) + except CancelledError: + (false, -1) + except CatchableError: + if itype != 0: + (true, 0) + else: + (false, -1) + if itype == 0: + check: + waitRes[0] == true + waitRes[1] == 100 + else: + check: + waitRes[0] == true + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) + asyncTest "Timeout/cancellation race withTimeout() test": proc raceTest(T: typedesc, itype: int) {.async.} = let monitorFuture = newFuture[T]("monitor", From bb96f02ae877e04230dac85c040e66656b7d2ef0 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 24 Apr 2024 
03:16:23 +0300 Subject: [PATCH 24/37] Fix `wait(future)` declaration signature. (#537) --- chronos/internal/asyncfutures.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 2b92e74..ad0b847 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1866,7 +1866,7 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto = waitImpl(fut, retFuture, timeout) -proc wait*(fut: InternalRaisesFuture, deadline: InternalRaisesFuture): auto = +proc wait*(fut: InternalRaisesFuture, deadline: SomeFuture): auto = type T = type(fut).T E = type(fut).E From 72f560f049efa42fffb50aff2015782f6f17825e Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 25 Apr 2024 19:08:53 +0300 Subject: [PATCH 25/37] Fix RangeError defect being happened using android toolchain. (#538) * Fix RangeError defect being happened using android toolchain. * Set proper type for `Tnfds`. * Update comment. --- chronos/ioselects/ioselectors_poll.nim | 5 ++++- chronos/osdefs.nim | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/chronos/ioselects/ioselectors_poll.nim b/chronos/ioselects/ioselectors_poll.nim index 25cc035..51f21bb 100644 --- a/chronos/ioselects/ioselectors_poll.nim +++ b/chronos/ioselects/ioselectors_poll.nim @@ -220,7 +220,10 @@ proc selectInto2*[T](s: Selector[T], timeout: int, verifySelectParams(timeout, -1, int(high(cint))) let - maxEventsCount = min(len(s.pollfds), len(readyKeys)) + maxEventsCount = culong(min(len(s.pollfds), len(readyKeys))) + # Without `culong` conversion, this code could fail with RangeError + # defect on explicit Tnfds(integer) conversion (probably related to + # combination of nim+clang (android toolchain)). eventsCount = if maxEventsCount > 0: let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount), diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 40a6365..303a611 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -965,7 +965,7 @@ elif defined(macos) or defined(macosx): events*: cshort revents*: cshort - Tnfds* {.importc: "nfds_t", header: "".} = cuint + Tnfds* {.importc: "nfds_t", header: "".} = culong const POLLIN* = 0x0001 From 52b02b9977d0b06e0b235861b0c8b06fdc7294be Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sat, 4 May 2024 11:52:42 +0200 Subject: [PATCH 26/37] remove unnecessary impl overloads (#539) --- chronos/internal/asyncfutures.nim | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index ad0b847..1898685 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -76,22 +76,11 @@ template Finished*(T: type FutureState): FutureState {. 
deprecated: "Use FutureState.Completed instead".} = FutureState.Completed -proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] = - let fut = Future[T]() - internalInitFutureBase(fut, loc, FutureState.Pending, {}) - fut - proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] = let fut = Future[T]() internalInitFutureBase(fut, loc, FutureState.Pending, flags) fut -proc newInternalRaisesFutureImpl[T, E]( - loc: ptr SrcLoc): InternalRaisesFuture[T, E] = - let fut = InternalRaisesFuture[T, E]() - internalInitFutureBase(fut, loc, FutureState.Pending, {}) - fut - proc newInternalRaisesFutureImpl[T, E]( loc: ptr SrcLoc, flags: FutureFlags): InternalRaisesFuture[T, E] = let fut = InternalRaisesFuture[T, E]() @@ -125,7 +114,7 @@ template newInternalRaisesFuture*[T, E](fromProc: static[string] = ""): auto = ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. - newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc)) + newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc), {}) template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] {.deprecated.} = ## Create a new future which can hold/preserve GC sequence until future will @@ -1697,7 +1686,7 @@ proc wait*[T](fut: Future[T], deadline: SomeFuture): Future[T] = # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` # manually at proper time. waitUntilImpl(fut, retFuture, deadline) - + proc join*(future: FutureBase): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete once future ``future`` completes. From 1ff81c60eaaff6867fef81680273f3d0f4b5d18b Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 6 May 2024 10:56:48 +0200 Subject: [PATCH 27/37] avoid warning in noCancel with non-raising future (#540) --- chronos/internal/asyncfutures.nim | 19 +++++++++++------- tests/testmacro.nim | 32 ++++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 1898685..6c8f2bd 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1031,19 +1031,24 @@ proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: as let retFuture = newFuture[F.T]("chronos.noCancel(T)", {FutureFlag.OwnCancelSchedule}) template completeFuture() = + const canFail = when declared(InternalRaisesFutureRaises): + InternalRaisesFutureRaises isnot void + else: + true + if future.completed(): when F.T is void: retFuture.complete() else: retFuture.complete(future.value) - elif future.failed(): - when F is Future: - retFuture.fail(future.error, warn = false) - when declared(InternalRaisesFutureRaises): - when InternalRaisesFutureRaises isnot void: - retFuture.fail(future.error, warn = false) else: - raiseAssert("Unexpected future state [" & $future.state & "]") + when canFail: # Avoid calling `failed` on non-failing raises futures + if future.failed(): + retFuture.fail(future.error, warn = false) + else: + raiseAssert("Unexpected future state [" & $future.state & "]") + else: + raiseAssert("Unexpected future state [" & $future.state & "]") proc continuation(udata: pointer) {.gcsafe.} = completeFuture() diff --git a/tests/testmacro.nim b/tests/testmacro.nim index d646303..335e2ee 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -519,7 +519,7 @@ suite "Exceptions tracking": noraises() - test "Nocancel errors": 
+  test "Nocancel errors with raises":
     proc testit {.async: (raises: [ValueError, CancelledError]).} =
       await sleepAsync(5.milliseconds)
       raise (ref ValueError)()
@@ -535,6 +535,36 @@ suite "Exceptions tracking":
 
     noraises()
 
+  test "Nocancel with no errors":
+    proc testit {.async: (raises: [CancelledError]).} =
+      await sleepAsync(5.milliseconds)
+
+    proc test {.async: (raises: []).} =
+      await noCancel testit()
+
+    proc noraises() {.raises: [].} =
+      let f = test()
+      waitFor(f.cancelAndWait())
+      waitFor(f)
+
+    noraises()
+
+  test "Nocancel errors without raises":
+    proc testit {.async.} =
+      await sleepAsync(5.milliseconds)
+      raise (ref ValueError)()
+
+    proc test {.async.} =
+      await noCancel testit()
+
+    proc noraises() =
+      expect(ValueError):
+        let f = test()
+        waitFor(f.cancelAndWait())
+        waitFor(f)
+
+    noraises()
+
   test "Defect on wrong exception type at runtime":
     {.push warning[User]: off}
     let f = InternalRaisesFuture[void, (ValueError,)]()

From 8a306763cec8105fa83574b56734b0f66823f844 Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Tue, 26 Mar 2024 20:08:21 +0100
Subject: [PATCH 28/37] docs for `join` and `noCancel`

---
 docs/src/concepts.md | 86 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 83 insertions(+), 3 deletions(-)

diff --git a/docs/src/concepts.md b/docs/src/concepts.md
index 0469b8b..72a5db3 100644
--- a/docs/src/concepts.md
+++ b/docs/src/concepts.md
@@ -4,6 +4,9 @@ Async/await is a programming model that relies on cooperative multitasking to
 coordinate the concurrent execution of procedures, using event notifications
 from the operating system or other treads to resume execution.
 
+Code execution happens in a loop that alternates between making progress on
+tasks and handling events.
+
 
 ## The dispatcher
 
@@ -118,7 +121,8 @@ The `CancelledError` will now travel up the stack like any other exception.
 It can be caught for instance to free some resources and is then typically
 re-raised for the whole chain operations to get cancelled.
 
-Alternatively, the cancellation request can be translated to a regular outcome of the operation - for example, a `read` operation might return an empty result.
+Alternatively, the cancellation request can be translated to a regular outcome
+of the operation - for example, a `read` operation might return an empty result.
 
 Cancelling an already-finished `Future` has no effect, as the following example
 of downloading two web pages concurrently shows:
@@ -127,8 +131,84 @@ of downloading two web pages concurrently shows:
 ```nim
 {{#include ../examples/twogets.nim}}
 ```
 
+### Ownership
+
+When calling a procedure that returns a `Future`, ownership of that `Future` is
+shared between the callee that created it and the caller that waits for it to be
+finished.
+
+The `Future` can be thought of as a single-item channel between a producer and a
+consumer. The producer creates the `Future` and is responsible for completing or
+failing it while the caller waits for completion and may `cancel` it.
+
+Although it is technically possible, callers must not `complete` or `fail`
+futures and callees or other intermediate observers must not `cancel` them as
+this may lead to panics and shutdown (i.e. if the future is completed twice or a
+cancellation is not handled by the original caller).
+
+### `noCancel`
+
+Certain operations must not be cancelled for semantic reasons. Common scenarios
+include `closeWait` that releases a resource irrevocably and composed
+operations whose individual steps should be performed together or not at all.
+
+In such cases, the `noCancel` modifier to `await` can be used to temporarily
+disable cancellation propagation, allowing the operation to complete even if
+the caller initiates a cancellation request:
+
+```nim
+proc deepSleep(dur: Duration) {.async.} =
+  # `noCancel` prevents any cancellation request by the caller of `deepSleep`
+  # from reaching `sleepAsync` - even if `deepSleep` is cancelled, its future
+  # will not complete until the sleep finishes.
+  await noCancel sleepAsync(dur)
+
+let future = deepSleep(10.minutes)
+
+# This will take ~10 minutes even if we try to cancel the call to `deepSleep`!
+await cancelAndWait(future)
+```
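For the `closeWait` scenario named at the start of this section, here is a minimal sketch of the same pattern - the `Resource` type and `teardown` procedure are illustrative stand-ins, not chronos APIs, and it assumes only `noCancel`, `cancelAndWait` and `sleepAsync` as shown in the docs above:

```nim
import chronos

type
  Resource = ref object
    closed: bool

proc teardown(r: Resource) {.async.} =
  # Stand-in for irrevocable release work (flushing buffers, notifying a
  # peer, ...).
  await sleepAsync(10.milliseconds)
  r.closed = true

proc closeWait(r: Resource) {.async.} =
  # The release must run to completion even if the caller is cancelled;
  # otherwise the resource would be left half-released.
  await noCancel r.teardown()

proc main() {.async.} =
  let r = Resource()
  let fut = r.closeWait()
  # The cancellation request is absorbed by `noCancel`: `fut` still
  # finishes normally once `teardown` is done, as the "Nocancel with no
  # errors" test above also checks.
  await fut.cancelAndWait()
  doAssert r.closed

waitFor main()
```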
+
+### `join`
+
+The `join` modifier to `await` allows cancelling an `async` procedure without
+propagating the cancellation to the awaited operation. This is useful when
+`await`:ing a `Future` for monitoring purposes, i.e. when a procedure is not
+the owner of the future that's being `await`:ed.
+
+One situation where this happens is when implementing the "observer" pattern,
+where a helper monitors an operation it did not initiate:
+
+```nim
+var tick: Future[void]
+proc ticker() {.async.} =
+  while true:
+    tick = sleepAsync(1.second)
+    await tick
+    echo "tick!"
+
+proc tocker() {.async.} =
+  # This operation does not own or implement the operation behind `tick`,
+  # so it should not cancel it when `tocker` is cancelled
+  await join tick
+  echo "tock!"
+
+let
+  fut = ticker() # `ticker` is now looping and most likely waiting for `tick`
+  fut2 = tocker() # both `ticker` and `tocker` are waiting for `tick`
+
+# We don't want `tocker` to cancel a future that was created in `ticker`
+waitFor fut2.cancelAndWait()
+
+waitFor fut # keeps printing `tick!` every second.
+```
+
 ## Compile-time configuration
 
-`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant.
+`chronos` contains several compile-time
+[configuration options](./chronos/config.nim) enabling stricter compile-time
+checks and debugging helpers whose runtime cost may be significant.
 
-Strictness options generally will become default in future chronos releases and allow adapting existing code without changing the new version - see the [`config.nim`](./chronos/config.nim) module for more information.
+Strictness options generally will become default in future chronos releases and
+allow adapting existing code without changing the new version - see the
+[`config.nim`](./chronos/config.nim) module for more information.

From 1b9d9253e89445d585d0fff39cc0d19254fdfd0d Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Sun, 2 Jun 2024 18:05:22 +0300
Subject: [PATCH 29/37] Fix GCC-14 [-Wincompatible-pointer-types] issues.
 (#546)

* Fix class assignment.

* One more fix.

* Bump bearssl version.
--- chronos.nimble | 2 +- chronos/streams/tlsstream.nim | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index ba92ac6..490a086 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -10,7 +10,7 @@ skipDirs = @["tests"] requires "nim >= 1.6.16", "results", "stew", - "bearssl", + "bearssl >= 0.2.3", "httputils", "unittest2" diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 9d90ab7..0e3430e 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -511,7 +511,8 @@ proc newTLSClientAsyncStream*( if TLSFlags.NoVerifyHost in flags: sslClientInitFull(res.ccontext, addr res.x509, nil, 0) x509NoanchorInit(res.xwc, addr res.x509.vtable) - sslEngineSetX509(res.ccontext.eng, addr res.xwc.vtable) + sslEngineSetX509(res.ccontext.eng, + X509ClassPointerConst(addr res.xwc.vtable)) else: when trustAnchors is TrustAnchorStore: res.trustAnchors = trustAnchors @@ -611,7 +612,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader, uint16(maxVersion)) if not isNil(cache): - sslServerSetCache(res.scontext, addr cache.context.vtable) + sslServerSetCache( + res.scontext, SslSessionCacheClassPointerConst(addr cache.context.vtable)) if TLSFlags.EnforceServerPref in flags: sslEngineAddFlags(res.scontext.eng, OPT_ENFORCE_SERVER_PREFERENCES) From c44406594ff4375649f35f48f79dd6a0963bdf3c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 7 Jun 2024 12:05:15 +0200 Subject: [PATCH 30/37] fix results import --- tests/testfut.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testfut.nim b/tests/testfut.nim index 9737439..8d9fa58 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import unittest2 -import stew/results +import results import ../chronos, ../chronos/unittest2/asynctests {.used.} From 7630f394718ebcdb8577e36faacd78cb7a0b7dd6 Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 10 Jun 2024 05:18:42 -0300 Subject: [PATCH 31/37] Fixes compilation issues in v3 compatibility mode (`-d:chronosHandleException`) (#545) * add missing calls to await * add test run in v3 compatibility * fix semantics for chronosHandleException so it does not override local raises/handleException annotations * distinguish between explicit override and default setting; fix test * re-enable wrongly disabled check * make implementation simpler/clearer * update docs * reflow long line * word swap --- chronos.nimble | 8 +++++++ chronos/internal/asyncmacro.nim | 8 ++++++- chronos/transports/datagram.nim | 2 +- chronos/transports/stream.nim | 2 +- docs/src/error_handling.md | 39 ++++++++++++++++++++++++--------- tests/testmacro.nim | 15 +++++++++++++ 6 files changed, 61 insertions(+), 13 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index 490a086..e8334ce 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -60,6 +60,14 @@ task test, "Run all tests": run args & " --mm:refc", "tests/testall" run args, "tests/testall" +task test_v3_compat, "Run all tests in v3 compatibility mode": + for args in testArguments: + if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. 
+ run args & " --mm:refc -d:chronosHandleException", "tests/testall" + + run args & " -d:chronosHandleException", "tests/testall" + task test_libbacktrace, "test with libbacktrace": if platform != "x86": let allArgs = @[ diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index 4ece9f0..e416e1e 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -219,12 +219,14 @@ proc decodeParams(params: NimNode): AsyncParams = var raw = false raises: NimNode = nil - handleException = chronosHandleException + handleException = false + hasLocalAnnotations = false for param in params: param.expectKind(nnkExprColonExpr) if param[0].eqIdent("raises"): + hasLocalAnnotations = true param[1].expectKind(nnkBracket) if param[1].len == 0: raises = makeNoRaises() @@ -236,10 +238,14 @@ proc decodeParams(params: NimNode): AsyncParams = # boolVal doesn't work in untyped macros it seems.. raw = param[1].eqIdent("true") elif param[0].eqIdent("handleException"): + hasLocalAnnotations = true handleException = param[1].eqIdent("true") else: warning("Unrecognised async parameter: " & repr(param[0]), param) + if not hasLocalAnnotations: + handleException = chronosHandleException + (raw, raises, handleException) proc isEmpty(n: NimNode): bool {.compileTime.} = diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index fdb406b..1423d76 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -720,7 +720,7 @@ proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback, proc wrap(transp: DatagramTransport, remote: TransportAddress) {.async: (raises: []).} = try: - cbproc(transp, remote) + await cbproc(transp, remote) except CatchableError as exc: raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 9992543..391ff0a 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2197,7 +2197,7 @@ proc createStreamServer*(host: TransportAddress, proc wrap(server: StreamServer, client: StreamTransport) {.async: (raises: []).} = try: - cbproc(server, client) + await cbproc(server, client) except CatchableError as exc: raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg diff --git a/docs/src/error_handling.md b/docs/src/error_handling.md index 54c1236..2b03dc2 100644 --- a/docs/src/error_handling.md +++ b/docs/src/error_handling.md @@ -110,7 +110,7 @@ sometimes lead to compile errors around forward declarations, methods and closures as Nim conservatively asssumes that any `Exception` might be raised from those. -Make sure to excplicitly annotate these with `{.raises.}`: +Make sure to explicitly annotate these with `{.raises.}`: ```nim # Forward declarations need to explicitly include a raises list: @@ -124,11 +124,12 @@ proc myfunction() = let closure: MyClosure = myfunction ``` +## Compatibility modes -For compatibility, `async` functions can be instructed to handle `Exception` as -well, specifying `handleException: true`. `Exception` that is not a `Defect` and -not a `CatchableError` will then be caught and remapped to -`AsyncExceptionError`: +**Individual functions.** For compatibility, `async` functions can be instructed +to handle `Exception` as well, specifying `handleException: true`. 
Any +`Exception` that is not a `Defect` and not a `CatchableError` will then be +caught and remapped to `AsyncExceptionError`: ```nim proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} = @@ -136,14 +137,32 @@ proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionEr proc callRaiseException() {.async: (raises: []).} = try: - raiseException() + await raiseException() except AsyncExceptionError as exc: # The original Exception is available from the `parent` field echo exc.parent.msg ``` -This mode can be enabled globally with `-d:chronosHandleException` as a help -when porting code to `chronos` but should generally be avoided as global -configuration settings may interfere with libraries that use `chronos` leading -to unexpected behavior. +**Global flag.** This mode can be enabled globally with +`-d:chronosHandleException` as a help when porting code to `chronos`. The +behavior in this case will be that: +1. old-style functions annotated with plain `async` will behave as if they had + been annotated with `async: (handleException: true)`. + + This is functionally equivalent to + `async: (handleException: true, raises: [CatchableError])` and will, as + before, remap any `Exception` that is not `Defect` into + `AsyncExceptionError`, while also allowing any `CatchableError` (including + `AsyncExceptionError`) to get through without compilation errors. + +2. New-style functions with `async: (raises: [...])` annotations or their own + `handleException` annotations will not be affected. + +The rationale here is to allow one to incrementally introduce exception +annotations and get compiler feedback while not requiring that every bit of +legacy code is updated at once. + +This should be used sparingly and with care, however, as global configuration +settings may interfere with libraries that use `chronos` leading to unexpected +behavior. diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 335e2ee..ba1f691 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -8,6 +8,7 @@ import std/[macros, strutils] import unittest2 import ../chronos +import ../chronos/config {.used.} @@ -586,6 +587,20 @@ suite "Exceptions tracking": waitFor(callCatchAll()) + test "Global handleException does not override local annotations": + when chronosHandleException: + proc unnanotated() {.async.} = raise (ref CatchableError)() + + checkNotCompiles: + proc annotated() {.async: (raises: [ValueError]).} = + raise (ref CatchableError)() + + checkNotCompiles: + proc noHandleException() {.async: (handleException: false).} = + raise (ref Exception)() + else: + skip() + test "Results compatibility": proc returnOk(): Future[Result[int, string]] {.async: (raises: []).} = ok(42) From 4ad38079dec8407c396ebaaf6ba60e5e94e3fce5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 20 Jun 2024 09:52:23 +0200 Subject: [PATCH 32/37] pretty-printer for `Duration` (#547) --- chronos/timer.nim | 77 ++++++++++++++++++++-------------------------- tests/testtime.nim | 3 ++ 2 files changed, 36 insertions(+), 44 deletions(-) diff --git a/chronos/timer.nim b/chronos/timer.nim index 29af20e..1aabd64 100644 --- a/chronos/timer.nim +++ b/chronos/timer.nim @@ -370,53 +370,42 @@ template add(a: var string, b: Base10Buf[uint64]) = for index in 0 ..< b.len: a.add(char(b.data[index])) -func `$`*(a: Duration): string {.inline.} = - ## Returns string representation of Duration ``a`` as nanoseconds value. 
- var res = "" - var v = a.value +func toString*(a: timer.Duration, parts = int.high): string = + ## Returns a pretty string representation of Duration ``a`` - the + ## number of parts returned can be limited thus truncating the output to + ## an approximation that grows more precise as the duration becomes smaller + var + res = newStringOfCap(32) + v = a.nanoseconds() + parts = parts + + template f(n: string, T: Duration) = + if parts <= 0: + return res + + if v >= T.nanoseconds(): + res.add(Base10.toBytes(uint64(v div T.nanoseconds()))) + res.add(n) + v = v mod T.nanoseconds() + dec parts + if v == 0: + return res + + f("w", Week) + f("d", Day) + f("h", Hour) + f("m", Minute) + f("s", Second) + f("ms", Millisecond) + f("us", Microsecond) + f("ns", Nanosecond) - if v >= Week.value: - res.add(Base10.toBytes(uint64(v div Week.value))) - res.add('w') - v = v mod Week.value - if v == 0: return res - if v >= Day.value: - res.add(Base10.toBytes(uint64(v div Day.value))) - res.add('d') - v = v mod Day.value - if v == 0: return res - if v >= Hour.value: - res.add(Base10.toBytes(uint64(v div Hour.value))) - res.add('h') - v = v mod Hour.value - if v == 0: return res - if v >= Minute.value: - res.add(Base10.toBytes(uint64(v div Minute.value))) - res.add('m') - v = v mod Minute.value - if v == 0: return res - if v >= Second.value: - res.add(Base10.toBytes(uint64(v div Second.value))) - res.add('s') - v = v mod Second.value - if v == 0: return res - if v >= Millisecond.value: - res.add(Base10.toBytes(uint64(v div Millisecond.value))) - res.add('m') - res.add('s') - v = v mod Millisecond.value - if v == 0: return res - if v >= Microsecond.value: - res.add(Base10.toBytes(uint64(v div Microsecond.value))) - res.add('u') - res.add('s') - v = v mod Microsecond.value - if v == 0: return res - res.add(Base10.toBytes(uint64(v div Nanosecond.value))) - res.add('n') - res.add('s') res +func `$`*(a: Duration): string {.inline.} = + ## Returns string representation of Duration ``a``. + a.toString() + func `$`*(a: Moment): string {.inline.} = ## Returns string representation of Moment ``a`` as nanoseconds value. 
var res = "" diff --git a/tests/testtime.nim b/tests/testtime.nim index 03c2318..118a602 100644 --- a/tests/testtime.nim +++ b/tests/testtime.nim @@ -89,6 +89,9 @@ suite "Asynchronous timers & steps test suite": $nanoseconds(1_000_000_900) == "1s900ns" $nanoseconds(1_800_700_000) == "1s800ms700us" $nanoseconds(1_800_000_600) == "1s800ms600ns" + nanoseconds(1_800_000_600).toString(0) == "" + nanoseconds(1_800_000_600).toString(1) == "1s" + nanoseconds(1_800_000_600).toString(2) == "1s800ms" test "Asynchronous steps test": var fut1 = stepsAsync(1) From 13d28a5b710c414be17bfe36ca25bf34771875cc Mon Sep 17 00:00:00 2001 From: Miran Date: Wed, 3 Jul 2024 12:57:58 +0200 Subject: [PATCH 33/37] update ci.yml and be more explicit in .nimble (#549) --- .github/workflows/ci.yml | 22 +++++++++++++++------- .github/workflows/doc.yml | 4 ++-- chronos.nimble | 12 ++++++------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cab8555..81d6cca 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,23 +22,29 @@ jobs: cpu: i386 - os: macos cpu: amd64 + - os: macos + cpu: arm64 - os: windows cpu: amd64 - #- os: windows - #cpu: i386 branch: [version-1-6, version-2-0, devel] include: - target: os: linux - builder: ubuntu-20.04 + builder: ubuntu-latest shell: bash - target: os: macos - builder: macos-12 + cpu: amd64 + builder: macos-13 + shell: bash + - target: + os: macos + cpu: arm64 + builder: macos-latest shell: bash - target: os: windows - builder: windows-2019 + builder: windows-latest shell: msys2 {0} defaults: @@ -50,7 +56,7 @@ jobs: continue-on-error: ${{ matrix.branch == 'devel' }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Enable debug verbosity if: runner.debug == '1' @@ -102,7 +108,7 @@ jobs: - name: Restore Nim DLLs dependencies (Windows) from cache if: runner.os == 'Windows' id: windows-dlls-cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: external/dlls-${{ matrix.target.cpu }} key: 'dlls-${{ matrix.target.cpu }}' @@ -126,6 +132,8 @@ jobs: run: | if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then PLATFORM=x64 + elif [[ '${{ matrix.target.cpu }}' == 'arm64' ]]; then + PLATFORM=arm64 else PLATFORM=x86 fi diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 5d4022c..5fc0d52 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -15,7 +15,7 @@ jobs: continue-on-error: true steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true - uses: actions-rs/install@v0.1 @@ -41,7 +41,7 @@ jobs: - uses: jiro4989/setup-nim-action@v1 with: - nim-version: '1.6.16' + nim-version: '1.6.20' - name: Generate doc run: | diff --git a/chronos.nimble b/chronos.nimble index e8334ce..a6ae749 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -55,10 +55,10 @@ task examples, "Build examples": task test, "Run all tests": for args in testArguments: + # First run tests with `refc` memory manager. + run args & " --mm:refc", "tests/testall" if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + run args & " --mm:orc", "tests/testall" task test_v3_compat, "Run all tests in v3 compatibility mode": for args in testArguments: @@ -75,10 +75,10 @@ task test_libbacktrace, "test with libbacktrace": ] for args in allArgs: + # First run tests with `refc` memory manager. 
+ run args & " --mm:refc", "tests/testall" if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + run args & " --mm:orc", "tests/testall" task docs, "Generate API documentation": exec "mdbook build docs" From 8f609b6c17a7eec030f4acdde69e2b5c4dd4778f Mon Sep 17 00:00:00 2001 From: c-blake Date: Tue, 9 Jul 2024 09:42:20 +0000 Subject: [PATCH 34/37] Fix tests to be string hash order independent (#551) --- tests/testhttpserver.nim | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 70cca33..4cc61a0 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -13,6 +13,11 @@ import stew/base10 {.used.} +# Trouble finding this if defined near its use for `data2.sorted`, etc. likely +# related to "generic sandwich" issues. If any test ever wants to `sort` a +# `seq[(string, seq[string]]` differently, they may need to re-work that test. +proc `<`(a, b: (string, seq[string])): bool = a[0] < b[0] + suite "HTTP server testing suite": teardown: checkLeaks() @@ -846,11 +851,11 @@ suite "HTTP server testing suite": for key, value in table1.items(true): data2.add((key, value)) - check: - data1 == @[("Header2", "value2"), ("Header2", "VALUE3"), - ("Header1", "value1")] - data2 == @[("Header2", @["value2", "VALUE3"]), - ("Header1", @["value1"])] + check: # .sorted to not depend upon hash(key)-order + data1.sorted == sorted(@[("Header2", "value2"), ("Header2", "VALUE3"), + ("Header1", "value1")]) + data2.sorted == sorted(@[("Header2", @["value2", "VALUE3"]), + ("Header1", @["value1"])]) table1.set("header2", "value4") check: From dc3847e4d6733dfc3811454c2a9c384b87343e26 Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Thu, 18 Jul 2024 19:59:03 +0200 Subject: [PATCH 35/37] add ubuntu 24 and gcc 14 (#553) * add ubuntu 24 and gcc 14 * upgrade bearssl * Fix nim-1-6 gcc-14 issue. * rename target to linux-gcc-14 * Bump bearssl. --------- Co-authored-by: cheatfate --- .github/workflows/ci.yml | 15 +++++++++++++++ chronos.nimble | 2 +- chronos/streams/tlsstream.nim | 3 ++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81d6cca..3b04538 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,8 @@ jobs: target: - os: linux cpu: amd64 + - os: linux-gcc-14 # this is to use ubuntu 24 and install gcc 14. Must be removed when ubuntu-latest is 24.04 + cpu: amd64 - os: linux cpu: i386 - os: macos @@ -32,6 +34,10 @@ jobs: os: linux builder: ubuntu-latest shell: bash + - target: + os: linux-gcc-14 # this is to use ubuntu 24 and install gcc 14. 
Must be removed when ubuntu-latest is 24.04 + builder: ubuntu-24.04 + shell: bash - target: os: macos cpu: amd64 @@ -165,6 +171,15 @@ jobs: bash build_nim.sh nim csources dist/nimble NimBinaries echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH + - name: Use gcc 14 # Must be removed when ubuntu-latest is 24.04 and gcc 14 is the default + if : ${{ matrix.target.os == 'linux-gcc-14' }} + run: | + # Add GCC-14 to alternatives + sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14 + + # Set GCC-14 as the default + sudo update-alternatives --set gcc /usr/bin/gcc-14 + - name: Run tests run: | nim --version diff --git a/chronos.nimble b/chronos.nimble index a6ae749..2040642 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -10,7 +10,7 @@ skipDirs = @["tests"] requires "nim >= 1.6.16", "results", "stew", - "bearssl >= 0.2.3", + "bearssl >= 0.2.5", "httputils", "unittest2" diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 0e3430e..803208e 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -510,7 +510,8 @@ proc newTLSClientAsyncStream*( if TLSFlags.NoVerifyHost in flags: sslClientInitFull(res.ccontext, addr res.x509, nil, 0) - x509NoanchorInit(res.xwc, addr res.x509.vtable) + x509NoanchorInit(res.xwc, + X509ClassPointerConst(addr res.x509.vtable)) sslEngineSetX509(res.ccontext.eng, X509ClassPointerConst(addr res.xwc.vtable)) else: From c04576d829b8a0a1b12baaa8bc92037501b3a4a0 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 22 Aug 2024 02:53:48 +0300 Subject: [PATCH 36/37] Bump version to 4.0.3. (#555) --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index 2040642..5312f7d 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "4.0.2" +version = "4.0.3" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From 9186950e03254b87533715f7c991a537b3167bef Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 15 Oct 2024 17:19:42 +0200 Subject: [PATCH 37/37] Replace `apt-fast` with `apt-get` (#558) `apt-fast` was removed from GitHub with Ubuntu 24.04: - https://github.com/actions/runner-images/issues/10003 For compatibility, switch back to `apt-get`. --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3b04538..6c22f67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,8 +74,8 @@ jobs: if: runner.os == 'Linux' && matrix.target.cpu == 'i386' run: | sudo dpkg --add-architecture i386 - sudo apt-fast update -qq - sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \ + sudo apt-get update -qq + sudo DEBIAN_FRONTEND='noninteractive' apt-get install \ --no-install-recommends -yq gcc-multilib g++-multilib \ libssl-dev:i386 mkdir -p external/bin @@ -176,7 +176,7 @@ jobs: run: | # Add GCC-14 to alternatives sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14 - + # Set GCC-14 as the default sudo update-alternatives --set gcc /usr/bin/gcc-14
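
Taken together, patches 23 and 24 leave `wait` with a deadline-future overload alongside the duration-based one. A minimal usage sketch, assuming only the API shown in those diffs (the `fetchData` procedure and the chosen durations are illustrative only):

```nim
import chronos

proc fetchData(): Future[int] {.async.} =
  await sleepAsync(50.milliseconds)
  return 42

proc main() {.async.} =
  # Any future can act as the deadline - here a simple timer.
  let deadline = sleepAsync(1.seconds)
  try:
    # Completes with the value of `fetchData()`, or raises
    # `AsyncTimeoutError` if `deadline` finishes first.
    let value = await fetchData().wait(deadline)
    echo value
  except AsyncTimeoutError:
    echo "deadline expired first"
  finally:
    # `wait()` never cancels or fails the deadline future, so the caller
    # cleans it up, mirroring the tests added in patch 23.
    await deadline.cancelAndWait()

waitFor main()
```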