Merge branch 'master' into feature/profiler-v4

This commit is contained in:
gmega 2024-01-22 14:36:18 -03:00
commit 4916381aa0
No known key found for this signature in database
GPG Key ID: FFD8DAF00660270F
88 changed files with 10622 additions and 6887 deletions

View File

@ -1,40 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
build_script:
- cd C:\projects\%APPVEYOR_PROJECT_SLUG%
- nimble install -y --depsOnly
- nimble install -y libbacktrace
test_script:
- nimble test
deploy: off

View File

@ -52,6 +52,12 @@ jobs:
- name: Checkout
uses: actions/checkout@v3
- name: Enable debug verbosity
if: runner.debug == '1'
run: |
echo "V=1" >> $GITHUB_ENV
echo "UNITTEST2_OUTPUT_LVL=VERBOSE" >> $GITHUB_ENV
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
@ -96,7 +102,7 @@ jobs:
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: external/dlls-${{ matrix.target.cpu }}
key: 'dlls-${{ matrix.target.cpu }}'
@ -159,3 +165,4 @@ jobs:
nimble install -y libbacktrace
nimble test
nimble test_libbacktrace
nimble examples

View File

@ -15,48 +15,44 @@ jobs:
continue-on-error: true
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
submodules: true
- uses: actions-rs/install@v0.1
with:
crate: mdbook
use-tool-cache: true
version: "0.4.36"
- uses: actions-rs/install@v0.1
with:
crate: mdbook-toc
use-tool-cache: true
version: "0.14.1"
- uses: actions-rs/install@v0.1
with:
crate: mdbook-open-on-gh
use-tool-cache: true
version: "2.4.1"
- uses: actions-rs/install@v0.1
with:
crate: mdbook-admonish
use-tool-cache: true
version: "1.14.0"
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: '1.6.6'
nim-version: '1.6.16'
- name: Generate doc
run: |
nim --version
nimble --version
nimble install -dy
# nim doc can "fail", but the doc is still generated
nim doc --git.url:https://github.com/status-im/nim-chronos --git.commit:master --outdir:docs --project chronos || true
nimble docs || true
# check that the folder exists
ls docs
- name: Clone the gh-pages branch
uses: actions/checkout@v2
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
repository: status-im/nim-chronos
ref: gh-pages
path: subdoc
submodules: true
fetch-depth: 0
- name: Commit & push
run: |
cd subdoc
# Update / create this branch doc
rm -rf docs
mv ../docs .
# Remove .idx files
# NOTE: git also uses idx files in its
# internal folder, hence the `*` instead of `.`
find * -name "*.idx" -delete
git add .
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name "${{ github.actor }}"
git commit -a -m "update docs"
git push origin gh-pages
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./docs/book
force_orphan: true

View File

@ -1,27 +0,0 @@
language: c
# https://docs.travis-ci.com/user/caching/
cache:
directories:
- NimBinaries
git:
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
depth: 10
os:
- linux
- osx
install:
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
- export PATH="$PWD/Nim/bin:$PATH"
script:
- nimble install -y
- nimble test

README.md
View File

@ -1,6 +1,6 @@
# Chronos - An efficient library for asynchronous programming
[![Github action](https://github.com/status-im/nim-chronos/workflows/nim-chronos%20CI/badge.svg)](https://github.com/status-im/nim-chronos/actions/workflows/ci.yml)
[![Github action](https://github.com/status-im/nim-chronos/workflows/CI/badge.svg)](https://github.com/status-im/nim-chronos/actions/workflows/ci.yml)
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)
@ -9,16 +9,16 @@
Chronos is an efficient [async/await](https://en.wikipedia.org/wiki/Async/await) framework for Nim. Features include:
* Efficient dispatch pipeline for asynchronous execution
* Asynchronous socket and process I/O
* HTTP server with SSL/TLS support out of the box (no OpenSSL needed)
* Cancellation support
* Synchronization primitives like queues, events and locks
* FIFO processing order of dispatch queue
* Minimal exception effect support (see [exception effects](#exception-effects))
* Cancellation
* Efficient dispatch pipeline with excellent multi-platform support
* Exceptional error handling features, including `raises` tracking
## Installation
## Getting started
You can use Nim's official package manager Nimble to install Chronos:
Install `chronos` using `nimble`:
```text
nimble install chronos
@ -30,6 +30,30 @@ or add a dependency to your `.nimble` file:
requires "chronos"
```
and start using it:
```nim
import chronos/apps/http/httpclient

proc retrievePage(uri: string): Future[string] {.async.} =
  # Create a new HTTP session
  let httpSession = HttpSessionRef.new()
  try:
    # Fetch page contents
    let resp = await httpSession.fetch(parseUri(uri))
    # Convert response to a string, assuming its encoding matches the terminal!
    bytesToString(resp.data)
  finally: # Close the session
    await noCancel(httpSession.closeWait())

echo waitFor retrievePage(
  "https://raw.githubusercontent.com/status-im/nim-chronos/master/README.md")
```
## Documentation
See the [user guide](https://status-im.github.io/nim-chronos/).
## Projects using `chronos`
* [libp2p](https://github.com/status-im/nim-libp2p) - Peer-to-Peer networking stack implemented in many languages
@ -42,305 +66,7 @@ requires "chronos"
Submit a PR to add yours!
## Documentation
### Concepts
Chronos implements the async/await paradigm in a self-contained library, using
macros, with no specific helpers from the compiler.
Our event loop is called a "dispatcher" and a single instance per thread is
created as soon as one is needed.
To trigger a dispatcher's processing step, we need to call `poll()` - either
directly or through a wrapper like `runForever()` or `waitFor()`. This step
handles any file descriptors, timers and callbacks that are ready to be
processed.
`Future` objects encapsulate the result of an async procedure, upon successful
completion, and a list of callbacks to be scheduled after any type of
completion - be that success, failure or cancellation.
(These explicit callbacks are rarely used outside Chronos, being replaced by
implicit ones generated by async procedure execution and `await` chaining.)
Async procedures (those using the `{.async.}` pragma) return `Future` objects.
Inside an async procedure, you can `await` the future returned by another async
procedure. At this point, control will be handed to the event loop until that
future is completed.
A future is completed once it succeeds, fails or is cancelled; until then it is
pending. Completion is tested with `Future.finished()`, and the completion
states are distinguished with `Future.failed()` and `Future.cancelled()`.
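A minimal sketch of inspecting these states - using `sleepAsync` purely for
illustration and polling the dispatcher directly:
```nim
import chronos

let fut = sleepAsync(100.milliseconds)
while not(fut.finished()):
  poll()
if fut.cancelled():
  echo "cancelled"
elif fut.failed():
  echo "failed: ", fut.error.msg
else:
  echo "completed successfully"
```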
### Dispatcher
You can run the "dispatcher" event loop forever, with `runForever()` which is defined as:
```nim
proc runForever*() =
  while true:
    poll()
```
You can also run it until a certain future is completed, with `waitFor()` which
will also call `Future.read()` on it:
```nim
proc p(): Future[int] {.async.} =
  await sleepAsync(100.milliseconds)
  return 1

echo waitFor p() # prints "1"
```
`waitFor()` is defined like this:
```nim
proc waitFor*[T](fut: Future[T]): T =
  while not(fut.finished()):
    poll()
  return fut.read()
```
### Async procedures and methods
The `{.async.}` pragma will transform a procedure (or a method) returning a
specialised `Future` type into a closure iterator. If there is no return type
specified, a `Future[void]` is returned.
```nim
proc p() {.async.} =
  await sleepAsync(100.milliseconds)

echo p().type # prints "Future[system.void]"
```
Whenever `await` is encountered inside an async procedure, control is passed
back to the dispatcher for as many steps as are necessary for the awaited
future to complete successfully, fail or be cancelled. `await` calls the
equivalent of `Future.read()` on the completed future and returns the
encapsulated value.
```nim
proc p1() {.async.} =
  await sleepAsync(1.seconds)

proc p2() {.async.} =
  await sleepAsync(1.seconds)

proc p3() {.async.} =
  let
    fut1 = p1()
    fut2 = p2()
  # Just by executing the async procs, both resulting futures entered the
  # dispatcher's queue and their "clocks" started ticking.
  await fut1
  await fut2
  # Only one second passed while awaiting them both, not two.

waitFor p3()
```
Don't let `await`'s behaviour of giving control back to the dispatcher surprise
you. If an async procedure modifies global state and you can't predict when it
will start executing, the only way to keep that state from changing underneath
your feet within a given section is to not use `await` in that section.
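As an illustrative sketch (the `counter` global and the procedures are invented
for this example), state read before an `await` may be stale by the time the
procedure resumes:
```nim
import chronos

var counter = 0

proc addOne() {.async.} =
  let seen = counter              # read global state
  await sleepAsync(10.milliseconds)
  # other tasks may have modified `counter` while we were suspended
  counter = seen + 1              # may overwrite their updates

proc race() {.async.} =
  let
    fut1 = addOne()
    fut2 = addOne()
  await fut1
  await fut2
  echo counter # may print 1 instead of 2

waitFor race()
```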
### Error handling
Exceptions inheriting from `CatchableError` are caught by hidden `try` blocks
and placed in the `Future.error` field, changing the future's status to
`Failed`.
When a future is awaited, that exception is re-raised, only to be caught again
by a hidden `try` block in the calling async procedure. That's how these
exceptions move up the async chain.
A failed future's callbacks will still be scheduled, but it's not possible to
resume execution from the point an exception was raised.
```nim
proc p1() {.async.} =
  await sleepAsync(1.seconds)
  raise newException(ValueError, "ValueError inherits from CatchableError")

proc p2() {.async.} =
  await sleepAsync(1.seconds)

proc p3() {.async.} =
  let
    fut1 = p1()
    fut2 = p2()
  await fut1
  echo "unreachable code here"
  await fut2

# `waitFor()` would call `Future.read()` unconditionally, which would raise the
# exception in `Future.error`.
let fut3 = p3()
while not(fut3.finished()):
  poll()
echo "fut3.state = ", fut3.state # "Failed"
if fut3.failed():
  echo "p3() failed: ", fut3.error.name, ": ", fut3.error.msg
  # prints "p3() failed: ValueError: ValueError inherits from CatchableError"
```
You can put the `await` in a `try` block, to deal with that exception sooner:
```nim
proc p3() {.async.} =
  let
    fut1 = p1()
    fut2 = p2()
  try:
    await fut1
  except CatchableError:
    echo "p1() failed: ", fut1.error.name, ": ", fut1.error.msg
  echo "reachable code here"
  await fut2
```
Chronos does not allow future continuations and other callbacks to raise
`CatchableError` - as such, calls to `poll` will never raise exceptions
originating from tasks on the dispatcher queue. It is however possible for a
`Defect` raised in a task to bubble up through `poll`, as defects are not
caught by the transformation.
### Platform independence
Several functions in `chronos` are backed by the operating system, such as
waiting for network events, creating files and sockets, etc. The specific
exceptions raised by the OS are platform-dependent, thus such functions are
declared as raising `CatchableError` but will in general raise something more
specific. In particular, it's possible that some functions annotated as raising
`CatchableError` only raise on _some_ platforms - in order to work on all
platforms, calling code must assume that they will raise even when they don't
seem to do so on one platform.
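A minimal sketch of the defensive pattern this implies - the address below is a
placeholder chosen only to make the call likely to fail:
```nim
import chronos

proc tryConnect() {.async.} =
  try:
    # The concrete exception raised here differs between platforms, so we rely
    # only on the documented `CatchableError` contract.
    let transp = await connect(initTAddress("127.0.0.1", Port(1)))
    await transp.closeWait()
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    echo "connect failed: ", exc.name, ": ", exc.msg

waitFor tryConnect()
```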
### Exception effects
`chronos` currently offers minimal support for exception effects and `raises`
annotations. In general, during the `async` transformation, a generic
`except CatchableError` handler is added around the entire function being
transformed, in order to catch any exceptions and transfer them to the `Future`.
Because of this, the effect system thinks no exceptions are "leaking": in
fact, exception _handling_ is deferred to when the future is being read.
Effectively, this means that while code can be compiled with
`{.push raises: [Defect]}`, the intended effect propagation and checking is
**disabled** for `async` functions.
To enable checking exception effects in `async` code, enable strict mode with
`-d:chronosStrictException`.
In strict mode, `async` functions are checked such that they only raise
`CatchableError`; exception effects must therefore be specified explicitly on
forward declarations, callbacks and methods using `{.raises: [CatchableError].}`
(or stricter) annotations.
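A hypothetical sketch of what this looks like in practice (compile with
`-d:chronosStrictException`; the callback type and procedures are invented for
the example):
```nim
import chronos

# Explicit exception effect on the callback type lets strict mode verify the
# async code that invokes it.
type DataCallback = proc(data: string) {.gcsafe, raises: [CatchableError].}

proc process(cb: DataCallback) {.async.} =
  await sleepAsync(10.milliseconds)
  cb("done")

proc printData(data: string) {.gcsafe, raises: [CatchableError].} =
  echo "got: ", data

waitFor process(printData)
```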
### Cancellation support
Any running `Future` can be cancelled. This can be used, for example, to launch
multiple futures, wait for one of them to finish and cancel the rest, to add a
timeout, or to let the user cancel a running task.
```nim
# Simple cancellation
let future = sleepAsync(10.minutes)
future.cancel()

# Wait for cancellation
let future2 = sleepAsync(10.minutes)
await future2.cancelAndWait()

# Race between futures
proc retrievePage(uri: string): Future[string] {.async.} =
  # requires importing uri, chronos/apps/http/httpclient, stew/byteutils
  let httpSession = HttpSessionRef.new()
  try:
    let resp = await httpSession.fetch(parseUri(uri))
    result = string.fromBytes(resp.data)
  finally:
    # be sure to always close the session
    await httpSession.closeWait()

let
  futs =
    @[
      retrievePage("https://duckduckgo.com/?q=chronos"),
      retrievePage("https://www.google.fr/search?q=chronos")
    ]

let finishedFut = await one(futs)
for fut in futs:
  if not fut.finished:
    fut.cancel()
echo "Result: ", await finishedFut
```
When an `await` is cancelled, it will raise a `CancelledError`:
```nim
proc c1 {.async.} =
  echo "Before sleep"
  try:
    await sleepAsync(10.minutes)
    echo "After sleep" # not reached due to cancellation
  except CancelledError as exc:
    echo "We got cancelled!"
    raise exc

proc c2 {.async.} =
  await c1()
  echo "Never reached, since the CancelledError got re-raised"

let work = c2()
waitFor(work.cancelAndWait())
```
The `CancelledError` will now travel up the stack like any other exception.
It can be caught and handled (for instance, to free some resources).
### Multiple async backend support
Thanks to its powerful macro support, Nim allows `async`/`await` to be
implemented in libraries with only minimal support from the language - as such,
multiple `async` libraries exist, including `chronos` and `asyncdispatch`, and
more may come to be developed in the future.
Libraries built on top of `async`/`await` may wish to support multiple async
backends - the best way to do so is to create separate modules for each backend
that may be imported side-by-side - see [nim-metrics](https://github.com/status-im/nim-metrics/blob/master/metrics/)
for an example.
An alternative way is to select the backend using a global compile flag - this
method makes it difficult to compose applications that use both backends, as may
happen with transitive dependencies, but may be appropriate in some cases -
libraries choosing this path should call the flag `asyncBackend`, allowing
applications to choose the backend with `-d:asyncBackend=<backend_name>` (see
the sketch after the backend list below).
Known `async` backends include:
* `chronos` - this library (`-d:asyncBackend=chronos`)
* `asyncdispatch` the standard library `asyncdispatch` [module](https://nim-lang.org/docs/asyncdispatch.html) (`-d:asyncBackend=asyncdispatch`)
* `none` - ``-d:asyncBackend=none`` - disable ``async`` support completely
``none`` can be used when a library supports both a synchronous and
asynchronous API, to disable the latter.
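A hypothetical sketch of the compile-flag approach - a wrapper module that
picks its backend from the `asyncBackend` define:
```nim
# Hypothetical wrapper module; select the backend when compiling, e.g. with
# `-d:asyncBackend=asyncdispatch`.
const asyncBackend {.strdefine.} = "chronos"

when asyncBackend == "chronos":
  import chronos
  export chronos
elif asyncBackend == "asyncdispatch":
  import std/asyncdispatch
  export asyncdispatch
elif asyncBackend == "none":
  discard # async support disabled entirely
else:
  {.error: "unknown asyncBackend value".}
```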
### Compile-time configuration
`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant.
Strictness options will generally become the default in future chronos releases - enabling them early allows adapting existing code ahead of that change; see the [`config.nim`](./chronos/config.nim) module for more information.
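For illustration, these options are ordinary `-d:` defines - the flags below
appear in this repository's nimble tasks - and can be queried from code with
`defined()`:
```nim
# Sketch only: enable with e.g. `-d:chronosDebug -d:chronosStrictException`.
when defined(chronosDebug):
  echo "chronos debug helpers enabled"
when defined(chronosStrictException):
  echo "strict exception-effect checking enabled"
```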
## TODO
* Pipe/Subprocess Transports.
* Multithreading Stream/Datagram servers
## Contributing
@ -349,10 +75,6 @@ When submitting pull requests, please add test cases for any new features or fix
`chronos` follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
## Other resources
* [Historical differences with asyncdispatch](https://github.com/status-im/nim-chronos/wiki/AsyncDispatch-comparison)
## License
Licensed and distributed under either of

View File

@ -5,6 +5,10 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import chronos/[asyncloop, asyncsync, handles, transport, timer,
asyncproc, debugutils]
export asyncloop, asyncsync, handles, transport, timer, asyncproc, debugutils
## `async`/`await` framework for [Nim](https://nim-lang.org)
##
## See https://status-im.github.io/nim-chronos/ for documentation
import chronos/[asyncloop, asyncsync, handles, transport, timer, debugutils]
export asyncloop, asyncsync, handles, transport, timer, debugutils

View File

@ -7,40 +7,58 @@ description = "Networking framework with async/await support"
license = "MIT or Apache License 2.0"
skipDirs = @["tests"]
requires "nim >= 1.2.0",
requires "nim >= 1.6.0",
"results",
"stew",
"bearssl",
"httputils",
"unittest2"
import os, strutils
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
let verbose = getEnv("V", "") notin ["", "0"]
let testArguments =
when defined(windows):
[
"-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert",
"-d:release",
]
else:
[
"-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert",
"-d:debug -d:chronosDebug -d:chronosEventEngine=poll -d:useSysAssert -d:useGcAssert",
"-d:release",
]
let styleCheckStyle = if (NimMajor, NimMinor) < (1, 6): "hint" else: "error"
let cfg =
" --styleCheck:usages --styleCheck:" & styleCheckStyle &
" --styleCheck:usages --styleCheck:error" &
(if verbose: "" else: " --verbosity:0 --hints:off") &
" --skipParentCfg --skipUserCfg --outdir:build --nimcache:build/nimcache -f"
" --skipParentCfg --skipUserCfg --outdir:build " &
quoteShell("--nimcache:build/nimcache/$projectName")
proc build(args, path: string) =
exec nimc & " " & lang & " " & cfg & " " & flags & " " & args & " " & path
proc run(args, path: string) =
build args & " -r", path
build args, path
exec "build/" & path.splitPath[1]
task examples, "Build examples":
# Build book examples
for file in listFiles("docs/examples"):
if file.endsWith(".nim"):
build "", file
task test, "Run all tests":
for args in [
"-d:debug -d:chronosDebug",
"-d:debug -d:chronosPreviewV4",
"-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert",
"-d:release",
"-d:release -d:chronosPreviewV4"]:
for args in testArguments:
run args, "tests/testall"
if (NimMajor, NimMinor) > (1, 6):
run args & " --mm:refc", "tests/testall"
task test_libbacktrace, "test with libbacktrace":
var allArgs = @[
"-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace",
@ -56,3 +74,7 @@ task test_profiler, "test with profiler instrumentation":
for args in allArgs:
run args, "tests/testall"
task docs, "Generate API documentation":
exec "mdbook build docs"
exec nimc & " doc " & "--git.url:https://github.com/status-im/nim-chronos --git.commit:master --outdir:docs/book/api --project chronos"

View File

@ -6,6 +6,9 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import strutils
const

View File

@ -6,6 +6,9 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, boundstream]
import httpcommon
@ -36,7 +39,7 @@ proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader =
trackCounter(HttpBodyReaderTrackerName)
res
proc closeWait*(bstream: HttpBodyReader) {.async.} =
proc closeWait*(bstream: HttpBodyReader) {.async: (raises: []).} =
## Close and free resource allocated by body reader.
if bstream.bstate == HttpState.Alive:
bstream.bstate = HttpState.Closing
@ -45,8 +48,8 @@ proc closeWait*(bstream: HttpBodyReader) {.async.} =
# data from stream at position [1].
for index in countdown((len(bstream.streams) - 1), 0):
res.add(bstream.streams[index].closeWait())
await allFutures(res)
await procCall(closeWait(AsyncStreamReader(bstream)))
res.add(procCall(closeWait(AsyncStreamReader(bstream))))
await noCancel(allFutures(res))
bstream.bstate = HttpState.Closed
untrackCounter(HttpBodyReaderTrackerName)
@ -61,19 +64,19 @@ proc newHttpBodyWriter*(streams: varargs[AsyncStreamWriter]): HttpBodyWriter =
trackCounter(HttpBodyWriterTrackerName)
res
proc closeWait*(bstream: HttpBodyWriter) {.async.} =
proc closeWait*(bstream: HttpBodyWriter) {.async: (raises: []).} =
## Close and free all the resources allocated by body writer.
if bstream.bstate == HttpState.Alive:
bstream.bstate = HttpState.Closing
var res = newSeq[Future[void]]()
for index in countdown(len(bstream.streams) - 1, 0):
res.add(bstream.streams[index].closeWait())
await allFutures(res)
await noCancel(allFutures(res))
await procCall(closeWait(AsyncStreamWriter(bstream)))
bstream.bstate = HttpState.Closed
untrackCounter(HttpBodyWriterTrackerName)
proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} =
proc hasOverflow*(bstream: HttpBodyReader): bool =
if len(bstream.streams) == 1:
# If HttpBodyReader has only one stream it has ``BoundedStreamReader``, in
# such a case it's impossible to get more bytes than the expected amount.
@ -89,6 +92,5 @@ proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} =
else:
false
proc closed*(bstream: HttpBodyReader | HttpBodyWriter): bool {.
raises: [].} =
proc closed*(bstream: HttpBodyReader | HttpBodyWriter): bool =
bstream.bstate != HttpState.Alive

View File

@ -6,14 +6,17 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import std/[uri, tables, sequtils]
import stew/[results, base10, base64, byteutils], httputils
import stew/[base10, base64, byteutils], httputils, results
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, tlsstream, chunkstream, boundstream]
import httptable, httpcommon, httpagent, httpbodyrw, multipart
export results, asyncloop, asyncsync, asyncstream, tlsstream, chunkstream,
boundstream, httptable, httpcommon, httpagent, httpbodyrw, multipart,
httputils
httputils, uri, results
export SocketFlags
const
@ -108,6 +111,7 @@ type
remoteHostname*: string
flags*: set[HttpClientConnectionFlag]
timestamp*: Moment
duration*: Duration
HttpClientConnectionRef* = ref HttpClientConnection
@ -119,12 +123,13 @@ type
headersTimeout*: Duration
idleTimeout: Duration
idlePeriod: Duration
watcherFut: Future[void]
watcherFut: Future[void].Raising([])
connectionBufferSize*: int
maxConnections*: int
connectionsCount*: int
socketFlags*: set[SocketFlags]
flags*: HttpClientFlags
dualstack*: DualStackType
HttpAddress* = object
id*: string
@ -194,6 +199,8 @@ type
name*: string
data*: string
HttpAddressResult* = Result[HttpAddress, HttpAddressErrorType]
# HttpClientRequestRef valid states are:
# Ready -> Open -> (Finished, Error) -> (Closing, Closed)
#
@ -233,6 +240,12 @@ template setDuration(
reqresp.duration = timestamp - reqresp.timestamp
reqresp.connection.setTimestamp(timestamp)
template setDuration(conn: HttpClientConnectionRef): untyped =
if not(isNil(conn)):
let timestamp = Moment.now()
conn.duration = timestamp - conn.timestamp
conn.setTimestamp(timestamp)
template isReady(conn: HttpClientConnectionRef): bool =
(conn.state == HttpClientConnectionState.Ready) and
(HttpClientConnectionFlag.KeepAlive in conn.flags) and
@ -243,7 +256,7 @@ template isIdle(conn: HttpClientConnectionRef, timestamp: Moment,
timeout: Duration): bool =
(timestamp - conn.timestamp) >= timeout
proc sessionWatcher(session: HttpSessionRef) {.async.}
proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).}
proc new*(t: typedesc[HttpSessionRef],
flags: HttpClientFlags = {},
@ -254,8 +267,8 @@ proc new*(t: typedesc[HttpSessionRef],
maxConnections = -1,
idleTimeout = HttpConnectionIdleTimeout,
idlePeriod = HttpConnectionCheckPeriod,
socketFlags: set[SocketFlags] = {}): HttpSessionRef {.
raises: [] .} =
socketFlags: set[SocketFlags] = {},
dualstack = DualStackType.Auto): HttpSessionRef =
## Create new HTTP session object.
##
## ``maxRedirections`` - maximum number of HTTP 3xx redirections
@ -274,16 +287,17 @@ proc new*(t: typedesc[HttpSessionRef],
idleTimeout: idleTimeout,
idlePeriod: idlePeriod,
connections: initTable[string, seq[HttpClientConnectionRef]](),
socketFlags: socketFlags
socketFlags: socketFlags,
dualstack: dualstack
)
res.watcherFut =
if HttpClientFlag.Http11Pipeline in flags:
sessionWatcher(res)
else:
newFuture[void]("session.watcher.placeholder")
Future[void].Raising([]).init("session.watcher.placeholder")
res
proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} =
proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] =
var res: set[TLSFlags]
if HttpClientFlag.NoVerifyHost in flags:
res.incl(TLSFlags.NoVerifyHost)
@ -291,8 +305,90 @@ proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} =
res.incl(TLSFlags.NoVerifyServerName)
res
proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {.
raises: [] .} =
proc getHttpAddress*(
url: Uri,
flags: HttpClientFlags = {}
): HttpAddressResult =
let
scheme =
if len(url.scheme) == 0:
HttpClientScheme.NonSecure
else:
case toLowerAscii(url.scheme)
of "http":
HttpClientScheme.NonSecure
of "https":
HttpClientScheme.Secure
else:
return err(HttpAddressErrorType.InvalidUrlScheme)
port =
if len(url.port) == 0:
case scheme
of HttpClientScheme.NonSecure:
80'u16
of HttpClientScheme.Secure:
443'u16
else:
Base10.decode(uint16, url.port).valueOr:
return err(HttpAddressErrorType.InvalidPortNumber)
hostname =
block:
if len(url.hostname) == 0:
return err(HttpAddressErrorType.MissingHostname)
url.hostname
id = hostname & ":" & Base10.toString(port)
addresses =
if (HttpClientFlag.NoInet4Resolution in flags) and
(HttpClientFlag.NoInet6Resolution in flags):
# DNS resolution is disabled.
try:
@[initTAddress(hostname, Port(port))]
except TransportAddressError:
return err(HttpAddressErrorType.InvalidIpHostname)
else:
try:
if (HttpClientFlag.NoInet4Resolution notin flags) and
(HttpClientFlag.NoInet6Resolution notin flags):
# DNS resolution for both IPv4 and IPv6 addresses.
resolveTAddress(hostname, Port(port))
else:
if HttpClientFlag.NoInet6Resolution in flags:
# DNS resolution only for IPv4 addresses.
resolveTAddress(hostname, Port(port), AddressFamily.IPv4)
else:
# DNS resolution only for IPv6 addresses
resolveTAddress(hostname, Port(port), AddressFamily.IPv6)
except TransportAddressError:
return err(HttpAddressErrorType.NameLookupFailed)
if len(addresses) == 0:
return err(HttpAddressErrorType.NoAddressResolved)
ok(HttpAddress(id: id, scheme: scheme, hostname: hostname, port: port,
path: url.path, query: url.query, anchor: url.anchor,
username: url.username, password: url.password,
addresses: addresses))
proc getHttpAddress*(
url: string,
flags: HttpClientFlags = {}
): HttpAddressResult =
getHttpAddress(parseUri(url), flags)
proc getHttpAddress*(
session: HttpSessionRef,
url: Uri
): HttpAddressResult =
getHttpAddress(url, session.flags)
proc getHttpAddress*(
session: HttpSessionRef,
url: string
): HttpAddressResult =
## Create new HTTP address using URL string ``url``.
getHttpAddress(parseUri(url), session.flags)
proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] =
let scheme =
if len(url.scheme) == 0:
HttpClientScheme.NonSecure
@ -356,13 +452,13 @@ proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {.
addresses: addresses))
proc getAddress*(session: HttpSessionRef,
url: string): HttpResult[HttpAddress] {.raises: [].} =
url: string): HttpResult[HttpAddress] =
## Create new HTTP address using URL string ``url``.
session.getAddress(parseUri(url))
proc getAddress*(address: TransportAddress,
ctype: HttpClientScheme = HttpClientScheme.NonSecure,
queryString: string = "/"): HttpAddress {.raises: [].} =
queryString: string = "/"): HttpAddress =
## Create new HTTP address using Transport address ``address``, connection
## type ``ctype`` and query string ``queryString``.
let uri = parseUri(queryString)
@ -445,8 +541,12 @@ proc getUniqueConnectionId(session: HttpSessionRef): uint64 =
inc(session.counter)
session.counter
proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef,
ha: HttpAddress, transp: StreamTransport): HttpClientConnectionRef =
proc new(
t: typedesc[HttpClientConnectionRef],
session: HttpSessionRef,
ha: HttpAddress,
transp: StreamTransport
): Result[HttpClientConnectionRef, string] =
case ha.scheme
of HttpClientScheme.NonSecure:
let res = HttpClientConnectionRef(
@ -459,44 +559,48 @@ proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef,
remoteHostname: ha.id
)
trackCounter(HttpClientConnectionTrackerName)
res
ok(res)
of HttpClientScheme.Secure:
let treader = newAsyncStreamReader(transp)
let twriter = newAsyncStreamWriter(transp)
let tls = newTLSClientAsyncStream(treader, twriter, ha.hostname,
flags = session.flags.getTLSFlags())
let res = HttpClientConnectionRef(
id: session.getUniqueConnectionId(),
kind: HttpClientScheme.Secure,
transp: transp,
treader: treader,
twriter: twriter,
reader: tls.reader,
writer: tls.writer,
tls: tls,
state: HttpClientConnectionState.Connecting,
remoteHostname: ha.id
)
trackCounter(HttpClientConnectionTrackerName)
res
let
treader = newAsyncStreamReader(transp)
twriter = newAsyncStreamWriter(transp)
tls =
try:
newTLSClientAsyncStream(treader, twriter, ha.hostname,
flags = session.flags.getTLSFlags())
except TLSStreamInitError as exc:
return err(exc.msg)
proc setError(request: HttpClientRequestRef, error: ref HttpError) {.
raises: [] .} =
res = HttpClientConnectionRef(
id: session.getUniqueConnectionId(),
kind: HttpClientScheme.Secure,
transp: transp,
treader: treader,
twriter: twriter,
reader: tls.reader,
writer: tls.writer,
tls: tls,
state: HttpClientConnectionState.Connecting,
remoteHostname: ha.id
)
trackCounter(HttpClientConnectionTrackerName)
ok(res)
proc setError(request: HttpClientRequestRef, error: ref HttpError) =
request.error = error
request.state = HttpReqRespState.Error
if not(isNil(request.connection)):
request.connection.state = HttpClientConnectionState.Error
request.connection.error = error
proc setError(response: HttpClientResponseRef, error: ref HttpError) {.
raises: [] .} =
proc setError(response: HttpClientResponseRef, error: ref HttpError) =
response.error = error
response.state = HttpReqRespState.Error
if not(isNil(response.connection)):
response.connection.state = HttpClientConnectionState.Error
response.connection.error = error
proc closeWait(conn: HttpClientConnectionRef) {.async.} =
proc closeWait(conn: HttpClientConnectionRef) {.async: (raises: []).} =
## Close HttpClientConnectionRef instance ``conn`` and free all the resources.
if conn.state notin {HttpClientConnectionState.Closing,
HttpClientConnectionState.Closed}:
@ -508,59 +612,69 @@ proc closeWait(conn: HttpClientConnectionRef) {.async.} =
res.add(conn.reader.closeWait())
if not(isNil(conn.writer)) and not(conn.writer.closed()):
res.add(conn.writer.closeWait())
if conn.kind == HttpClientScheme.Secure:
res.add(conn.treader.closeWait())
res.add(conn.twriter.closeWait())
res.add(conn.transp.closeWait())
res
if len(pending) > 0: await allFutures(pending)
case conn.kind
of HttpClientScheme.Secure:
await allFutures(conn.treader.closeWait(), conn.twriter.closeWait())
of HttpClientScheme.NonSecure:
discard
await conn.transp.closeWait()
if len(pending) > 0: await noCancel(allFutures(pending))
conn.state = HttpClientConnectionState.Closed
untrackCounter(HttpClientConnectionTrackerName)
proc connect(session: HttpSessionRef,
ha: HttpAddress): Future[HttpClientConnectionRef] {.async.} =
ha: HttpAddress): Future[HttpClientConnectionRef] {.
async: (raises: [CancelledError, HttpConnectionError]).} =
## Establish new connection with remote server using ``url`` and ``flags``.
## On success returns ``HttpClientConnectionRef`` object.
var lastError = ""
# Here we try to connect to every possible remote host address we got after
# DNS resolution.
for address in ha.addresses:
let transp =
try:
await connect(address, bufferSize = session.connectionBufferSize,
flags = session.socketFlags)
flags = session.socketFlags,
dualstack = session.dualstack)
except CancelledError as exc:
raise exc
except CatchableError:
except TransportError:
nil
if not(isNil(transp)):
let conn =
block:
let res = HttpClientConnectionRef.new(session, ha, transp)
case res.kind
of HttpClientScheme.Secure:
let res = HttpClientConnectionRef.new(session, ha, transp).valueOr:
raiseHttpConnectionError(
"Could not connect to remote host, reason: " & error)
if res.kind == HttpClientScheme.Secure:
try:
await res.tls.handshake()
res.state = HttpClientConnectionState.Ready
except CancelledError as exc:
await res.closeWait()
raise exc
except AsyncStreamError:
except TLSStreamProtocolError as exc:
await res.closeWait()
res.state = HttpClientConnectionState.Error
of HttpClientScheme.Nonsecure:
lastError = $exc.msg
except AsyncStreamError as exc:
await res.closeWait()
res.state = HttpClientConnectionState.Error
lastError = $exc.msg
else:
res.state = HttpClientConnectionState.Ready
res
if conn.state == HttpClientConnectionState.Ready:
return conn
# If all attempts to connect to the remote host have failed.
raiseHttpConnectionError("Could not connect to remote host")
if len(lastError) > 0:
raiseHttpConnectionError("Could not connect to remote host, reason: " &
lastError)
else:
raiseHttpConnectionError("Could not connect to remote host")
proc removeConnection(session: HttpSessionRef,
conn: HttpClientConnectionRef) {.async.} =
conn: HttpClientConnectionRef) {.async: (raises: []).} =
let removeHost =
block:
var res = false
@ -584,12 +698,13 @@ proc acquireConnection(
session: HttpSessionRef,
ha: HttpAddress,
flags: set[HttpClientRequestFlag]
): Future[HttpClientConnectionRef] {.async.} =
): Future[HttpClientConnectionRef] {.
async: (raises: [CancelledError, HttpConnectionError]).} =
## Obtain connection from ``session`` or establish a new one.
var default: seq[HttpClientConnectionRef]
let timestamp = Moment.now()
if session.connectionPoolEnabled(flags):
# Trying to reuse existing connection from our connection's pool.
let timestamp = Moment.now()
# We look for a non-idle connection in the `Ready` state; all idle connections
# will be freed by sessionWatcher().
for connection in session.connections.getOrDefault(ha.id):
@ -606,10 +721,13 @@ proc acquireConnection(
connection.state = HttpClientConnectionState.Acquired
session.connections.mgetOrPut(ha.id, default).add(connection)
inc(session.connectionsCount)
return connection
connection.setTimestamp(timestamp)
connection.setDuration()
connection
proc releaseConnection(session: HttpSessionRef,
connection: HttpClientConnectionRef) {.async.} =
connection: HttpClientConnectionRef) {.
async: (raises: []).} =
## Return connection back to the ``session``.
let removeConnection =
if HttpClientFlag.Http11Pipeline notin session.flags:
@ -647,7 +765,7 @@ proc releaseConnection(session: HttpSessionRef,
HttpClientConnectionFlag.Response,
HttpClientConnectionFlag.NoBody})
proc releaseConnection(request: HttpClientRequestRef) {.async.} =
proc releaseConnection(request: HttpClientRequestRef) {.async: (raises: []).} =
let
session = request.session
connection = request.connection
@ -659,7 +777,8 @@ proc releaseConnection(request: HttpClientRequestRef) {.async.} =
if HttpClientConnectionFlag.Response notin connection.flags:
await session.releaseConnection(connection)
proc releaseConnection(response: HttpClientResponseRef) {.async.} =
proc releaseConnection(response: HttpClientResponseRef) {.
async: (raises: []).} =
let
session = response.session
connection = response.connection
@ -671,7 +790,7 @@ proc releaseConnection(response: HttpClientResponseRef) {.async.} =
if HttpClientConnectionFlag.Request notin connection.flags:
await session.releaseConnection(connection)
proc closeWait*(session: HttpSessionRef) {.async.} =
proc closeWait*(session: HttpSessionRef) {.async: (raises: []).} =
## Closes HTTP session object.
##
## This closes all the connections opened to remote servers.
@ -682,9 +801,9 @@ proc closeWait*(session: HttpSessionRef) {.async.} =
for connections in session.connections.values():
for conn in connections:
pending.add(closeWait(conn))
await allFutures(pending)
await noCancel(allFutures(pending))
proc sessionWatcher(session: HttpSessionRef) {.async.} =
proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).} =
while true:
let firstBreak =
try:
@ -715,45 +834,52 @@ proc sessionWatcher(session: HttpSessionRef) {.async.} =
var pending: seq[Future[void]]
let secondBreak =
try:
pending = idleConnections.mapIt(it.closeWait())
for conn in idleConnections:
pending.add(conn.closeWait())
await allFutures(pending)
false
except CancelledError:
# We still want to close connections to avoid socket leaks.
await allFutures(pending)
await noCancel(allFutures(pending))
true
if secondBreak:
break
proc closeWait*(request: HttpClientRequestRef) {.async.} =
proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} =
var pending: seq[FutureBase]
if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}:
request.state = HttpReqRespState.Closing
if not(isNil(request.writer)):
if not(request.writer.closed()):
await request.writer.closeWait()
pending.add(FutureBase(request.writer.closeWait()))
request.writer = nil
await request.releaseConnection()
pending.add(FutureBase(request.releaseConnection()))
await noCancel(allFutures(pending))
request.session = nil
request.error = nil
request.state = HttpReqRespState.Closed
untrackCounter(HttpClientRequestTrackerName)
proc closeWait*(response: HttpClientResponseRef) {.async.} =
proc closeWait*(response: HttpClientResponseRef) {.async: (raises: []).} =
var pending: seq[FutureBase]
if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}:
response.state = HttpReqRespState.Closing
if not(isNil(response.reader)):
if not(response.reader.closed()):
await response.reader.closeWait()
pending.add(FutureBase(response.reader.closeWait()))
response.reader = nil
await response.releaseConnection()
pending.add(FutureBase(response.releaseConnection()))
await noCancel(allFutures(pending))
response.session = nil
response.error = nil
response.state = HttpReqRespState.Closed
untrackCounter(HttpClientResponseTrackerName)
proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte]
): HttpResult[HttpClientResponseRef] {.raises: [] .} =
proc prepareResponse(
request: HttpClientRequestRef,
data: openArray[byte]
): HttpResult[HttpClientResponseRef] =
## Process response headers.
let resp = parseResponse(data, false)
if resp.failed():
@ -864,7 +990,7 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte]
ok(res)
proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
var buffer: array[HttpMaxHeadersSize, byte]
let timestamp = Moment.now()
req.connection.setTimestamp(timestamp)
@ -876,8 +1002,9 @@ proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {.
req.session.headersTimeout)
except AsyncTimeoutError:
raiseHttpReadError("Reading response headers timed out")
except AsyncStreamError:
raiseHttpReadError("Could not read response headers")
except AsyncStreamError as exc:
raiseHttpReadError(
"Could not read response headers, reason: " & $exc.msg)
let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1))
if response.isErr():
@ -891,8 +1018,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[byte] = []): HttpClientRequestRef {.
raises: [].} =
body: openArray[byte] = []): HttpClientRequestRef =
let res = HttpClientRequestRef(
state: HttpReqRespState.Ready, session: session, meth: meth,
version: version, flags: flags, headers: HttpTable.init(headers),
@ -906,8 +1032,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[byte] = []): HttpResult[HttpClientRequestRef] {.
raises: [].} =
body: openArray[byte] = []): HttpResult[HttpClientRequestRef] =
let address = ? session.getAddress(parseUri(url))
let res = HttpClientRequestRef(
state: HttpReqRespState.Ready, session: session, meth: meth,
@ -921,14 +1046,14 @@ proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
url: string, version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = []
): HttpResult[HttpClientRequestRef] {.raises: [].} =
): HttpResult[HttpClientRequestRef] =
HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers)
proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
ha: HttpAddress, version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = []
): HttpClientRequestRef {.raises: [].} =
): HttpClientRequestRef =
HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers)
proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
@ -936,7 +1061,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[byte] = []
): HttpResult[HttpClientRequestRef] {.raises: [].} =
): HttpResult[HttpClientRequestRef] =
HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers,
body)
@ -944,8 +1069,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
url: string, version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[char] = []): HttpResult[HttpClientRequestRef] {.
raises: [].} =
body: openArray[char] = []): HttpResult[HttpClientRequestRef] =
HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers,
body.toOpenArrayByte(0, len(body) - 1))
@ -953,8 +1077,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
ha: HttpAddress, version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[byte] = []): HttpClientRequestRef {.
raises: [].} =
body: openArray[byte] = []): HttpClientRequestRef =
HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers,
body)
@ -962,13 +1085,11 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
ha: HttpAddress, version: HttpVersion = HttpVersion11,
flags: set[HttpClientRequestFlag] = {},
headers: openArray[HttpHeaderTuple] = [],
body: openArray[char] = []): HttpClientRequestRef {.
raises: [].} =
body: openArray[char] = []): HttpClientRequestRef =
HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers,
body.toOpenArrayByte(0, len(body) - 1))
proc prepareRequest(request: HttpClientRequestRef): string {.
raises: [].} =
proc prepareRequest(request: HttpClientRequestRef): string =
template hasChunkedEncoding(request: HttpClientRequestRef): bool =
toLowerAscii(request.headers.getString(TransferEncodingHeader)) == "chunked"
@ -1043,7 +1164,7 @@ proc prepareRequest(request: HttpClientRequestRef): string {.
res
proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
doAssert(request.state == HttpReqRespState.Ready,
"Request's state is " & $request.state)
let connection =
@ -1076,25 +1197,24 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {.
request.setDuration()
request.setError(newHttpInterruptError())
raise exc
except AsyncStreamError:
except AsyncStreamError as exc:
request.setDuration()
let error = newHttpWriteError("Could not send request headers")
let error = newHttpWriteError(
"Could not send request headers, reason: " & $exc.msg)
request.setError(error)
raise error
let resp =
try:
await request.getResponse()
except CancelledError as exc:
request.setError(newHttpInterruptError())
raise exc
except HttpError as exc:
request.setError(exc)
raise exc
return resp
try:
await request.getResponse()
except CancelledError as exc:
request.setError(newHttpInterruptError())
raise exc
except HttpError as exc:
request.setError(exc)
raise exc
proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
## Start sending request's headers and return `HttpBodyWriter`, which can be
## used to send request's body.
doAssert(request.state == HttpReqRespState.Ready,
@ -1124,8 +1244,9 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {.
request.setDuration()
request.setError(newHttpInterruptError())
raise exc
except AsyncStreamError:
let error = newHttpWriteError("Could not send request headers")
except AsyncStreamError as exc:
let error = newHttpWriteError(
"Could not send request headers, reason: " & $exc.msg)
request.setDuration()
request.setError(error)
raise error
@ -1147,10 +1268,10 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {.
request.writer = writer
request.state = HttpReqRespState.Open
request.connection.state = HttpClientConnectionState.RequestBodySending
return writer
writer
proc finish*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
## Finish sending request and receive response.
doAssert(not(isNil(request.connection)),
"Request missing connection instance")
@ -1187,7 +1308,8 @@ proc getNewLocation*(resp: HttpClientResponseRef): HttpResult[HttpAddress] =
else:
err("Location header is missing")
proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader =
proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader {.
raises: [HttpUseClosedError].} =
## Returns stream's reader instance which can be used to read response's body.
##
## Streams obtained using this procedure must be closed to avoid
@ -1216,7 +1338,8 @@ proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader =
response.reader = reader
response.reader
proc finish*(response: HttpClientResponseRef) {.async.} =
proc finish*(response: HttpClientResponseRef) {.
async: (raises: [HttpUseClosedError]).} =
## Finish receiving response.
##
## Because ``finish()`` returns nothing, this operation becomes a NOP for
@ -1235,7 +1358,7 @@ proc finish*(response: HttpClientResponseRef) {.async.} =
response.setDuration()
proc getBodyBytes*(response: HttpClientResponseRef): Future[seq[byte]] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
## Read all bytes from response ``response``.
##
## Note: This procedure performs automatic finishing for ``response``.
@ -1245,21 +1368,22 @@ proc getBodyBytes*(response: HttpClientResponseRef): Future[seq[byte]] {.
await reader.closeWait()
reader = nil
await response.finish()
return data
data
except CancelledError as exc:
if not(isNil(reader)):
await reader.closeWait()
response.setError(newHttpInterruptError())
raise exc
except AsyncStreamError:
except AsyncStreamError as exc:
let error = newHttpReadError("Could not read response, reason: " & $exc.msg)
if not(isNil(reader)):
await reader.closeWait()
let error = newHttpReadError("Could not read response")
response.setError(error)
raise error
proc getBodyBytes*(response: HttpClientResponseRef,
nbytes: int): Future[seq[byte]] {.async.} =
nbytes: int): Future[seq[byte]] {.
async: (raises: [CancelledError, HttpError]).} =
## Read all bytes (nbytes <= 0) or exactly `nbytes` bytes from response
## ``response``.
##
@ -1270,20 +1394,21 @@ proc getBodyBytes*(response: HttpClientResponseRef,
await reader.closeWait()
reader = nil
await response.finish()
return data
data
except CancelledError as exc:
if not(isNil(reader)):
await reader.closeWait()
response.setError(newHttpInterruptError())
raise exc
except AsyncStreamError:
except AsyncStreamError as exc:
let error = newHttpReadError("Could not read response, reason: " & $exc.msg)
if not(isNil(reader)):
await reader.closeWait()
let error = newHttpReadError("Could not read response")
response.setError(error)
raise error
proc consumeBody*(response: HttpClientResponseRef): Future[int] {.async.} =
proc consumeBody*(response: HttpClientResponseRef): Future[int] {.
async: (raises: [CancelledError, HttpError]).} =
## Consume/discard response and return number of bytes consumed.
##
## Note: This procedure performs automatic finishing for ``response``.
@ -1293,16 +1418,17 @@ proc consumeBody*(response: HttpClientResponseRef): Future[int] {.async.} =
await reader.closeWait()
reader = nil
await response.finish()
return res
res
except CancelledError as exc:
if not(isNil(reader)):
await reader.closeWait()
response.setError(newHttpInterruptError())
raise exc
except AsyncStreamError:
except AsyncStreamError as exc:
let error = newHttpReadError(
"Could not consume response, reason: " & $exc.msg)
if not(isNil(reader)):
await reader.closeWait()
let error = newHttpReadError("Could not read response")
response.setError(error)
raise error
@ -1317,8 +1443,13 @@ proc redirect*(request: HttpClientRequestRef,
if redirectCount > request.session.maxRedirections:
err("Maximum number of redirects exceeded")
else:
let headers =
block:
var res = request.headers
res.set(HostHeader, ha.hostname)
res
var res = HttpClientRequestRef.new(request.session, ha, request.meth,
request.version, request.flags, request.headers.toList(), request.buffer)
request.version, request.flags, headers.toList(), request.buffer)
res.redirectCount = redirectCount
ok(res)
@ -1335,13 +1466,19 @@ proc redirect*(request: HttpClientRequestRef,
err("Maximum number of redirects exceeded")
else:
let address = ? request.session.redirect(request.address, uri)
# Update Host header to redirected URL hostname
let headers =
block:
var res = request.headers
res.set(HostHeader, address.hostname)
res
var res = HttpClientRequestRef.new(request.session, address, request.meth,
request.version, request.flags, request.headers.toList(), request.buffer)
request.version, request.flags, headers.toList(), request.buffer)
res.redirectCount = redirectCount
ok(res)
proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
var response: HttpClientResponseRef
try:
response = await request.send()
@ -1349,7 +1486,7 @@ proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {.
let status = response.status
await response.closeWait()
response = nil
return (status, buffer)
(status, buffer)
except HttpError as exc:
if not(isNil(response)): await response.closeWait()
raise exc
@ -1358,7 +1495,7 @@ proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {.
raise exc
proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {.
async.} =
async: (raises: [CancelledError, HttpError]).} =
## Fetch resource pointed by ``url`` using HTTP GET method and ``session``
## parameters.
##
@ -1400,28 +1537,34 @@ proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {.
request = redirect
redirect = nil
else:
let data = await response.getBodyBytes()
let code = response.status
let
data = await response.getBodyBytes()
code = response.status
await response.closeWait()
response = nil
await request.closeWait()
request = nil
return (code, data)
except CancelledError as exc:
if not(isNil(response)): await closeWait(response)
if not(isNil(request)): await closeWait(request)
if not(isNil(redirect)): await closeWait(redirect)
var pending: seq[Future[void]]
if not(isNil(response)): pending.add(closeWait(response))
if not(isNil(request)): pending.add(closeWait(request))
if not(isNil(redirect)): pending.add(closeWait(redirect))
await noCancel(allFutures(pending))
raise exc
except HttpError as exc:
if not(isNil(response)): await closeWait(response)
if not(isNil(request)): await closeWait(request)
if not(isNil(redirect)): await closeWait(redirect)
var pending: seq[Future[void]]
if not(isNil(response)): pending.add(closeWait(response))
if not(isNil(request)): pending.add(closeWait(request))
if not(isNil(redirect)): pending.add(closeWait(redirect))
await noCancel(allFutures(pending))
raise exc
proc getServerSentEvents*(
response: HttpClientResponseRef,
maxEventSize: int = -1
): Future[seq[ServerSentEvent]] {.async.} =
): Future[seq[ServerSentEvent]] {.
async: (raises: [CancelledError, HttpError]).} =
## Read number of server-sent events (SSE) from HTTP response ``response``.
##
## ``maxEventSize`` - maximum size of events chunk in one message, use
@ -1509,8 +1652,14 @@ proc getServerSentEvents*(
(i, false)
await reader.readMessage(predicate)
try:
await reader.readMessage(predicate)
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raiseHttpReadError($exc.msg)
if not isNil(error):
raise error
else:
return res
res

View File

@ -6,8 +6,11 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import std/[strutils, uri]
import stew/results, httputils
import results, httputils
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, boundstream]
export asyncloop, asyncsync, results, httputils, strutils
@ -40,30 +43,48 @@ const
ServerHeader* = "server"
LocationHeader* = "location"
AuthorizationHeader* = "authorization"
ContentDispositionHeader* = "content-disposition"
UrlEncodedContentType* = MediaType.init("application/x-www-form-urlencoded")
MultipartContentType* = MediaType.init("multipart/form-data")
type
HttpMessage* = object
code*: HttpCode
contentType*: MediaType
message*: string
HttpResult*[T] = Result[T, string]
HttpResultCode*[T] = Result[T, HttpCode]
HttpResultMessage*[T] = Result[T, HttpMessage]
HttpDefect* = object of Defect
HttpError* = object of CatchableError
HttpCriticalError* = object of HttpError
code*: HttpCode
HttpRecoverableError* = object of HttpError
code*: HttpCode
HttpDisconnectError* = object of HttpError
HttpConnectionError* = object of HttpError
HttpError* = object of AsyncError
HttpInterruptError* = object of HttpError
HttpReadError* = object of HttpError
HttpWriteError* = object of HttpError
HttpProtocolError* = object of HttpError
HttpRedirectError* = object of HttpError
HttpAddressError* = object of HttpError
HttpUseClosedError* = object of HttpError
HttpTransportError* = object of HttpError
HttpAddressError* = object of HttpTransportError
HttpRedirectError* = object of HttpTransportError
HttpConnectionError* = object of HttpTransportError
HttpReadError* = object of HttpTransportError
HttpReadLimitError* = object of HttpReadError
HttpDisconnectError* = object of HttpReadError
HttpWriteError* = object of HttpTransportError
HttpProtocolError* = object of HttpError
code*: HttpCode
HttpCriticalError* = object of HttpProtocolError # deprecated
HttpRecoverableError* = object of HttpProtocolError # deprecated
HttpRequestError* = object of HttpProtocolError
HttpRequestHeadersError* = object of HttpRequestError
HttpRequestBodyError* = object of HttpRequestError
HttpRequestHeadersTooLargeError* = object of HttpRequestHeadersError
HttpRequestBodyTooLargeError* = object of HttpRequestBodyError
HttpResponseError* = object of HttpProtocolError
HttpInvalidUsageError* = object of HttpError
HttpUseClosedError* = object of HttpInvalidUsageError
KeyValueTuple* = tuple
key: string
@ -82,35 +103,95 @@ type
HttpState* {.pure.} = enum
Alive, Closing, Closed
proc raiseHttpCriticalError*(msg: string,
code = Http400) {.noinline, noreturn.} =
HttpAddressErrorType* {.pure.} = enum
InvalidUrlScheme,
InvalidPortNumber,
MissingHostname,
InvalidIpHostname,
NameLookupFailed,
NoAddressResolved
const
CriticalHttpAddressError* = {
HttpAddressErrorType.InvalidUrlScheme,
HttpAddressErrorType.InvalidPortNumber,
HttpAddressErrorType.MissingHostname,
HttpAddressErrorType.InvalidIpHostname
}
RecoverableHttpAddressError* = {
HttpAddressErrorType.NameLookupFailed,
HttpAddressErrorType.NoAddressResolved
}
func isCriticalError*(error: HttpAddressErrorType): bool =
error in CriticalHttpAddressError
func isRecoverableError*(error: HttpAddressErrorType): bool =
error in RecoverableHttpAddressError
func toString*(error: HttpAddressErrorType): string =
case error
of HttpAddressErrorType.InvalidUrlScheme:
"URL scheme not supported"
of HttpAddressErrorType.InvalidPortNumber:
"Invalid URL port number"
of HttpAddressErrorType.MissingHostname:
"Missing URL hostname"
of HttpAddressErrorType.InvalidIpHostname:
"Invalid IPv4/IPv6 address in hostname"
of HttpAddressErrorType.NameLookupFailed:
"Could not resolve remote address"
of HttpAddressErrorType.NoAddressResolved:
"No address has been resolved"
proc raiseHttpRequestBodyTooLargeError*() {.
noinline, noreturn, raises: [HttpRequestBodyTooLargeError].} =
raise (ref HttpRequestBodyTooLargeError)(
code: Http413, msg: MaximumBodySizeError)
proc raiseHttpCriticalError*(msg: string, code = Http400) {.
noinline, noreturn, raises: [HttpCriticalError].} =
raise (ref HttpCriticalError)(code: code, msg: msg)
proc raiseHttpDisconnectError*() {.noinline, noreturn.} =
proc raiseHttpDisconnectError*() {.
noinline, noreturn, raises: [HttpDisconnectError].} =
raise (ref HttpDisconnectError)(msg: "Remote peer disconnected")
proc raiseHttpDefect*(msg: string) {.noinline, noreturn.} =
raise (ref HttpDefect)(msg: msg)
proc raiseHttpConnectionError*(msg: string) {.noinline, noreturn.} =
proc raiseHttpConnectionError*(msg: string) {.
noinline, noreturn, raises: [HttpConnectionError].} =
raise (ref HttpConnectionError)(msg: msg)
proc raiseHttpInterruptError*() {.noinline, noreturn.} =
proc raiseHttpInterruptError*() {.
noinline, noreturn, raises: [HttpInterruptError].} =
raise (ref HttpInterruptError)(msg: "Connection was interrupted")
proc raiseHttpReadError*(msg: string) {.noinline, noreturn.} =
proc raiseHttpReadError*(msg: string) {.
noinline, noreturn, raises: [HttpReadError].} =
raise (ref HttpReadError)(msg: msg)
proc raiseHttpProtocolError*(msg: string) {.noinline, noreturn.} =
raise (ref HttpProtocolError)(msg: msg)
proc raiseHttpProtocolError*(msg: string) {.
noinline, noreturn, raises: [HttpProtocolError].} =
raise (ref HttpProtocolError)(code: Http400, msg: msg)
proc raiseHttpWriteError*(msg: string) {.noinline, noreturn.} =
proc raiseHttpProtocolError*(code: HttpCode, msg: string) {.
noinline, noreturn, raises: [HttpProtocolError].} =
raise (ref HttpProtocolError)(code: code, msg: msg)
proc raiseHttpProtocolError*(msg: HttpMessage) {.
noinline, noreturn, raises: [HttpProtocolError].} =
raise (ref HttpProtocolError)(code: msg.code, msg: msg.message)
proc raiseHttpWriteError*(msg: string) {.
noinline, noreturn, raises: [HttpWriteError].} =
raise (ref HttpWriteError)(msg: msg)
proc raiseHttpRedirectError*(msg: string) {.noinline, noreturn.} =
proc raiseHttpRedirectError*(msg: string) {.
noinline, noreturn, raises: [HttpRedirectError].} =
raise (ref HttpRedirectError)(msg: msg)
proc raiseHttpAddressError*(msg: string) {.noinline, noreturn.} =
proc raiseHttpAddressError*(msg: string) {.
noinline, noreturn, raises: [HttpAddressError].} =
raise (ref HttpAddressError)(msg: msg)
template newHttpInterruptError*(): ref HttpInterruptError =
@ -125,9 +206,25 @@ template newHttpWriteError*(message: string): ref HttpWriteError =
template newHttpUseClosedError*(): ref HttpUseClosedError =
newException(HttpUseClosedError, "Connection was already closed")
func init*(t: typedesc[HttpMessage], code: HttpCode, message: string,
contentType: MediaType): HttpMessage =
HttpMessage(code: code, message: message, contentType: contentType)
func init*(t: typedesc[HttpMessage], code: HttpCode, message: string,
contentType: string): HttpMessage =
HttpMessage(code: code, message: message,
contentType: MediaType.init(contentType))
func init*(t: typedesc[HttpMessage], code: HttpCode,
message: string): HttpMessage =
HttpMessage(code: code, message: message,
contentType: MediaType.init("text/plain"))
func init*(t: typedesc[HttpMessage], code: HttpCode): HttpMessage =
HttpMessage(code: code)
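
A short sketch combining the ``HttpMessage`` constructors with the ``raiseHttpProtocolError`` overload introduced above:

proc rejectMissingResource() {.raises: [HttpProtocolError].} =
  # Build a message carrying status code, body and content type, then
  # raise it as a protocol error in one step.
  let msg = HttpMessage.init(Http404, "Resource not found", "text/plain")
  raiseHttpProtocolError(msg)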
iterator queryParams*(query: string,
flags: set[QueryParamsFlag] = {}): KeyValueTuple {.
raises: [].} =
flags: set[QueryParamsFlag] = {}): KeyValueTuple =
## Iterate over url-encoded query string.
for pair in query.split('&'):
let items = pair.split('=', maxsplit = 1)
@ -140,9 +237,9 @@ iterator queryParams*(query: string,
else:
yield (decodeUrl(k), decodeUrl(v))
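
A minimal usage sketch for the ``queryParams`` iterator:

# Prints "name = alice" and "q = hello world".
for key, value in queryParams("name=alice&q=hello%20world"):
  echo key, " = ", value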
func getTransferEncoding*(ch: openArray[string]): HttpResult[
set[TransferEncodingFlags]] {.
raises: [].} =
func getTransferEncoding*(
ch: openArray[string]
): HttpResult[set[TransferEncodingFlags]] =
## Parse value of multiple HTTP headers ``Transfer-Encoding`` and return
## it as set of ``TransferEncodingFlags``.
var res: set[TransferEncodingFlags] = {}
@ -171,9 +268,9 @@ func getTransferEncoding*(ch: openArray[string]): HttpResult[
return err("Incorrect Transfer-Encoding value")
ok(res)
func getContentEncoding*(ch: openArray[string]): HttpResult[
set[ContentEncodingFlags]] {.
raises: [].} =
func getContentEncoding*(
ch: openArray[string]
): HttpResult[set[ContentEncodingFlags]] =
## Parse value of multiple HTTP headers ``Content-Encoding`` and return
## it as set of ``ContentEncodingFlags``.
var res: set[ContentEncodingFlags] = {}
@ -202,8 +299,7 @@ func getContentEncoding*(ch: openArray[string]): HttpResult[
return err("Incorrect Content-Encoding value")
ok(res)
func getContentType*(ch: openArray[string]): HttpResult[ContentTypeData] {.
raises: [].} =
func getContentType*(ch: openArray[string]): HttpResult[ContentTypeData] =
## Check and prepare value of ``Content-Type`` header.
if len(ch) == 0:
err("No Content-Type values found")

View File

@ -6,8 +6,11 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import std/tables
import stew/results
import results
import ../../timer
import httpserver, shttpserver
from httpclient import HttpClientScheme
@ -16,8 +19,6 @@ from ../../osdefs import SocketHandle
from ../../transports/common import TransportAddress, ServerFlags
export HttpClientScheme, SocketHandle, TransportAddress, ServerFlags, HttpState
{.push raises: [].}
type
ConnectionType* {.pure.} = enum
NonSecure, Secure
@ -29,6 +30,7 @@ type
handle*: SocketHandle
connectionType*: ConnectionType
connectionState*: ConnectionState
query*: Opt[string]
remoteAddress*: Opt[TransportAddress]
localAddress*: Opt[TransportAddress]
acceptMoment*: Moment
@ -85,6 +87,12 @@ proc getConnectionState*(holder: HttpConnectionHolderRef): ConnectionState =
else:
ConnectionState.Accepted
proc getQueryString*(holder: HttpConnectionHolderRef): Opt[string] =
if not(isNil(holder.connection)):
holder.connection.currentRawQuery
else:
Opt.none(string)
proc init*(t: typedesc[ServerConnectionInfo],
holder: HttpConnectionHolderRef): ServerConnectionInfo =
let
@ -98,6 +106,7 @@ proc init*(t: typedesc[ServerConnectionInfo],
Opt.some(holder.transp.remoteAddress())
except CatchableError:
Opt.none(TransportAddress)
queryString = holder.getQueryString()
ServerConnectionInfo(
handle: SocketHandle(holder.transp.fd),
@ -106,6 +115,7 @@ proc init*(t: typedesc[ServerConnectionInfo],
remoteAddress: remoteAddress,
localAddress: localAddress,
acceptMoment: holder.acceptMoment,
query: queryString,
createMoment:
if not(isNil(holder.connection)):
Opt.some(holder.connection.createMoment)

File diff suppressed because it is too large

View File

@ -197,3 +197,7 @@ proc toList*(ht: HttpTables, normKey = false): auto =
for key, value in ht.stringItems(normKey):
res.add((key, value))
res
proc clear*(ht: var HttpTables) =
  ## Resets the HttpTables ``ht`` so that it is empty.
ht.table.clear()

View File

@ -7,15 +7,19 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import std/[monotimes, strutils]
import stew/results, httputils
import results, httputils
import ../../asyncloop
import ../../streams/[asyncstream, boundstream, chunkstream]
import httptable, httpcommon, httpbodyrw
import "."/[httptable, httpcommon, httpbodyrw]
export asyncloop, httptable, httpcommon, httpbodyrw, asyncstream, httputils
const
UnableToReadMultipartBody = "Unable to read multipart message body"
UnableToReadMultipartBody = "Unable to read multipart message body, reason: "
UnableToSendMultipartMessage = "Unable to send multipart message, reason: "
type
MultiPartSource* {.pure.} = enum
@ -66,13 +70,12 @@ type
name*: string
filename*: string
MultipartError* = object of HttpCriticalError
MultipartError* = object of HttpProtocolError
MultipartEOMError* = object of MultipartError
BChar* = byte | char
proc startsWith(s, prefix: openArray[byte]): bool {.
raises: [].} =
proc startsWith(s, prefix: openArray[byte]): bool =
  # This procedure is a copy of strutils.startsWith(), however it is
  # intended to work with arrays of bytes rather than strings.
var i = 0
@ -81,8 +84,7 @@ proc startsWith(s, prefix: openArray[byte]): bool {.
if i >= len(s) or s[i] != prefix[i]: return false
inc(i)
proc parseUntil(s, until: openArray[byte]): int {.
raises: [].} =
proc parseUntil(s, until: openArray[byte]): int =
  # This procedure is a copy of parseutils.parseUntil(), however it is
  # intended to work with arrays of bytes rather than strings.
var i = 0
@ -95,8 +97,7 @@ proc parseUntil(s, until: openArray[byte]): int {.
inc(i)
-1
func setPartNames(part: var MultiPart): HttpResult[void] {.
raises: [].} =
func setPartNames(part: var MultiPart): HttpResult[void] =
if part.headers.count("content-disposition") != 1:
return err("Content-Disposition header is incorrect")
var header = part.headers.getString("content-disposition")
@ -105,7 +106,7 @@ func setPartNames(part: var MultiPart): HttpResult[void] {.
return err("Content-Disposition header value is incorrect")
let dtype = disp.dispositionType(header.toOpenArrayByte(0, len(header) - 1))
if dtype.toLowerAscii() != "form-data":
return err("Content-Disposition type is incorrect")
return err("Content-Disposition header type is incorrect")
for k, v in disp.fields(header.toOpenArrayByte(0, len(header) - 1)):
case k.toLowerAscii()
of "name":
@ -120,8 +121,7 @@ func setPartNames(part: var MultiPart): HttpResult[void] {.
proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader],
buffer: openArray[A],
boundary: openArray[B]): MultiPartReader {.
raises: [].} =
boundary: openArray[B]): MultiPartReader =
## Create new MultiPartReader instance with `buffer` interface.
##
## ``buffer`` - is buffer which will be used to read data.
@ -145,8 +145,7 @@ proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader],
proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef],
stream: HttpBodyReader,
boundary: openArray[B],
partHeadersMaxSize = 4096): MultiPartReaderRef {.
raises: [].} =
partHeadersMaxSize = 4096): MultiPartReaderRef =
## Create new MultiPartReader instance with `stream` interface.
##
## ``stream`` is stream used to read data.
@ -173,7 +172,17 @@ proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef],
stream: stream, offset: 0, boundary: fboundary,
buffer: newSeq[byte](partHeadersMaxSize))
proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} =
template handleAsyncStreamReaderError(targ, excarg: untyped) =
if targ.hasOverflow():
raiseHttpRequestBodyTooLargeError()
raiseHttpReadError(UnableToReadMultipartBody & $excarg.msg)
template handleAsyncStreamWriterError(targ, excarg: untyped) =
targ.state = MultiPartWriterState.MessageFailure
raiseHttpWriteError(UnableToSendMultipartMessage & $excarg.msg)
proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.
async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} =
doAssert(mpr.kind == MultiPartSource.Stream)
if mpr.firstTime:
try:
@ -182,14 +191,11 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} =
mpr.firstTime = false
if not(startsWith(mpr.buffer.toOpenArray(0, len(mpr.boundary) - 3),
mpr.boundary.toOpenArray(2, len(mpr.boundary) - 1))):
raiseHttpCriticalError("Unexpected boundary encountered")
raiseHttpProtocolError(Http400, "Unexpected boundary encountered")
except CancelledError as exc:
raise exc
except AsyncStreamError:
if mpr.stream.hasOverflow():
raiseHttpCriticalError(MaximumBodySizeError, Http413)
else:
raiseHttpCriticalError(UnableToReadMultipartBody)
except AsyncStreamError as exc:
handleAsyncStreamReaderError(mpr.stream, exc)
# Reading part's headers
try:
@ -203,9 +209,9 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} =
raise newException(MultipartEOMError,
"End of multipart message")
else:
raiseHttpCriticalError("Incorrect multipart header found")
raiseHttpProtocolError(Http400, "Incorrect multipart header found")
if mpr.buffer[0] != 0x0D'u8 or mpr.buffer[1] != 0x0A'u8:
raiseHttpCriticalError("Incorrect multipart boundary found")
raiseHttpProtocolError(Http400, "Incorrect multipart boundary found")
# If two bytes are CRLF we are at the part beginning.
# Reading part's headers
@ -213,7 +219,7 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} =
HeadersMark)
var headersList = parseHeaders(mpr.buffer.toOpenArray(0, res - 1), false)
if headersList.failed():
raiseHttpCriticalError("Incorrect multipart's headers found")
raiseHttpProtocolError(Http400, "Incorrect multipart's headers found")
inc(mpr.counter)
var part = MultiPart(
@ -229,48 +235,39 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} =
let sres = part.setPartNames()
if sres.isErr():
raiseHttpCriticalError($sres.error)
raiseHttpProtocolError(Http400, $sres.error)
return part
except CancelledError as exc:
raise exc
except AsyncStreamError:
if mpr.stream.hasOverflow():
raiseHttpCriticalError(MaximumBodySizeError, Http413)
else:
raiseHttpCriticalError(UnableToReadMultipartBody)
except AsyncStreamError as exc:
handleAsyncStreamReaderError(mpr.stream, exc)
proc getBody*(mp: MultiPart): Future[seq[byte]] {.async.} =
proc getBody*(mp: MultiPart): Future[seq[byte]] {.
async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} =
## Get multipart's ``mp`` value as sequence of bytes.
case mp.kind
of MultiPartSource.Stream:
try:
let res = await mp.stream.read()
return res
except AsyncStreamError:
if mp.breader.hasOverflow():
raiseHttpCriticalError(MaximumBodySizeError, Http413)
else:
raiseHttpCriticalError(UnableToReadMultipartBody)
await mp.stream.read()
except AsyncStreamError as exc:
handleAsyncStreamReaderError(mp.breader, exc)
of MultiPartSource.Buffer:
return mp.buffer
mp.buffer
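
A sketch of draining a stream-backed reader with ``readPart()``, ``getBody()`` and ``closeWait()``; ``reader`` is assumed to be a ``MultiPartReaderRef`` created over a request body:

proc dumpParts(reader: MultiPartReaderRef) {.
    async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} =
  while true:
    var part: MultiPart
    try:
      part = await reader.readPart()
    except MultipartEOMError:
      # readPart() signals the end of the multipart message this way.
      break
    let value = await part.getBody()
    echo part.name, ": ", len(value), " bytes"
    await part.closeWait()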
proc consumeBody*(mp: MultiPart) {.async.} =
proc consumeBody*(mp: MultiPart) {.
async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} =
## Discard multipart's ``mp`` value.
case mp.kind
of MultiPartSource.Stream:
try:
discard await mp.stream.consume()
except AsyncStreamError:
if mp.breader.hasOverflow():
raiseHttpCriticalError(MaximumBodySizeError, Http413)
else:
raiseHttpCriticalError(UnableToReadMultipartBody)
except AsyncStreamError as exc:
handleAsyncStreamReaderError(mp.breader, exc)
of MultiPartSource.Buffer:
discard
proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] {.
raises: [].} =
proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] =
## Get multipart's ``mp`` stream, which can be used to obtain value of the
## part.
case mp.kind
@ -279,7 +276,7 @@ proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] {.
else:
err("Could not obtain stream from buffer-like part")
proc closeWait*(mp: MultiPart) {.async.} =
proc closeWait*(mp: MultiPart) {.async: (raises: []).} =
## Close and release MultiPart's ``mp`` stream and resources.
case mp.kind
of MultiPartSource.Stream:
@ -287,7 +284,7 @@ proc closeWait*(mp: MultiPart) {.async.} =
else:
discard
proc closeWait*(mpr: MultiPartReaderRef) {.async.} =
proc closeWait*(mpr: MultiPartReaderRef) {.async: (raises: []).} =
## Close and release MultiPartReader's ``mpr`` stream and resources.
case mpr.kind
of MultiPartSource.Stream:
@ -295,7 +292,7 @@ proc closeWait*(mpr: MultiPartReaderRef) {.async.} =
else:
discard
proc getBytes*(mp: MultiPart): seq[byte] {.raises: [].} =
proc getBytes*(mp: MultiPart): seq[byte] =
## Returns value for MultiPart ``mp`` as sequence of bytes.
case mp.kind
of MultiPartSource.Buffer:
@ -304,7 +301,7 @@ proc getBytes*(mp: MultiPart): seq[byte] {.raises: [].} =
doAssert(not(mp.stream.atEof()), "Value is not obtained yet")
mp.buffer
proc getString*(mp: MultiPart): string {.raises: [].} =
proc getString*(mp: MultiPart): string =
## Returns value for MultiPart ``mp`` as string.
case mp.kind
of MultiPartSource.Buffer:
@ -313,7 +310,7 @@ proc getString*(mp: MultiPart): string {.raises: [].} =
doAssert(not(mp.stream.atEof()), "Value is not obtained yet")
bytesToString(mp.buffer)
proc atEoM*(mpr: var MultiPartReader): bool {.raises: [].} =
proc atEoM*(mpr: var MultiPartReader): bool =
## Procedure returns ``true`` if MultiPartReader has reached the end of
## multipart message.
case mpr.kind
@ -322,7 +319,7 @@ proc atEoM*(mpr: var MultiPartReader): bool {.raises: [].} =
of MultiPartSource.Stream:
mpr.stream.atEof()
proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [].} =
proc atEoM*(mpr: MultiPartReaderRef): bool =
## Procedure returns ``true`` if MultiPartReader has reached the end of
## multipart message.
case mpr.kind
@ -331,8 +328,7 @@ proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [].} =
of MultiPartSource.Stream:
mpr.stream.atEof()
proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {.
raises: [].} =
proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] =
## Get multipart part from MultiPartReader instance.
##
## This procedure will work only for MultiPartReader with buffer source.
@ -422,8 +418,7 @@ proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {.
else:
err("Incorrect multipart form")
func isEmpty*(mp: MultiPart): bool {.
raises: [].} =
func isEmpty*(mp: MultiPart): bool =
  ## Returns ``true`` if multipart ``mp`` is not initialized/filled yet.
mp.counter == 0
@ -439,8 +434,7 @@ func validateBoundary[B: BChar](boundary: openArray[B]): HttpResult[void] =
return err("Content-Type boundary alphabet incorrect")
ok()
func getMultipartBoundary*(contentData: ContentTypeData): HttpResult[string] {.
raises: [].} =
func getMultipartBoundary*(contentData: ContentTypeData): HttpResult[string] =
## Returns ``multipart/form-data`` boundary value from ``Content-Type``
## header.
##
@ -480,8 +474,7 @@ proc quoteCheck(name: string): HttpResult[string] =
ok(name)
proc init*[B: BChar](mpt: typedesc[MultiPartWriter],
boundary: openArray[B]): MultiPartWriter {.
raises: [].} =
boundary: openArray[B]): MultiPartWriter =
## Create new MultiPartWriter instance with `buffer` interface.
##
## ``boundary`` - is multipart boundary, this value must not be empty.
@ -510,8 +503,7 @@ proc init*[B: BChar](mpt: typedesc[MultiPartWriter],
proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef],
stream: HttpBodyWriter,
boundary: openArray[B]): MultiPartWriterRef {.
raises: [].} =
boundary: openArray[B]): MultiPartWriterRef =
doAssert(validateBoundary(boundary).isOk())
doAssert(not(isNil(stream)))
@ -538,7 +530,7 @@ proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef],
proc prepareHeaders(partMark: openArray[byte], name: string, filename: string,
headers: HttpTable): string =
const ContentDisposition = "Content-Disposition"
const ContentDispositionHeader = "Content-Disposition"
let qname =
block:
let res = quoteCheck(name)
@ -551,10 +543,10 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string,
res.get()
var buffer = newString(len(partMark))
copyMem(addr buffer[0], unsafeAddr partMark[0], len(partMark))
buffer.add(ContentDisposition)
buffer.add(ContentDispositionHeader)
buffer.add(": ")
if ContentDisposition in headers:
buffer.add(headers.getString(ContentDisposition))
if ContentDispositionHeader in headers:
buffer.add(headers.getString(ContentDispositionHeader))
buffer.add("\r\n")
else:
buffer.add("form-data; name=\"")
@ -567,7 +559,7 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string,
buffer.add("\r\n")
for k, v in headers.stringItems():
if k != toLowerAscii(ContentDisposition):
if k != ContentDispositionHeader:
if len(v) > 0:
buffer.add(k)
buffer.add(": ")
@ -576,7 +568,8 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string,
buffer.add("\r\n")
buffer
proc begin*(mpw: MultiPartWriterRef) {.async.} =
proc begin*(mpw: MultiPartWriterRef) {.
async: (raises: [CancelledError, HttpWriteError]).} =
  ## Starts the multipart message form and writes the appropriate markers to
  ## the output stream.
doAssert(mpw.kind == MultiPartSource.Stream)
@ -584,10 +577,9 @@ proc begin*(mpw: MultiPartWriterRef) {.async.} =
# write "--"
try:
await mpw.stream.write(mpw.beginMark)
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to start multipart message")
mpw.state = MultiPartWriterState.MessageStarted
mpw.state = MultiPartWriterState.MessageStarted
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc begin*(mpw: var MultiPartWriter) =
  ## Starts the multipart message form and writes the appropriate markers to
  ## the output stream.
mpw.state = MultiPartWriterState.MessageStarted
proc beginPart*(mpw: MultiPartWriterRef, name: string,
filename: string, headers: HttpTable) {.async.} =
filename: string, headers: HttpTable) {.
async: (raises: [CancelledError, HttpWriteError]).} =
  ## Starts a part of the multipart message and writes ``headers`` to the
## output stream.
##
@ -614,9 +607,8 @@ proc beginPart*(mpw: MultiPartWriterRef, name: string,
try:
await mpw.stream.write(buffer)
mpw.state = MultiPartWriterState.PartStarted
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to start multipart part")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc beginPart*(mpw: var MultiPartWriter, name: string,
filename: string, headers: HttpTable) =
@ -634,38 +626,38 @@ proc beginPart*(mpw: var MultiPartWriter, name: string,
mpw.buffer.add(buffer.toOpenArrayByte(0, len(buffer) - 1))
mpw.state = MultiPartWriterState.PartStarted
proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {.async.} =
proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {.
async: (raises: [CancelledError, HttpWriteError]).} =
  ## Write ``nbytes`` bytes of part data from ``pbytes`` to the output stream.
doAssert(mpw.kind == MultiPartSource.Stream)
doAssert(mpw.state == MultiPartWriterState.PartStarted)
try:
# write <chunk> of data
await mpw.stream.write(pbytes, nbytes)
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to write multipart data")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {.async.} =
proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {.
async: (raises: [CancelledError, HttpWriteError]).} =
## Write part's data ``data`` to the output stream.
doAssert(mpw.kind == MultiPartSource.Stream)
doAssert(mpw.state == MultiPartWriterState.PartStarted)
try:
# write <chunk> of data
await mpw.stream.write(data)
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to write multipart data")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc write*(mpw: MultiPartWriterRef, data: string) {.async.} =
proc write*(mpw: MultiPartWriterRef, data: string) {.
async: (raises: [CancelledError, HttpWriteError]).} =
## Write part's data ``data`` to the output stream.
doAssert(mpw.kind == MultiPartSource.Stream)
doAssert(mpw.state == MultiPartWriterState.PartStarted)
try:
# write <chunk> of data
await mpw.stream.write(data)
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to write multipart data")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc write*(mpw: var MultiPartWriter, pbytes: pointer, nbytes: int) =
  ## Write ``nbytes`` bytes of part data from ``pbytes`` to the output stream.
@ -688,16 +680,16 @@ proc write*(mpw: var MultiPartWriter, data: openArray[char]) =
doAssert(mpw.state == MultiPartWriterState.PartStarted)
mpw.buffer.add(data.toOpenArrayByte(0, len(data) - 1))
proc finishPart*(mpw: MultiPartWriterRef) {.async.} =
proc finishPart*(mpw: MultiPartWriterRef) {.
async: (raises: [CancelledError, HttpWriteError]).} =
## Finish multipart's message part and send proper markers to output stream.
doAssert(mpw.state == MultiPartWriterState.PartStarted)
try:
# write "<CR><LF>--"
await mpw.stream.write(mpw.finishPartMark)
mpw.state = MultiPartWriterState.PartFinished
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to finish multipart message part")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
proc finishPart*(mpw: var MultiPartWriter) =
## Finish multipart's message part and send proper markers to output stream.
@ -707,7 +699,8 @@ proc finishPart*(mpw: var MultiPartWriter) =
mpw.buffer.add(mpw.finishPartMark)
mpw.state = MultiPartWriterState.PartFinished
proc finish*(mpw: MultiPartWriterRef) {.async.} =
proc finish*(mpw: MultiPartWriterRef) {.
async: (raises: [CancelledError, HttpWriteError]).} =
## Finish multipart's message form and send finishing markers to the output
## stream.
doAssert(mpw.kind == MultiPartSource.Stream)
@ -716,9 +709,8 @@ proc finish*(mpw: MultiPartWriterRef) {.async.} =
# write "<boundary>--"
await mpw.stream.write(mpw.finishMark)
mpw.state = MultiPartWriterState.MessageFinished
except AsyncStreamError:
mpw.state = MultiPartWriterState.MessageFailure
raiseHttpCriticalError("Unable to finish multipart message")
except AsyncStreamError as exc:
handleAsyncStreamWriterError(mpw, exc)
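
A sketch of emitting a two-part form with the stream-backed writer procedures above; ``writer`` is assumed to have been created with ``MultiPartWriterRef.new`` over an ``HttpBodyWriter``:

proc sendForm(writer: MultiPartWriterRef) {.
    async: (raises: [CancelledError, HttpWriteError]).} =
  await writer.begin()                                 # start the message
  await writer.beginPart("name", "", HttpTable.init())
  await writer.write("alice")
  await writer.finishPart()
  await writer.beginPart("note", "note.txt", HttpTable.init())
  await writer.write("hello from chronos")
  await writer.finishPart()
  await writer.finish()                                # final boundary marker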
proc finish*(mpw: var MultiPartWriter): seq[byte] =
## Finish multipart's message form and send finishing markers to the output

View File

@ -6,6 +6,9 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.push raises: [].}
import httpserver
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, tlsstream]
@ -24,63 +27,62 @@ type
SecureHttpConnectionRef* = ref SecureHttpConnection
proc closeSecConnection(conn: HttpConnectionRef) {.async.} =
proc closeSecConnection(conn: HttpConnectionRef) {.async: (raises: []).} =
if conn.state == HttpState.Alive:
conn.state = HttpState.Closing
var pending: seq[Future[void]]
pending.add(conn.writer.closeWait())
pending.add(conn.reader.closeWait())
try:
await allFutures(pending)
except CancelledError:
await allFutures(pending)
    # After that we close everything else.
pending.setLen(3)
pending[0] = conn.mainReader.closeWait()
pending[1] = conn.mainWriter.closeWait()
pending[2] = conn.transp.closeWait()
try:
await allFutures(pending)
except CancelledError:
await allFutures(pending)
pending.add(conn.mainReader.closeWait())
pending.add(conn.mainWriter.closeWait())
pending.add(conn.transp.closeWait())
await noCancel(allFutures(pending))
reset(cast[SecureHttpConnectionRef](conn)[])
untrackCounter(HttpServerSecureConnectionTrackerName)
conn.state = HttpState.Closed
proc new*(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef,
transp: StreamTransport): SecureHttpConnectionRef =
proc new(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef,
transp: StreamTransport): Result[SecureHttpConnectionRef, string] =
var res = SecureHttpConnectionRef()
HttpConnection(res[]).init(HttpServerRef(server), transp)
let tlsStream =
newTLSServerAsyncStream(res.mainReader, res.mainWriter,
server.tlsPrivateKey,
server.tlsCertificate,
minVersion = TLSVersion.TLS12,
flags = server.secureFlags)
try:
newTLSServerAsyncStream(res.mainReader, res.mainWriter,
server.tlsPrivateKey,
server.tlsCertificate,
minVersion = TLSVersion.TLS12,
flags = server.secureFlags)
except TLSStreamError as exc:
return err(exc.msg)
res.tlsStream = tlsStream
res.reader = AsyncStreamReader(tlsStream.reader)
res.writer = AsyncStreamWriter(tlsStream.writer)
res.closeCb = closeSecConnection
trackCounter(HttpServerSecureConnectionTrackerName)
res
ok(res)
proc createSecConnection(server: HttpServerRef,
transp: StreamTransport): Future[HttpConnectionRef] {.
async.} =
let secureServ = cast[SecureHttpServerRef](server)
var sconn = SecureHttpConnectionRef.new(secureServ, transp)
async: (raises: [CancelledError, HttpConnectionError]).} =
let
secureServ = cast[SecureHttpServerRef](server)
sconn = SecureHttpConnectionRef.new(secureServ, transp).valueOr:
raiseHttpConnectionError(error)
try:
await handshake(sconn.tlsStream)
return HttpConnectionRef(sconn)
HttpConnectionRef(sconn)
except CancelledError as exc:
await HttpConnectionRef(sconn).closeWait()
raise exc
except TLSStreamError:
except AsyncStreamError as exc:
await HttpConnectionRef(sconn).closeWait()
raiseHttpCriticalError("Unable to establish secure connection")
let msg = "Unable to establish secure connection, reason: " & $exc.msg
raiseHttpConnectionError(msg)
proc new*(htype: typedesc[SecureHttpServerRef],
address: TransportAddress,
processCallback: HttpProcessCallback,
processCallback: HttpProcessCallback2,
tlsPrivateKey: TLSPrivateKey,
tlsCertificate: TLSCertificate,
serverFlags: set[HttpServerFlags] = {},
@ -90,11 +92,12 @@ proc new*(htype: typedesc[SecureHttpServerRef],
secureFlags: set[TLSFlags] = {},
maxConnections: int = -1,
bufferSize: int = 4096,
backlogSize: int = 100,
backlogSize: int = DefaultBacklogSize,
httpHeadersTimeout = 10.seconds,
maxHeadersSize: int = 8192,
maxRequestBodySize: int = 1_048_576
): HttpResult[SecureHttpServerRef] {.raises: [].} =
maxRequestBodySize: int = 1_048_576,
dualstack = DualStackType.Auto
): HttpResult[SecureHttpServerRef] =
doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!")
doAssert(not(isNil(tlsCertificate)), "TLS certificate must not be nil!")
@ -111,11 +114,9 @@ proc new*(htype: typedesc[SecureHttpServerRef],
let serverInstance =
try:
createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
backlog = backlogSize)
backlog = backlogSize, dualstack = dualstack)
except TransportOsError as exc:
return err(exc.msg)
except CatchableError as exc:
return err(exc.msg)
let res = SecureHttpServerRef(
address: address,
@ -144,3 +145,52 @@ proc new*(htype: typedesc[SecureHttpServerRef],
secureFlags: secureFlags
)
ok(res)
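
A sketch of creating and starting a secure server with the constructor above; ``pemKey`` and ``pemCert`` stand for hypothetical PEM-encoded key and certificate strings:

proc handler(req: RequestFence): Future[HttpResponseRef] {.
    async: (raises: [CancelledError]).} =
  if req.isOk():
    try:
      return await req.get().respond(Http200, "secure hello")
    except HttpError as exc:
      return defaultResponse(exc)
  else:
    return defaultResponse()

let server = SecureHttpServerRef.new(
  initTAddress("127.0.0.1:8443"), handler,
  tlsPrivateKey = TLSPrivateKey.init(pemKey),    # pemKey: hypothetical PEM data
  tlsCertificate = TLSCertificate.init(pemCert)  # pemCert: hypothetical PEM data
).get()
server.start()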
proc new*(htype: typedesc[SecureHttpServerRef],
address: TransportAddress,
processCallback: HttpProcessCallback,
tlsPrivateKey: TLSPrivateKey,
tlsCertificate: TLSCertificate,
serverFlags: set[HttpServerFlags] = {},
socketFlags: set[ServerFlags] = {ReuseAddr},
serverUri = Uri(),
serverIdent = "",
secureFlags: set[TLSFlags] = {},
maxConnections: int = -1,
bufferSize: int = 4096,
backlogSize: int = DefaultBacklogSize,
httpHeadersTimeout = 10.seconds,
maxHeadersSize: int = 8192,
maxRequestBodySize: int = 1_048_576,
dualstack = DualStackType.Auto
): HttpResult[SecureHttpServerRef] {.
deprecated: "Callback could raise only CancelledError, annotate with " &
"{.async: (raises: [CancelledError]).}".} =
proc wrap(req: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
try:
await processCallback(req)
except CancelledError as exc:
raise exc
except CatchableError as exc:
defaultResponse(exc)
SecureHttpServerRef.new(
address = address,
processCallback = wrap,
tlsPrivateKey = tlsPrivateKey,
tlsCertificate = tlsCertificate,
serverFlags = serverFlags,
socketFlags = socketFlags,
serverUri = serverUri,
serverIdent = serverIdent,
secureFlags = secureFlags,
maxConnections = maxConnections,
bufferSize = bufferSize,
backlogSize = backlogSize,
httpHeadersTimeout = httpHeadersTimeout,
maxHeadersSize = maxHeadersSize,
maxRequestBodySize = maxRequestBodySize,
dualstack = dualstack
)

View File

@ -1,995 +0,0 @@
#
# Chronos
#
# (c) Copyright 2015 Dominik Picheta
# (c) Copyright 2018-2023 Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import std/sequtils
import stew/base10
when chronosStackTrace:
when defined(nimHasStacktracesModule):
import system/stacktraces
else:
const
reraisedFromBegin = -10
reraisedFromEnd = -100
template LocCreateIndex*: auto {.deprecated: "LocationKind.Create".} =
LocationKind.Create
template LocFinishIndex*: auto {.deprecated: "LocationKind.Finish".} =
LocationKind.Finish
template LocCompleteIndex*: untyped {.deprecated: "LocationKind.Finish".} =
LocationKind.Finish
func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {.deprecated: "use LocationKind".} =
case v
of 0: loc[LocationKind.Create]
of 1: loc[LocationKind.Finish]
else: raiseAssert("Unknown source location " & $v)
type
FutureStr*[T] = ref object of Future[T]
## Future to hold GC strings
gcholder*: string
FutureSeq*[A, B] = ref object of Future[A]
## Future to hold GC seqs
gcholder*: seq[B]
# Backwards compatibility for old FutureState name
template Finished* {.deprecated: "Use Completed instead".} = Completed
template Finished*(T: type FutureState): FutureState {.deprecated: "Use FutureState.Completed instead".} = FutureState.Completed
proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] =
let fut = Future[T]()
internalInitFutureBase(fut, loc, FutureState.Pending)
fut
proc newFutureSeqImpl[A, B](loc: ptr SrcLoc): FutureSeq[A, B] =
let fut = FutureSeq[A, B]()
internalInitFutureBase(fut, loc, FutureState.Pending)
fut
proc newFutureStrImpl[T](loc: ptr SrcLoc): FutureStr[T] =
let fut = FutureStr[T]()
internalInitFutureBase(fut, loc, FutureState.Pending)
fut
template newFuture*[T](fromProc: static[string] = ""): Future[T] =
## Creates a new future.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
newFutureImpl[T](getSrcLocation(fromProc))
template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] =
## Create a new future which can hold/preserve GC sequence until future will
## not be completed.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
newFutureSeqImpl[A, B](getSrcLocation(fromProc))
template newFutureStr*[T](fromProc: static[string] = ""): FutureStr[T] =
## Create a new future which can hold/preserve GC string until future will
## not be completed.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
newFutureStrImpl[T](getSrcLocation(fromProc))
proc done*(future: FutureBase): bool {.deprecated: "Use `completed` instead".} =
## This is an alias for ``completed(future)`` procedure.
completed(future)
when chronosFutureTracking:
proc futureDestructor(udata: pointer) =
## This procedure will be called when Future[T] got completed, cancelled or
## failed and all Future[T].callbacks are already scheduled and processed.
let future = cast[FutureBase](udata)
if future == futureList.tail: futureList.tail = future.prev
if future == futureList.head: futureList.head = future.next
if not(isNil(future.next)): future.next.internalPrev = future.prev
if not(isNil(future.prev)): future.prev.internalNext = future.next
futureList.count.dec()
proc scheduleDestructor(future: FutureBase) {.inline.} =
callSoon(futureDestructor, cast[pointer](future))
proc checkFinished(future: FutureBase, loc: ptr SrcLoc) =
## Checks whether `future` is finished. If it is then raises a
## ``FutureDefect``.
if future.finished():
var msg = ""
msg.add("An attempt was made to complete a Future more than once. ")
msg.add("Details:")
msg.add("\n Future ID: " & Base10.toString(future.id))
msg.add("\n Creation location:")
msg.add("\n " & $future.location[LocationKind.Create])
msg.add("\n First completion location:")
msg.add("\n " & $future.location[LocationKind.Finish])
msg.add("\n Second completion location:")
msg.add("\n " & $loc)
when chronosStackTrace:
msg.add("\n Stack trace to moment of creation:")
msg.add("\n" & indent(future.stackTrace.strip(), 4))
msg.add("\n Stack trace to moment of secondary completion:")
msg.add("\n" & indent(getStackTrace().strip(), 4))
msg.add("\n\n")
var err = newException(FutureDefect, msg)
err.cause = future
raise err
else:
future.internalLocation[LocationKind.Finish] = loc
proc finish(fut: FutureBase, state: FutureState) =
# We do not perform any checks here, because:
# 1. `finish()` is a private procedure and `state` is under our control.
# 2. `fut.state` is checked by `checkFinished()`.
fut.internalState = state
when chronosProfiling:
if not isNil(onBaseFutureEvent):
onBaseFutureEvent(fut, state)
when chronosStrictFutureAccess:
doAssert fut.internalCancelcb == nil or state != FutureState.Cancelled
fut.internalCancelcb = nil # release cancellation callback memory
for item in fut.internalCallbacks.mitems():
if not(isNil(item.function)):
callSoon(item)
item = default(AsyncCallback) # release memory as early as possible
fut.internalCallbacks = default(seq[AsyncCallback]) # release seq as well
when chronosFutureTracking:
scheduleDestructor(fut)
proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) =
if not(future.cancelled()):
checkFinished(future, loc)
doAssert(isNil(future.internalError))
future.internalValue = val
future.finish(FutureState.Completed)
template complete*[T](future: Future[T], val: T) =
## Completes ``future`` with value ``val``.
complete(future, val, getSrcLocation())
proc complete(future: Future[void], loc: ptr SrcLoc) =
if not(future.cancelled()):
checkFinished(future, loc)
doAssert(isNil(future.internalError))
future.finish(FutureState.Completed)
template complete*(future: Future[void]) =
## Completes a void ``future``.
complete(future, getSrcLocation())
proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) =
if not(future.cancelled()):
checkFinished(future, loc)
future.internalError = error
when chronosStackTrace:
future.internalErrorStackTrace = if getStackTrace(error) == "":
getStackTrace()
else:
getStackTrace(error)
future.finish(FutureState.Failed)
template fail*(future: FutureBase, error: ref CatchableError) =
## Completes ``future`` with ``error``.
fail(future, error, getSrcLocation())
template newCancelledError(): ref CancelledError =
(ref CancelledError)(msg: "Future operation cancelled!")
proc cancelAndSchedule(future: FutureBase, loc: ptr SrcLoc) =
if not(future.finished()):
checkFinished(future, loc)
future.internalError = newCancelledError()
when chronosStackTrace:
future.internalErrorStackTrace = getStackTrace()
future.finish(FutureState.Cancelled)
template cancelAndSchedule*(future: FutureBase) =
cancelAndSchedule(future, getSrcLocation())
proc cancel(future: FutureBase, loc: ptr SrcLoc): bool =
## Request that Future ``future`` cancel itself.
##
## This arranges for a `CancelledError` to be thrown into procedure which
## waits for ``future`` on the next cycle through the event loop.
## The procedure then has a chance to clean up or even deny the request
## using `try/except/finally`.
##
## This call do not guarantee that the ``future`` will be cancelled: the
## exception might be caught and acted upon, delaying cancellation of the
## ``future`` or preventing cancellation completely. The ``future`` may also
## return value or raise different exception.
##
## Immediately after this procedure is called, ``future.cancelled()`` will
## not return ``true`` (unless the Future was already cancelled).
if future.finished():
return false
if not(isNil(future.internalChild)):
# If you hit this assertion, you should have used the `CancelledError`
# mechanism and/or use a regular `addCallback`
when chronosStrictFutureAccess:
doAssert future.internalCancelcb.isNil,
"futures returned from `{.async.}` functions must not use `cancelCallback`"
if cancel(future.internalChild, getSrcLocation()):
return true
else:
if not(isNil(future.internalCancelcb)):
future.internalCancelcb(cast[pointer](future))
future.internalCancelcb = nil
cancelAndSchedule(future, getSrcLocation())
future.internalMustCancel = true
return true
template cancel*(future: FutureBase) =
## Cancel ``future``.
discard cancel(future, getSrcLocation())
proc clearCallbacks(future: FutureBase) =
future.internalCallbacks = default(seq[AsyncCallback])
proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer) =
## Adds the callbacks proc to be called when the future completes.
##
## If future has already completed then ``cb`` will be called immediately.
doAssert(not isNil(cb))
if future.finished():
callSoon(cb, udata)
else:
future.internalCallbacks.add AsyncCallback(function: cb, udata: udata)
proc addCallback*(future: FutureBase, cb: CallbackFunc) =
## Adds the callbacks proc to be called when the future completes.
##
## If future has already completed then ``cb`` will be called immediately.
future.addCallback(cb, cast[pointer](future))
proc removeCallback*(future: FutureBase, cb: CallbackFunc,
udata: pointer) =
## Remove future from list of callbacks - this operation may be slow if there
## are many registered callbacks!
doAssert(not isNil(cb))
# Make sure to release memory associated with callback, or reference chains
# may be created!
future.internalCallbacks.keepItIf:
it.function != cb or it.udata != udata
proc removeCallback*(future: FutureBase, cb: CallbackFunc) =
future.removeCallback(cb, cast[pointer](future))
proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer) =
## Clears the list of callbacks and sets the callback proc to be called when
## the future completes.
##
## If future has already completed then ``cb`` will be called immediately.
##
## It's recommended to use ``addCallback`` or ``then`` instead.
# ZAH: how about `setLen(1); callbacks[0] = cb`
future.clearCallbacks
future.addCallback(cb, udata)
proc `callback=`*(future: FutureBase, cb: CallbackFunc) =
## Sets the callback proc to be called when the future completes.
##
## If future has already completed then ``cb`` will be called immediately.
`callback=`(future, cb, cast[pointer](future))
proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) =
## Sets the callback procedure to be called when the future is cancelled.
##
## This callback will be called immediately as ``future.cancel()`` invoked and
## must be set before future is finished.
when chronosStrictFutureAccess:
doAssert not future.finished(),
"cancellation callback must be set before finishing the future"
future.internalCancelcb = cb
{.push stackTrace: off.}
proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.}
proc internalContinue(fut: pointer) {.raises: [], gcsafe.} =
let asFut = cast[FutureBase](fut)
GC_unref(asFut)
futureContinue(asFut)
proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} =
# This function is responsible for calling the closure iterator generated by
# the `{.async.}` transformation either until it has completed its iteration
# or raised and error / been cancelled.
#
# Every call to an `{.async.}` proc is redirected to call this function
# instead with its original body captured in `fut.closure`.
var next: FutureBase
template iterate =
when chronosProfiling:
if not isNil(onAsyncFutureEvent):
onAsyncFutureEvent(fut, Running)
while true:
# Call closure to make progress on `fut` until it reaches `yield` (inside
# `await` typically) or completes / fails / is cancelled
next = fut.internalClosure(fut)
if fut.internalClosure.finished(): # Reached the end of the transformed proc
break
if next == nil:
raiseAssert "Async procedure (" & ($fut.location[LocationKind.Create]) &
") yielded `nil`, are you await'ing a `nil` Future?"
if not next.finished():
# We cannot make progress on `fut` until `next` has finished - schedule
# `fut` to continue running when that happens
GC_ref(fut)
next.addCallback(CallbackFunc(internalContinue), cast[pointer](fut))
when chronosProfiling:
if not isNil(onAsyncFutureEvent):
onAsyncFutureEvent(fut, Paused)
# return here so that we don't remove the closure below
return
# Continue while the yielded future is already finished.
when chronosStrictException:
try:
iterate
except CancelledError:
fut.cancelAndSchedule()
except CatchableError as exc:
fut.fail(exc)
finally:
next = nil # GC hygiene
else:
try:
iterate
except CancelledError:
fut.cancelAndSchedule()
except CatchableError as exc:
fut.fail(exc)
except Exception as exc:
if exc of Defect:
raise (ref Defect)(exc)
fut.fail((ref ValueError)(msg: exc.msg, parent: exc))
finally:
next = nil # GC hygiene
# `futureContinue` will not be called any more for this future so we can
# clean it up
fut.internalClosure = nil
fut.internalChild = nil
{.pop.}
when chronosStackTrace:
import std/strutils
template getFilenameProcname(entry: StackTraceEntry): (string, string) =
when compiles(entry.filenameStr) and compiles(entry.procnameStr):
# We can't rely on "entry.filename" and "entry.procname" still being valid
# cstring pointers, because the "string.data" buffers they pointed to might
# be already garbage collected (this entry being a non-shallow copy,
# "entry.filename" no longer points to "entry.filenameStr.data", but to the
# buffer of the original object).
(entry.filenameStr, entry.procnameStr)
else:
($entry.filename, $entry.procname)
proc `$`(stackTraceEntries: seq[StackTraceEntry]): string =
try:
when defined(nimStackTraceOverride) and declared(addDebuggingInfo):
let entries = addDebuggingInfo(stackTraceEntries)
else:
let entries = stackTraceEntries
# Find longest filename & line number combo for alignment purposes.
var longestLeft = 0
for entry in entries:
let (filename, procname) = getFilenameProcname(entry)
if procname == "": continue
let leftLen = filename.len + len($entry.line)
if leftLen > longestLeft:
longestLeft = leftLen
var indent = 2
# Format the entries.
for entry in entries:
let (filename, procname) = getFilenameProcname(entry)
if procname == "":
if entry.line == reraisedFromBegin:
result.add(spaces(indent) & "#[\n")
indent.inc(2)
elif entry.line == reraisedFromEnd:
indent.dec(2)
result.add(spaces(indent) & "]#\n")
continue
let left = "$#($#)" % [filename, $entry.line]
result.add((spaces(indent) & "$#$# $#\n") % [
left,
spaces(longestLeft - left.len + 2),
procname
])
except ValueError as exc:
return exc.msg # Shouldn't actually happen since we set the formatting
# string
proc injectStacktrace(error: ref Exception) =
const header = "\nAsync traceback:\n"
var exceptionMsg = error.msg
if header in exceptionMsg:
# This is messy: extract the original exception message from the msg
# containing the async traceback.
let start = exceptionMsg.find(header)
exceptionMsg = exceptionMsg[0..<start]
var newMsg = exceptionMsg & header
let entries = getStackTraceEntries(error)
newMsg.add($entries)
newMsg.add("Exception message: " & exceptionMsg & "\n")
# # For debugging purposes
# newMsg.add("Exception type:")
# for entry in getStackTraceEntries(future.error):
# newMsg.add "\n" & $entry
error.msg = newMsg
proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} =
# For internal use only. Used in asyncmacro
if not(isNil(fut.internalError)):
when chronosStackTrace:
injectStacktrace(fut.internalError)
raise fut.internalError
proc internalRead*[T](fut: Future[T]): T {.inline.} =
# For internal use only. Used in asyncmacro
when T isnot void:
return fut.internalValue
proc read*[T](future: Future[T] ): T {.raises: [CatchableError].} =
## Retrieves the value of ``future``. Future must be finished otherwise
## this function will fail with a ``ValueError`` exception.
##
## If the result of the future is an error then that error will be raised.
if future.finished():
internalCheckComplete(future)
internalRead(future)
else:
# TODO: Make a custom exception type for this?
raise newException(ValueError, "Future still in progress.")
proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} =
## Retrieves the exception stored in ``future``.
##
## An ``ValueError`` exception will be thrown if no exception exists
## in the specified Future.
if not(isNil(future.error)):
return future.error
else:
# TODO: Make a custom exception type for this?
raise newException(ValueError, "No error in future.")
template taskFutureLocation(future: FutureBase): string =
let loc = future.location[LocationKind.Create]
"[" & (
if len(loc.procedure) == 0: "[unspecified]" else: $loc.procedure & "()"
) & " at " & $loc.file & ":" & $(loc.line) & "]"
template taskErrorMessage(future: FutureBase): string =
"Asynchronous task " & taskFutureLocation(future) &
" finished with an exception \"" & $future.error.name &
"\"!\nMessage: " & future.error.msg &
"\nStack trace: " & future.error.getStackTrace()
template taskCancelMessage(future: FutureBase): string =
"Asynchronous task " & taskFutureLocation(future) & " was cancelled!"
proc asyncSpawn*(future: Future[void]) =
## Spawns a new concurrent async task.
##
## Tasks may not raise exceptions or be cancelled - a ``Defect`` will be
## raised when this happens.
##
## This should be used instead of ``discard`` and ``asyncCheck`` when calling
## an ``async`` procedure without ``await``, to ensure exceptions in the
## returned future are not silently discarded.
##
## Note, that if passed ``future`` is already finished, it will be checked
## and processed immediately.
doAssert(not isNil(future), "Future is nil")
proc cb(data: pointer) =
if future.failed():
raise newException(FutureDefect, taskErrorMessage(future))
elif future.cancelled():
raise newException(FutureDefect, taskCancelMessage(future))
if not(future.finished()):
# We adding completion callback only if ``future`` is not finished yet.
future.addCallback(cb)
else:
cb(nil)
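
A minimal fire-and-forget sketch using ``asyncSpawn`` as documented above:

proc heartbeat() {.async.} =
  while true:
    await sleepAsync(1.seconds)
    echo "tick"

# The spawned Future[void] is supervised: any failure becomes a Defect.
asyncSpawn heartbeat()
runForever()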
proc asyncCheck*[T](future: Future[T]) {.
deprecated: "Raises Defect on future failure, fix your code and use" &
" asyncSpawn!".} =
## This function used to raise an exception through the `poll` call if
## the given future failed - there's no way to handle such exceptions so this
## function is now an alias for `asyncSpawn`
##
when T is void:
asyncSpawn(future)
else:
proc cb(data: pointer) =
if future.failed():
raise newException(FutureDefect, taskErrorMessage(future))
elif future.cancelled():
raise newException(FutureDefect, taskCancelMessage(future))
if not(future.finished()):
# We adding completion callback only if ``future`` is not finished yet.
future.addCallback(cb)
else:
cb(nil)
proc asyncDiscard*[T](future: Future[T]) {.
deprecated: "Use asyncSpawn or `discard await`".} = discard
## `asyncDiscard` will discard the outcome of the operation - unlike `discard`
## it also throws away exceptions! Use `asyncSpawn` if you're sure your
## code doesn't raise exceptions, or `discard await` to ignore successful
## outcomes
proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {.
deprecated: "Use allFutures[T](varargs[Future[T]])".} =
## Returns a future which will complete once both ``fut1`` and ``fut2``
## finish.
##
## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled.
var retFuture = newFuture[void]("chronos.`and`")
proc cb(data: pointer) =
if not(retFuture.finished()):
if fut1.finished() and fut2.finished():
if cast[pointer](fut1) == data:
if fut1.failed():
retFuture.fail(fut1.error)
else:
retFuture.complete()
else:
if fut2.failed():
retFuture.fail(fut2.error)
else:
retFuture.complete()
fut1.callback = cb
fut2.callback = cb
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
if not(fut1.finished()):
fut1.removeCallback(cb)
if not(fut2.finished()):
fut2.removeCallback(cb)
retFuture.cancelCallback = cancellation
return retFuture
proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =
## Returns a future which will complete once either ``fut1`` or ``fut2``
## finish.
##
## If ``fut1`` or ``fut2`` future is failed, the result future will also be
## failed with an error stored in ``fut1`` or ``fut2`` respectively.
##
## If both ``fut1`` and ``fut2`` future are completed or failed, the result
## future will depend on the state of ``fut1`` future. So if ``fut1`` future
## is failed, the result future will also be failed, if ``fut1`` future is
## completed, the result future will also be completed.
##
## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled.
var retFuture = newFuture[void]("chronos.or")
var cb: proc(udata: pointer) {.gcsafe, raises: [].}
cb = proc(udata: pointer) {.gcsafe, raises: [].} =
if not(retFuture.finished()):
var fut = cast[FutureBase](udata)
if cast[pointer](fut1) == udata:
fut2.removeCallback(cb)
else:
fut1.removeCallback(cb)
if fut.failed():
retFuture.fail(fut.error)
else:
retFuture.complete()
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
if not(fut1.finished()):
fut1.removeCallback(cb)
if not(fut2.finished()):
fut2.removeCallback(cb)
if fut1.finished():
if fut1.failed():
retFuture.fail(fut1.error)
else:
retFuture.complete()
return retFuture
if fut2.finished():
if fut2.failed():
retFuture.fail(fut2.error)
else:
retFuture.complete()
return retFuture
fut1.addCallback(cb)
fut2.addCallback(cb)
retFuture.cancelCallback = cancellation
return retFuture
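
A common use of ``or`` as documented above: racing an operation against a timeout and cancelling the loser:

proc raceWithTimeout(fut: Future[void]) {.async.} =
  await fut or sleepAsync(5.seconds)
  if not(fut.finished()):
    # The timeout won the race; cancel the still-pending operation.
    await fut.cancelAndWait()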
proc all*[T](futs: varargs[Future[T]]): auto {.
deprecated: "Use allFutures(varargs[Future[T]])".} =
## Returns a future which will complete once all futures in ``futs`` finish.
## If the argument is empty, the returned future completes immediately.
##
## If the awaited futures are not ``Future[void]``, the returned future
## will hold the values of all awaited futures in a sequence.
##
## If the awaited futures *are* ``Future[void]``, this proc returns
## ``Future[void]``.
##
## Note, that if one of the futures in ``futs`` will fail, result of ``all()``
## will also be failed with error from failed future.
##
## TODO: This procedure has bug on handling cancelled futures from ``futs``.
## So if future from ``futs`` list become cancelled, what must be returned?
## You can't cancel result ``retFuture`` because in such way infinite
## recursion will happen.
let totalFutures = len(futs)
var completedFutures = 0
# Because we can't capture varargs[T] in closures we need to create copy.
var nfuts = @futs
when T is void:
var retFuture = newFuture[void]("chronos.all(void)")
proc cb(udata: pointer) =
if not(retFuture.finished()):
inc(completedFutures)
if completedFutures == totalFutures:
for nfut in nfuts:
if nfut.failed():
retFuture.fail(nfut.error)
break
if not(retFuture.failed()):
retFuture.complete()
for fut in nfuts:
fut.addCallback(cb)
if len(nfuts) == 0:
retFuture.complete()
return retFuture
else:
var retFuture = newFuture[seq[T]]("chronos.all(T)")
var retValues = newSeq[T](totalFutures)
proc cb(udata: pointer) =
if not(retFuture.finished()):
inc(completedFutures)
if completedFutures == totalFutures:
for k, nfut in nfuts:
if nfut.failed():
retFuture.fail(nfut.error)
break
else:
retValues[k] = nfut.value
if not(retFuture.failed()):
retFuture.complete(retValues)
for fut in nfuts:
fut.addCallback(cb)
if len(nfuts) == 0:
retFuture.complete(retValues)
return retFuture
proc oneIndex*[T](futs: varargs[Future[T]]): Future[int] {.
deprecated: "Use one[T](varargs[Future[T]])".} =
## Returns a future which will complete once one of the futures in ``futs``
## complete.
##
## If the argument is empty, the returned future FAILS immediately.
##
## Returned future will hold index of completed/failed future in ``futs``
## argument.
var nfuts = @futs
var retFuture = newFuture[int]("chronos.oneIndex(T)")
proc cb(udata: pointer) =
var res = -1
if not(retFuture.finished()):
var rfut = cast[FutureBase](udata)
for i in 0..<len(nfuts):
if cast[FutureBase](nfuts[i]) != rfut:
nfuts[i].removeCallback(cb)
else:
res = i
retFuture.complete(res)
for fut in nfuts:
fut.addCallback(cb)
if len(nfuts) == 0:
retFuture.fail(newException(ValueError, "Empty Future[T] list"))
return retFuture
proc oneValue*[T](futs: varargs[Future[T]]): Future[T] {.
deprecated: "Use one[T](varargs[Future[T]])".} =
## Returns a future which will finish once one of the futures in ``futs``
## finishes.
##
## If the argument is empty, the returned future FAILS immediately.
##
## The returned future will hold the value of the completed future from
## ``futs``, or its error if that future failed.
var nfuts = @futs
var retFuture = newFuture[T]("chronos.oneValue(T)")
proc cb(udata: pointer) =
var resFut: Future[T]
if not(retFuture.finished()):
var rfut = cast[FutureBase](udata)
for i in 0..<len(nfuts):
if cast[FutureBase](nfuts[i]) != rfut:
nfuts[i].removeCallback(cb)
else:
resFut = nfuts[i]
if resFut.failed():
retFuture.fail(resFut.error)
else:
when T is void:
retFuture.complete()
else:
retFuture.complete(resFut.read())
for fut in nfuts:
fut.addCallback(cb)
if len(nfuts) == 0:
retFuture.fail(newException(ValueError, "Empty Future[T] list"))
return retFuture
proc cancelAndWait*(fut: FutureBase): Future[void] =
## Initiate the cancellation process for Future ``fut`` and wait until ``fut``
## is done, i.e. changes its state (becomes completed, failed or cancelled).
##
## If ``fut`` is already finished (completed, failed or cancelled), the
## returned Future[void] object will already be completed.
var retFuture = newFuture[void]("chronos.cancelAndWait(T)")
proc continuation(udata: pointer) =
if not(retFuture.finished()):
retFuture.complete()
proc cancellation(udata: pointer) =
if not(fut.finished()):
fut.removeCallback(continuation)
if fut.finished():
retFuture.complete()
else:
fut.addCallback(continuation)
retFuture.cancelCallback = cancellation
# Initiate cancellation process.
fut.cancel()
return retFuture
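# Illustrative usage sketch (assumes `chronos` is imported so that
# `sleepAsync` and the time units are available): cancel a pending future and
# wait until it reaches its final state.
proc cancelAndWaitExample() {.async.} =
  let fut = sleepAsync(10.seconds)
  await cancelAndWait(fut)      # initiates cancellation and waits for it
  doAssert fut.cancelled()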
proc allFutures*(futs: varargs[FutureBase]): Future[void] =
## Returns a future which will complete only when all futures in ``futs``
## have completed, failed or been cancelled.
##
## If the argument is empty, the returned future COMPLETES immediately.
##
## On cancellation, the awaited futures in ``futs`` WILL NOT BE cancelled.
var retFuture = newFuture[void]("chronos.allFutures()")
let totalFutures = len(futs)
var finishedFutures = 0
# Because we can't capture varargs[T] in closures we need to create a copy.
var nfuts = @futs
proc cb(udata: pointer) =
if not(retFuture.finished()):
inc(finishedFutures)
if finishedFutures == totalFutures:
retFuture.complete()
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
for i in 0..<len(nfuts):
if not(nfuts[i].finished()):
nfuts[i].removeCallback(cb)
for fut in nfuts:
if not(fut.finished()):
fut.addCallback(cb)
else:
inc(finishedFutures)
retFuture.cancelCallback = cancellation
if len(nfuts) == 0 or len(nfuts) == finishedFutures:
retFuture.complete()
return retFuture
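# Illustrative usage sketch, assuming `chronos` is imported: wait for a group
# of futures to finish regardless of their individual outcome.
proc allFuturesExample() {.async.} =
  let
    a = sleepAsync(10.milliseconds)
    b = sleepAsync(20.milliseconds)
  await allFutures(a, b)        # completes once both futures are done
  doAssert a.finished() and b.finished()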
proc allFutures*[T](futs: varargs[Future[T]]): Future[void] =
## Returns a future which will complete only when all futures in ``futs``
## have completed, failed or been cancelled.
##
## If the argument is empty, the returned future COMPLETES immediately.
##
## On cancellation, the awaited futures in ``futs`` WILL NOT BE cancelled.
# Because we can't capture varargs[T] in closures we need to create a copy.
var nfuts: seq[FutureBase]
for future in futs:
nfuts.add(future)
allFutures(nfuts)
proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] =
## Returns a future which will complete only when all futures in ``futs``
## have completed, failed or been cancelled.
##
## The returned sequence will hold all the Future[T] objects passed to
## ``allFinished``, with the order preserved.
##
## If the argument is empty, the returned future COMPLETES immediately.
##
## On cancellation, the awaited futures in ``futs`` WILL NOT BE cancelled.
var retFuture = newFuture[seq[Future[T]]]("chronos.allFinished()")
let totalFutures = len(futs)
var finishedFutures = 0
var nfuts = @futs
proc cb(udata: pointer) =
if not(retFuture.finished()):
inc(finishedFutures)
if finishedFutures == totalFutures:
retFuture.complete(nfuts)
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
for fut in nfuts.mitems():
if not(fut.finished()):
fut.removeCallback(cb)
for fut in nfuts:
if not(fut.finished()):
fut.addCallback(cb)
else:
inc(finishedFutures)
retFuture.cancelCallback = cancellation
if len(nfuts) == 0 or len(nfuts) == finishedFutures:
retFuture.complete(nfuts)
return retFuture
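# Illustrative usage sketch, assuming `chronos` is imported: collect every
# finished future and inspect the individual outcomes afterwards.
proc allFinishedExample(futs: seq[Future[int]]) {.async.} =
  let done = await allFinished(futs)
  for fut in done:
    if fut.completed():
      echo "value: ", fut.value
    elif fut.failed():
      echo "error: ", fut.error.msg
    else:
      echo "cancelled"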
proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] =
## Returns a future which will complete, holding the completed Future[T],
## once one of the futures in ``futs`` completes, fails or is cancelled.
##
## If the argument is empty, the returned future FAILS immediately.
##
## On success the returned Future will hold the finished Future[T].
##
## On cancellation, the futures in ``futs`` WILL NOT BE cancelled.
var retFuture = newFuture[Future[T]]("chronos.one()")
if len(futs) == 0:
retFuture.fail(newException(ValueError, "Empty Future[T] list"))
return retFuture
# If one of the Future[T] has already finished, we return it as the result
for fut in futs:
if fut.finished():
retFuture.complete(fut)
return retFuture
# Because we can't capture varargs[T] in closures we need to create a copy.
var nfuts = @futs
var cb: proc(udata: pointer) {.gcsafe, raises: [].}
cb = proc(udata: pointer) {.gcsafe, raises: [].} =
if not(retFuture.finished()):
var res: Future[T]
var rfut = cast[FutureBase](udata)
for i in 0..<len(nfuts):
if cast[FutureBase](nfuts[i]) != rfut:
nfuts[i].removeCallback(cb)
else:
res = nfuts[i]
retFuture.complete(res)
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
for i in 0..<len(nfuts):
if not(nfuts[i].finished()):
nfuts[i].removeCallback(cb)
for fut in nfuts:
fut.addCallback(cb)
retFuture.cancelCallback = cancellation
return retFuture
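# Illustrative usage sketch, assuming `chronos` is imported: pick whichever
# future finishes first and inspect it, leaving the other one running.
proc oneExample(a, b: Future[int]) {.async.} =
  let first = await one(a, b)   # Future[Future[int]] -> finished Future[int]
  if first.completed():
    echo "first result: ", first.value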
proc race*(futs: varargs[FutureBase]): Future[FutureBase] =
## Returns a future which will complete, holding the completed FutureBase,
## once one of the futures in ``futs`` completes, fails or is cancelled.
##
## If the argument is empty, the returned future FAILS immediately.
##
## On success the returned Future will hold the finished FutureBase.
##
## On cancellation, the futures in ``futs`` WILL NOT BE cancelled.
let retFuture = newFuture[FutureBase]("chronos.race()")
if len(futs) == 0:
retFuture.fail(newException(ValueError, "Empty Future[T] list"))
return retFuture
# If one of the Future[T] has already finished, we return it as the result
for fut in futs:
if fut.finished():
retFuture.complete(fut)
return retFuture
# Because we can't capture varargs[T] in closures we need to create a copy.
var nfuts = @futs
var cb: proc(udata: pointer) {.gcsafe, raises: [].}
cb = proc(udata: pointer) {.gcsafe, raises: [].} =
if not(retFuture.finished()):
var res: FutureBase
var rfut = cast[FutureBase](udata)
for i in 0..<len(nfuts):
if nfuts[i] != rfut:
nfuts[i].removeCallback(cb)
else:
res = nfuts[i]
retFuture.complete(res)
proc cancellation(udata: pointer) =
# On cancel we remove all our callbacks only.
for i in 0..<len(nfuts):
if not(nfuts[i].finished()):
nfuts[i].removeCallback(cb)
for fut in nfuts:
fut.addCallback(cb, cast[pointer](fut))
retFuture.cancelCallback = cancellation
return retFuture
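# Illustrative usage sketch, assuming `chronos` is imported: `race` accepts
# futures of different result types through `FutureBase` and returns whichever
# finishes first, without cancelling the others.
proc raceExample(a: Future[int], b: Future[string]) {.async.} =
  let first = await race(FutureBase(a), FutureBase(b))
  if first == FutureBase(a):
    echo "a finished first"
  else:
    echo "b finished first"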

File diff suppressed because it is too large

View File

@ -1,337 +0,0 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2015 Dominik Picheta
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
import std/[macros]
# `quote do` will ruin line numbers so we avoid it using these helpers
proc completeWithResult(fut, baseType: NimNode): NimNode {.compileTime.} =
# when `baseType` is void:
# complete(`fut`)
# else:
# complete(`fut`, result)
if baseType.eqIdent("void"):
# Shortcut if we know baseType at macro expansion time
newCall(ident "complete", fut)
else:
# `baseType` might be generic and resolve to `void`
nnkWhenStmt.newTree(
nnkElifExpr.newTree(
nnkInfix.newTree(ident "is", baseType, ident "void"),
newCall(ident "complete", fut)
),
nnkElseExpr.newTree(
newCall(ident "complete", fut, ident "result")
)
)
proc completeWithNode(fut, baseType, node: NimNode): NimNode {.compileTime.} =
# when typeof(`node`) is void:
# `node` # statement / explicit return
# -> completeWithResult(fut, baseType)
# else: # expression / implicit return
# complete(`fut`, `node`)
if node.kind == nnkEmpty: # shortcut when known at macro expansion time
completeWithResult(fut, baseType)
else:
# Handle both expressions and statements - since the type is not known at
# macro expansion time, we delegate this choice to a later compilation stage
# with `when`.
nnkWhenStmt.newTree(
nnkElifExpr.newTree(
nnkInfix.newTree(
ident "is", nnkTypeOfExpr.newTree(node), ident "void"),
newStmtList(
node,
completeWithResult(fut, baseType)
)
),
nnkElseExpr.newTree(
newCall(ident "complete", fut, node)
)
)
proc processBody(node, fut, baseType: NimNode): NimNode {.compileTime.} =
#echo(node.treeRepr)
case node.kind
of nnkReturnStmt:
let
res = newNimNode(nnkStmtList, node)
res.add completeWithNode(fut, baseType, processBody(node[0], fut, baseType))
res.add newNimNode(nnkReturnStmt, node).add(newNilLit())
res
of RoutineNodes-{nnkTemplateDef}:
# skip all the nested procedure definitions
node
else:
for i in 0 ..< node.len:
# We must not transform nested procedures of any form, otherwise
# `fut` will be used for all nested procedures as their own
# `retFuture`.
node[i] = processBody(node[i], fut, baseType)
node
proc getName(node: NimNode): string {.compileTime.} =
case node.kind
of nnkSym:
return node.strVal
of nnkPostfix:
return node[1].strVal
of nnkIdent:
return node.strVal
of nnkEmpty:
return "anonymous"
else:
error("Unknown name.")
macro unsupported(s: static[string]): untyped =
error s
proc params2(someProc: NimNode): NimNode =
# until https://github.com/nim-lang/Nim/pull/19563 is available
if someProc.kind == nnkProcTy:
someProc[0]
else:
params(someProc)
proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} =
# Replace every Call -> OpenSymChoice by a Bracket expr
# ref https://github.com/nim-lang/Nim/issues/11091
if node.kind in nnkCallKinds and
node[0].kind == nnkOpenSymChoice and node[0].eqIdent("[]"):
result = newNimNode(nnkBracketExpr)
for child in node[1..^1]:
result.add(cleanupOpenSymChoice(child))
else:
result = node.copyNimNode()
for child in node:
result.add(cleanupOpenSymChoice(child))
proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
## This macro transforms a single procedure into a closure iterator.
## The ``async`` macro supports a stmtList holding multiple async procedures.
if prc.kind notin {nnkProcTy, nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}:
error("Cannot transform " & $prc.kind & " into an async proc." &
" proc/method definition or lambda node expected.", prc)
let returnType = cleanupOpenSymChoice(prc.params2[0])
# Verify that the return type is a Future[T]
let baseType =
if returnType.kind == nnkEmpty:
ident "void"
elif not (
returnType.kind == nnkBracketExpr and eqIdent(returnType[0], "Future")):
error(
"Expected return type of 'Future' got '" & repr(returnType) & "'", prc)
return
else:
returnType[1]
let
baseTypeIsVoid = baseType.eqIdent("void")
futureVoidType = nnkBracketExpr.newTree(ident "Future", ident "void")
if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}:
let
prcName = prc.name.getName
outerProcBody = newNimNode(nnkStmtList, prc.body)
# Copy comment for nimdoc
if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt:
outerProcBody.add(prc.body[0])
let
internalFutureSym = ident "chronosInternalRetFuture"
internalFutureType =
if baseTypeIsVoid: futureVoidType
else: returnType
castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym)
procBody = prc.body.processBody(castFutureSym, baseType)
# don't do anything with forward bodies (empty)
if procBody.kind != nnkEmpty:
let
# fix #13899, `defer` should not escape its original scope
procBodyBlck = nnkBlockStmt.newTree(newEmptyNode(), procBody)
resultDecl = nnkWhenStmt.newTree(
# when `baseType` is void:
nnkElifExpr.newTree(
nnkInfix.newTree(ident "is", baseType, ident "void"),
quote do:
template result: auto {.used.} =
{.fatal: "You should not reference the `result` variable inside" &
" a void async proc".}
),
# else:
nnkElseExpr.newTree(
newStmtList(
quote do: {.push warning[resultshadowed]: off.},
# var result {.used.}: `baseType`
# In the proc body, result may or may not end up being used
# depending on how the body is written - with implicit returns /
# expressions in particular, it is likely but not guaranteed that
# it is not used. Ideally, we would avoid emitting it in this
# case to avoid the default initialization. {.used.} typically
# works better than {.push.} which has a tendency to leak out of
# scope.
# TODO figure out if there's a way to detect `result` usage in
# the proc body _after_ template expansion, and therefore
# avoid creating this variable - one option is to create an
# additional when branch with a fake `result` and check
# `compiles(procBody)` - this is not without cost though
nnkVarSection.newTree(nnkIdentDefs.newTree(
nnkPragmaExpr.newTree(
ident "result",
nnkPragma.newTree(ident "used")),
baseType, newEmptyNode())
),
quote do: {.pop.},
)
)
)
completeDecl = completeWithNode(castFutureSym, baseType, procBodyBlck)
closureBody = newStmtList(resultDecl, completeDecl)
internalFutureParameter = nnkIdentDefs.newTree(
internalFutureSym, newIdentNode("FutureBase"), newEmptyNode())
iteratorNameSym = genSym(nskIterator, $prcName)
closureIterator = newProc(
iteratorNameSym,
[newIdentNode("FutureBase"), internalFutureParameter],
closureBody, nnkIteratorDef)
iteratorNameSym.copyLineInfo(prc)
closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body)
closureIterator.addPragma(newIdentNode("closure"))
# `async` code must be gcsafe
closureIterator.addPragma(newIdentNode("gcsafe"))
# TODO when push raises is active in a module, the iterator here inherits
# that annotation - here we explicitly disable it again, which goes
# against the spirit of the raises annotation - one should investigate
# the possibility of transporting more specific error types here, for
# example by casting exceptions coming out of `await`.
let raises = nnkBracket.newTree()
when chronosStrictException:
raises.add(newIdentNode("CatchableError"))
else:
raises.add(newIdentNode("Exception"))
closureIterator.addPragma(nnkExprColonExpr.newTree(
newIdentNode("raises"),
raises
))
# If proc has an explicit gcsafe pragma, we add it to iterator as well.
# TODO if these lines are not here, srcloc tests fail (!)
if prc.pragma.findChild(it.kind in {nnkSym, nnkIdent} and
it.strVal == "gcsafe") != nil:
closureIterator.addPragma(newIdentNode("gcsafe"))
outerProcBody.add(closureIterator)
# -> let resultFuture = newFuture[T]()
# declared at the end to be sure that the closure
# doesn't reference it, avoid cyclic ref (#203)
let
retFutureSym = ident "resultFuture"
retFutureSym.copyLineInfo(prc)
# Do not change this code to `quote do` version because `instantiationInfo`
# will be broken for `newFuture()` call.
outerProcBody.add(
newLetStmt(
retFutureSym,
newCall(newTree(nnkBracketExpr, ident "newFuture", baseType),
newLit(prcName))
)
)
# -> resultFuture.internalClosure = iterator
outerProcBody.add(
newAssignment(
newDotExpr(retFutureSym, newIdentNode("internalClosure")),
iteratorNameSym)
)
# -> futureContinue(resultFuture))
outerProcBody.add(
newCall(newIdentNode("futureContinue"), retFutureSym)
)
# -> return resultFuture
outerProcBody.add newNimNode(nnkReturnStmt, prc.body[^1]).add(retFutureSym)
prc.body = outerProcBody
if prc.kind notin {nnkProcTy, nnkLambda}: # TODO: Nim bug?
prc.addPragma(newColonExpr(ident "stackTrace", ident "off"))
# See **Remark 435** in this file.
# https://github.com/nim-lang/RFCs/issues/435
prc.addPragma(newIdentNode("gcsafe"))
prc.addPragma(nnkExprColonExpr.newTree(
newIdentNode("raises"),
nnkBracket.newTree()
))
if baseTypeIsVoid:
if returnType.kind == nnkEmpty:
# Add Future[void]
prc.params2[0] = futureVoidType
prc
template await*[T](f: Future[T]): untyped =
when declared(chronosInternalRetFuture):
chronosInternalRetFuture.internalChild = f
# `futureContinue` calls the iterator generated by the `async`
# transformation - `yield` gives control back to `futureContinue` which is
# responsible for resuming execution once the yielded future is finished
yield chronosInternalRetFuture.internalChild
# `child` is guaranteed to have been `finished` after the yield
if chronosInternalRetFuture.internalMustCancel:
raise newCancelledError()
# `child` released by `futureContinue`
chronosInternalRetFuture.internalChild.internalCheckComplete()
when T isnot void:
cast[type(f)](chronosInternalRetFuture.internalChild).internalRead()
else:
unsupported "await is only available within {.async.}"
template awaitne*[T](f: Future[T]): Future[T] =
when declared(chronosInternalRetFuture):
chronosInternalRetFuture.internalChild = f
yield chronosInternalRetFuture.internalChild
if chronosInternalRetFuture.internalMustCancel:
raise newCancelledError()
cast[type(f)](chronosInternalRetFuture.internalChild)
else:
unsupported "awaitne is only available within {.async.}"
macro async*(prc: untyped): untyped =
## Macro which processes async procedures into the appropriate
## iterators and yield statements.
if prc.kind == nnkStmtList:
result = newStmtList()
for oneProc in prc:
result.add asyncSingleProc(oneProc)
else:
result = asyncSingleProc(prc)
when chronosDumpAsync:
echo repr result
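# Illustrative sketch of the transformation in use, assuming `chronos` is
# imported: an `{.async.}` proc returns its `Future[T]` immediately, while
# `await` suspends the generated closure iterator until the awaited future
# has finished.
proc fetchAnswer(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)   # yields control to the dispatcher
  return 42                           # completes the returned future

proc useAnswer() {.async.} =
  let answer = await fetchAnswer()
  doAssert answer == 42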

View File

@ -13,7 +13,7 @@
import std/strtabs
import "."/[config, asyncloop, handles, osdefs, osutils, oserrno],
streams/asyncstream
import stew/[results, byteutils]
import stew/[byteutils], results
from std/os import quoteShell, quoteShellWindows, quoteShellPosix, envPairs
export strtabs, results
@ -24,7 +24,8 @@ const
## AsyncProcess leaks tracker name
type
AsyncProcessError* = object of CatchableError
AsyncProcessError* = object of AsyncError
AsyncProcessTimeoutError* = object of AsyncProcessError
AsyncProcessResult*[T] = Result[T, OSErrorCode]
@ -107,6 +108,9 @@ type
stdError*: string
status*: int
WaitOperation {.pure.} = enum
Kill, Terminate
template Pipe*(t: typedesc[AsyncProcess]): ProcessStreamHandle =
ProcessStreamHandle(kind: ProcessStreamHandleKind.Auto)
@ -294,6 +298,11 @@ proc raiseAsyncProcessError(msg: string, exc: ref CatchableError = nil) {.
msg & " ([" & $exc.name & "]: " & $exc.msg & ")"
raise newException(AsyncProcessError, message)
proc raiseAsyncProcessTimeoutError() {.
noreturn, noinit, noinline, raises: [AsyncProcessTimeoutError].} =
let message = "Operation timed out"
raise newException(AsyncProcessTimeoutError, message)
proc raiseAsyncProcessError(msg: string, error: OSErrorCode|cint) {.
noreturn, noinit, noinline, raises: [AsyncProcessError].} =
when error is OSErrorCode:
@ -1189,11 +1198,50 @@ proc closeProcessStreams(pipes: AsyncProcessPipes,
res
allFutures(pending)
proc opAndWaitForExit(p: AsyncProcessRef, op: WaitOperation,
timeout = InfiniteDuration): Future[int] {.async.} =
let timerFut =
if timeout == InfiniteDuration:
newFuture[void]("chronos.killAndwaitForExit")
else:
sleepAsync(timeout)
while true:
if p.running().get(true):
# We ignore operation errors because we are going to repeat the operation
# until the process exits.
case op
of WaitOperation.Kill:
discard p.kill()
of WaitOperation.Terminate:
discard p.terminate()
else:
let exitCode = p.peekExitCode().valueOr:
raiseAsyncProcessError("Unable to peek process exit code", error)
if not(timerFut.finished()):
await cancelAndWait(timerFut)
return exitCode
let waitFut = p.waitForExit().wait(100.milliseconds)
discard await race(FutureBase(waitFut), FutureBase(timerFut))
if waitFut.finished() and not(waitFut.failed()):
let res = p.peekExitCode()
if res.isOk():
if not(timerFut.finished()):
await cancelAndWait(timerFut)
return res.get()
if timerFut.finished():
if not(waitFut.finished()):
await waitFut.cancelAndWait()
raiseAsyncProcessTimeoutError()
proc closeWait*(p: AsyncProcessRef) {.async.} =
# Here we ignore all possible errors, because we do not want to raise
# exceptions.
discard closeProcessHandles(p.pipes, p.options, OSErrorCode(0))
await p.pipes.closeProcessStreams(p.options)
await noCancel(p.pipes.closeProcessStreams(p.options))
discard p.closeThreadAndProcessHandle()
untrackCounter(AsyncProcessTrackerName)
@ -1216,14 +1264,15 @@ proc execCommand*(command: string,
options = {AsyncProcessOption.EvalCommand},
timeout = InfiniteDuration
): Future[int] {.async.} =
let poptions = options + {AsyncProcessOption.EvalCommand}
let process = await startProcess(command, options = poptions)
let res =
try:
await process.waitForExit(timeout)
finally:
await process.closeWait()
return res
let
poptions = options + {AsyncProcessOption.EvalCommand}
process = await startProcess(command, options = poptions)
res =
try:
await process.waitForExit(timeout)
finally:
await process.closeWait()
res
proc execCommandEx*(command: string,
options = {AsyncProcessOption.EvalCommand},
@ -1256,10 +1305,43 @@ proc execCommandEx*(command: string,
finally:
await process.closeWait()
return res
res
proc pid*(p: AsyncProcessRef): int =
## Returns process ``p`` identifier.
int(p.processId)
template processId*(p: AsyncProcessRef): int = pid(p)
proc killAndWaitForExit*(p: AsyncProcessRef,
timeout = InfiniteDuration): Future[int] =
## Perform continuous attempts to kill the process ``p`` for the specified
## period of time ``timeout``.
##
## On Posix systems, killing means sending ``SIGKILL`` to the process ``p``;
## on Windows, ``TerminateProcess`` is used to kill the process ``p``.
##
## If the process ``p`` fails to be killed within the ``timeout`` period,
## ``AsyncProcessTimeoutError`` is raised.
##
## In case of any other error, ``AsyncProcessError`` is raised.
##
## Returns the process ``p`` exit code.
opAndWaitForExit(p, WaitOperation.Kill, timeout)
proc terminateAndWaitForExit*(p: AsyncProcessRef,
timeout = InfiniteDuration): Future[int] =
## Perform continuous attempts to terminate the process ``p`` for the
## specified period of time ``timeout``.
##
## On Posix systems, terminating means sending ``SIGTERM`` to the process
## ``p``; on Windows, ``TerminateProcess`` is used to terminate the process
## ``p``.
##
## If the process ``p`` fails to be terminated within the ``timeout`` period,
## ``AsyncProcessTimeoutError`` is raised.
##
## In case of any other error, ``AsyncProcessError`` is raised.
##
## Returns the process ``p`` exit code.
opAndWaitForExit(p, WaitOperation.Terminate, timeout)
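# Illustrative usage sketch, assuming `chronos` is imported and a POSIX shell
# is available (the "sleep 60" command is only an example): start a
# long-running command, try a graceful terminate first and fall back to a
# hard kill on timeout.
proc stopProcessExample() {.async.} =
  let p = await startProcess("sleep 60",
                             options = {AsyncProcessOption.EvalCommand})
  try:
    discard await p.terminateAndWaitForExit(5.seconds)
  except AsyncProcessTimeoutError:
    discard await p.killAndWaitForExit(5.seconds)
  finally:
    await p.closeWait()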

View File

@ -28,7 +28,7 @@ type
## is blocked in ``acquire()`` is being processed.
locked: bool
acquired: bool
waiters: seq[Future[void]]
waiters: seq[Future[void].Raising([CancelledError])]
AsyncEvent* = ref object of RootRef
## A primitive event object.
@ -41,7 +41,7 @@ type
## state to be signaled, when event get fired, then all coroutines
## continue proceeds in order, they have entered waiting state.
flag: bool
waiters: seq[Future[void]]
waiters: seq[Future[void].Raising([CancelledError])]
AsyncQueue*[T] = ref object of RootRef
## A queue, useful for coordinating producer and consumer coroutines.
@ -50,8 +50,8 @@ type
## infinite. If it is an integer greater than ``0``, then "await put()"
## will block when the queue reaches ``maxsize``, until an item is
## removed by "await get()".
getters: seq[Future[void]]
putters: seq[Future[void]]
getters: seq[Future[void].Raising([CancelledError])]
putters: seq[Future[void].Raising([CancelledError])]
queue: Deque[T]
maxsize: int
@ -62,50 +62,6 @@ type
AsyncLockError* = object of AsyncError
## ``AsyncLock`` is either locked or unlocked.
EventBusSubscription*[T] = proc(bus: AsyncEventBus,
payload: EventPayload[T]): Future[void] {.
gcsafe, raises: [].}
## EventBus subscription callback type.
EventBusAllSubscription* = proc(bus: AsyncEventBus,
event: AwaitableEvent): Future[void] {.
gcsafe, raises: [].}
## EventBus subscription callback type.
EventBusCallback = proc(bus: AsyncEventBus, event: string, key: EventBusKey,
data: EventPayloadBase) {.
gcsafe, raises: [].}
EventBusKey* = object
## Unique subscription key.
eventName: string
typeName: string
unique: uint64
cb: EventBusCallback
EventItem = object
waiters: seq[FutureBase]
subscribers: seq[EventBusKey]
AsyncEventBus* = ref object of RootObj
## An eventbus object.
counter: uint64
events: Table[string, EventItem]
subscribers: seq[EventBusKey]
waiters: seq[Future[AwaitableEvent]]
EventPayloadBase* = ref object of RootObj
loc: ptr SrcLoc
EventPayload*[T] = ref object of EventPayloadBase
## Eventbus' event payload object
value: T
AwaitableEvent* = object
## Eventbus' event payload object
eventName: string
payload: EventPayloadBase
AsyncEventQueueFullError* = object of AsyncError
EventQueueKey* = distinct uint64
@ -113,7 +69,7 @@ type
EventQueueReader* = object
key: EventQueueKey
offset: int
waiter: Future[void]
waiter: Future[void].Raising([CancelledError])
overflow: bool
AsyncEventQueue*[T] = ref object of RootObj
@ -134,17 +90,14 @@ proc newAsyncLock*(): AsyncLock =
## The ``release()`` procedure changes the state to unlocked and returns
## immediately.
# Workaround for callSoon() not working correctly before
# getThreadDispatcher() call.
discard getThreadDispatcher()
AsyncLock(waiters: newSeq[Future[void]](), locked: false, acquired: false)
AsyncLock()
proc wakeUpFirst(lock: AsyncLock): bool {.inline.} =
## Wake up the first waiter if it isn't done.
var i = 0
var res = false
while i < len(lock.waiters):
var waiter = lock.waiters[i]
let waiter = lock.waiters[i]
inc(i)
if not(waiter.finished()):
waiter.complete()
@ -164,7 +117,7 @@ proc checkAll(lock: AsyncLock): bool {.inline.} =
return false
return true
proc acquire*(lock: AsyncLock) {.async.} =
proc acquire*(lock: AsyncLock) {.async: (raises: [CancelledError]).} =
## Acquire a lock ``lock``.
##
## This procedure blocks until the lock ``lock`` is unlocked, then sets it
@ -173,7 +126,7 @@ proc acquire*(lock: AsyncLock) {.async.} =
lock.acquired = true
lock.locked = true
else:
var w = newFuture[void]("AsyncLock.acquire")
let w = Future[void].Raising([CancelledError]).init("AsyncLock.acquire")
lock.waiters.add(w)
await w
lock.acquired = true
@ -209,13 +162,10 @@ proc newAsyncEvent*(): AsyncEvent =
## procedure and reset to `false` with the `clear()` procedure.
## The `wait()` procedure blocks until the flag is `true`. The flag is
## initially `false`.
AsyncEvent()
# Workaround for callSoon() not working correctly before
# getThreadDispatcher() call.
discard getThreadDispatcher()
AsyncEvent(waiters: newSeq[Future[void]](), flag: false)
proc wait*(event: AsyncEvent): Future[void] =
proc wait*(event: AsyncEvent): Future[void] {.
async: (raw: true, raises: [CancelledError]).} =
## Block until the internal flag of ``event`` is `true`.
## If the internal flag is `true` on entry, return immediately. Otherwise,
## block until another task calls `fire()` to set the flag to `true`,
@ -254,20 +204,15 @@ proc isSet*(event: AsyncEvent): bool =
proc newAsyncQueue*[T](maxsize: int = 0): AsyncQueue[T] =
## Creates a new asynchronous queue ``AsyncQueue``.
# Workaround for callSoon() not working correctly before
# getThreadDispatcher() call.
discard getThreadDispatcher()
AsyncQueue[T](
getters: newSeq[Future[void]](),
putters: newSeq[Future[void]](),
queue: initDeque[T](),
maxsize: maxsize
)
proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} =
proc wakeupNext(waiters: var seq) {.inline.} =
var i = 0
while i < len(waiters):
var waiter = waiters[i]
let waiter = waiters[i]
inc(i)
if not(waiter.finished()):
@ -294,119 +239,141 @@ proc empty*[T](aq: AsyncQueue[T]): bool {.inline.} =
## Return ``true`` if the queue is empty, ``false`` otherwise.
(len(aq.queue) == 0)
proc addFirstImpl[T](aq: AsyncQueue[T], item: T) =
aq.queue.addFirst(item)
aq.getters.wakeupNext()
proc addLastImpl[T](aq: AsyncQueue[T], item: T) =
aq.queue.addLast(item)
aq.getters.wakeupNext()
proc popFirstImpl[T](aq: AsyncQueue[T]): T =
let res = aq.queue.popFirst()
aq.putters.wakeupNext()
res
proc popLastImpl[T](aq: AsyncQueue[T]): T =
let res = aq.queue.popLast()
aq.putters.wakeupNext()
res
proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {.
raises: [AsyncQueueFullError].}=
raises: [AsyncQueueFullError].} =
## Put an item ``item`` to the beginning of the queue ``aq`` immediately.
##
## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised.
if aq.full():
raise newException(AsyncQueueFullError, "AsyncQueue is full!")
aq.queue.addFirst(item)
aq.getters.wakeupNext()
aq.addFirstImpl(item)
proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {.
raises: [AsyncQueueFullError].}=
raises: [AsyncQueueFullError].} =
## Put an item ``item`` at the end of the queue ``aq`` immediately.
##
## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised.
if aq.full():
raise newException(AsyncQueueFullError, "AsyncQueue is full!")
aq.queue.addLast(item)
aq.getters.wakeupNext()
aq.addLastImpl(item)
proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {.
raises: [AsyncQueueEmptyError].} =
raises: [AsyncQueueEmptyError].} =
## Get an item from the beginning of the queue ``aq`` immediately.
##
## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised.
if aq.empty():
raise newException(AsyncQueueEmptyError, "AsyncQueue is empty!")
let res = aq.queue.popFirst()
aq.putters.wakeupNext()
res
aq.popFirstImpl()
proc popLastNoWait*[T](aq: AsyncQueue[T]): T {.
raises: [AsyncQueueEmptyError].} =
raises: [AsyncQueueEmptyError].} =
## Get an item from the end of the queue ``aq`` immediately.
##
## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised.
if aq.empty():
raise newException(AsyncQueueEmptyError, "AsyncQueue is empty!")
let res = aq.queue.popLast()
aq.putters.wakeupNext()
res
aq.popLastImpl()
proc addFirst*[T](aq: AsyncQueue[T], item: T) {.async.} =
proc addFirst*[T](aq: AsyncQueue[T], item: T) {.
async: (raises: [CancelledError]).} =
## Put an ``item`` to the beginning of the queue ``aq``. If the queue is full,
## wait until a free slot is available before adding item.
while aq.full():
var putter = newFuture[void]("AsyncQueue.addFirst")
let putter =
Future[void].Raising([CancelledError]).init("AsyncQueue.addFirst")
aq.putters.add(putter)
try:
await putter
except CatchableError as exc:
except CancelledError as exc:
if not(aq.full()) and not(putter.cancelled()):
aq.putters.wakeupNext()
raise exc
aq.addFirstNoWait(item)
aq.addFirstImpl(item)
proc addLast*[T](aq: AsyncQueue[T], item: T) {.async.} =
proc addLast*[T](aq: AsyncQueue[T], item: T) {.
async: (raises: [CancelledError]).} =
## Put an ``item`` to the end of the queue ``aq``. If the queue is full,
## wait until a free slot is available before adding item.
while aq.full():
var putter = newFuture[void]("AsyncQueue.addLast")
let putter =
Future[void].Raising([CancelledError]).init("AsyncQueue.addLast")
aq.putters.add(putter)
try:
await putter
except CatchableError as exc:
except CancelledError as exc:
if not(aq.full()) and not(putter.cancelled()):
aq.putters.wakeupNext()
raise exc
aq.addLastNoWait(item)
aq.addLastImpl(item)
proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.async.} =
proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.
async: (raises: [CancelledError]).} =
## Remove and return an ``item`` from the beginning of the queue ``aq``.
## If the queue is empty, wait until an item is available.
while aq.empty():
var getter = newFuture[void]("AsyncQueue.popFirst")
let getter =
Future[void].Raising([CancelledError]).init("AsyncQueue.popFirst")
aq.getters.add(getter)
try:
await getter
except CatchableError as exc:
except CancelledError as exc:
if not(aq.empty()) and not(getter.cancelled()):
aq.getters.wakeupNext()
raise exc
return aq.popFirstNoWait()
aq.popFirstImpl()
proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async.} =
proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.
async: (raises: [CancelledError]).} =
## Remove and return an ``item`` from the end of the queue ``aq``.
## If the queue is empty, wait until an item is available.
while aq.empty():
var getter = newFuture[void]("AsyncQueue.popLast")
let getter =
Future[void].Raising([CancelledError]).init("AsyncQueue.popLast")
aq.getters.add(getter)
try:
await getter
except CatchableError as exc:
except CancelledError as exc:
if not(aq.empty()) and not(getter.cancelled()):
aq.getters.wakeupNext()
raise exc
return aq.popLastNoWait()
aq.popLastImpl()
proc putNoWait*[T](aq: AsyncQueue[T], item: T) {.
raises: [AsyncQueueFullError].} =
raises: [AsyncQueueFullError].} =
## Alias of ``addLastNoWait()``.
aq.addLastNoWait(item)
proc getNoWait*[T](aq: AsyncQueue[T]): T {.
raises: [AsyncQueueEmptyError].} =
raises: [AsyncQueueEmptyError].} =
## Alias of ``popFirstNoWait()``.
aq.popFirstNoWait()
proc put*[T](aq: AsyncQueue[T], item: T): Future[void] {.inline.} =
proc put*[T](aq: AsyncQueue[T], item: T): Future[void] {.
async: (raw: true, raises: [CancelledError]).} =
## Alias of ``addLast()``.
aq.addLast(item)
proc get*[T](aq: AsyncQueue[T]): Future[T] {.inline.} =
proc get*[T](aq: AsyncQueue[T]): Future[T] {.
async: (raw: true, raises: [CancelledError]).} =
## Alias of ``popFirst()``.
aq.popFirst()
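# Illustrative usage sketch, assuming `chronos` is imported: a bounded queue
# coordinating a producer and a consumer coroutine.
proc queueExample() {.async.} =
  let q = newAsyncQueue[int](maxsize = 2)
  await q.put(1)                   # addLast(); suspends while the queue is full
  await q.put(2)
  doAssert (await q.get()) == 1    # popFirst(); suspends while the queue is empty
  doAssert (await q.get()) == 2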
@ -460,7 +427,7 @@ proc contains*[T](aq: AsyncQueue[T], item: T): bool {.inline.} =
## via the ``in`` operator.
for e in aq.queue.items():
if e == item: return true
return false
false
proc `$`*[T](aq: AsyncQueue[T]): string =
## Turn an async queue ``aq`` into its string representation.
@ -471,190 +438,6 @@ proc `$`*[T](aq: AsyncQueue[T]): string =
res.add("]")
res
template generateKey(typeName, eventName: string): string =
"type[" & typeName & "]-key[" & eventName & "]"
proc newAsyncEventBus*(): AsyncEventBus {.
deprecated: "Implementation has unfixable flaws, please use" &
"AsyncEventQueue[T] instead".} =
## Creates new ``AsyncEventBus``.
AsyncEventBus(counter: 0'u64, events: initTable[string, EventItem]())
template get*[T](payload: EventPayload[T]): T =
## Returns event payload data.
payload.value
template location*(payload: EventPayloadBase): SrcLoc =
## Returns source location address of event emitter.
payload.loc[]
proc get*(event: AwaitableEvent, T: typedesc): T {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue[T] instead".} =
## Returns event's payload of type ``T`` from event ``event``.
cast[EventPayload[T]](event.payload).value
template event*(event: AwaitableEvent): string =
## Returns event's name from event ``event``.
event.eventName
template location*(event: AwaitableEvent): SrcLoc =
## Returns source location address of event emitter.
event.payload.loc[]
proc waitEvent*(bus: AsyncEventBus, T: typedesc, event: string): Future[T] {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue[T] instead".} =
## Wait for the event from AsyncEventBus ``bus`` with name ``event``.
##
## Returned ``Future[T]`` will hold event's payload of type ``T``.
var default: EventItem
var retFuture = newFuture[T]("AsyncEventBus.waitEvent")
let eventKey = generateKey(T.name, event)
proc cancellation(udata: pointer) {.gcsafe, raises: [].} =
if not(retFuture.finished()):
bus.events.withValue(eventKey, item):
item.waiters.keepItIf(it != cast[FutureBase](retFuture))
retFuture.cancelCallback = cancellation
let baseFuture = cast[FutureBase](retFuture)
bus.events.mgetOrPut(eventKey, default).waiters.add(baseFuture)
retFuture
proc waitAllEvents*(bus: AsyncEventBus): Future[AwaitableEvent] {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue[T] instead".} =
## Wait for any event from AsyncEventBus ``bus``.
##
## Returns ``Future`` which holds helper object. Using this object you can
## retrieve event's name and payload.
var retFuture = newFuture[AwaitableEvent]("AsyncEventBus.waitAllEvents")
proc cancellation(udata: pointer) {.gcsafe, raises: [].} =
if not(retFuture.finished()):
bus.waiters.keepItIf(it != retFuture)
retFuture.cancelCallback = cancellation
bus.waiters.add(retFuture)
retFuture
proc subscribe*[T](bus: AsyncEventBus, event: string,
callback: EventBusSubscription[T]): EventBusKey {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue[T] instead".} =
## Subscribe to the event ``event`` passed through eventbus ``bus`` with
## callback ``callback``.
##
## Returns key that can be used to unsubscribe.
proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey,
data: EventPayloadBase) {.gcsafe, raises: [].} =
let payload = cast[EventPayload[T]](data)
asyncSpawn callback(bus, payload)
let subkey =
block:
inc(bus.counter)
EventBusKey(eventName: event, typeName: T.name, unique: bus.counter,
cb: trampoline)
var default: EventItem
let eventKey = generateKey(T.name, event)
bus.events.mgetOrPut(eventKey, default).subscribers.add(subkey)
subkey
proc subscribeAll*(bus: AsyncEventBus,
callback: EventBusAllSubscription): EventBusKey {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue instead".} =
## Subscribe to all events passed through eventbus ``bus`` with callback
## ``callback``.
##
## Returns key that can be used to unsubscribe.
proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey,
data: EventPayloadBase) {.gcsafe, raises: [].} =
let event = AwaitableEvent(eventName: event, payload: data)
asyncSpawn callback(bus, event)
let subkey =
block:
inc(bus.counter)
EventBusKey(eventName: "", typeName: "", unique: bus.counter,
cb: trampoline)
bus.subscribers.add(subkey)
subkey
proc unsubscribe*(bus: AsyncEventBus, key: EventBusKey) {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue instead".} =
## Cancel subscription of subscriber with key ``key`` from eventbus ``bus``.
let eventKey = generateKey(key.typeName, key.eventName)
# Clean event's subscribers.
bus.events.withValue(eventKey, item):
item.subscribers.keepItIf(it.unique != key.unique)
# Clean subscribers subscribed to all events.
bus.subscribers.keepItIf(it.unique != key.unique)
proc emit[T](bus: AsyncEventBus, event: string, data: T, loc: ptr SrcLoc) =
let
eventKey = generateKey(T.name, event)
payload =
block:
var data = EventPayload[T](value: data, loc: loc)
cast[EventPayloadBase](data)
# Used to capture the "subscriber" variable in the loops
# sugar.capture doesn't work in Nim <1.6
proc triggerSubscriberCallback(subscriber: EventBusKey) =
callSoon(proc(udata: pointer) =
subscriber.cb(bus, event, subscriber, payload)
)
bus.events.withValue(eventKey, item):
# Schedule waiters which are waiting for the event ``event``.
for waiter in item.waiters:
var fut = cast[Future[T]](waiter)
fut.complete(data)
# Clear all the waiters.
item.waiters.setLen(0)
# Schedule subscriber's callbacks, which are subscribed to the event.
for subscriber in item.subscribers:
triggerSubscriberCallback(subscriber)
# Schedule waiters which are waiting all events
for waiter in bus.waiters:
waiter.complete(AwaitableEvent(eventName: event, payload: payload))
# Clear all the waiters.
bus.waiters.setLen(0)
# Schedule subscriber's callbacks which are subscribed to all events.
for subscriber in bus.subscribers:
triggerSubscriberCallback(subscriber)
template emit*[T](bus: AsyncEventBus, event: string, data: T) {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue instead".} =
## Emit new event ``event`` to the eventbus ``bus`` with payload ``data``.
emit(bus, event, data, getSrcLocation())
proc emitWait[T](bus: AsyncEventBus, event: string, data: T,
loc: ptr SrcLoc): Future[void] =
var retFuture = newFuture[void]("AsyncEventBus.emitWait")
proc continuation(udata: pointer) {.gcsafe.} =
if not(retFuture.finished()):
retFuture.complete()
emit(bus, event, data, loc)
callSoon(continuation)
return retFuture
template emitWait*[T](bus: AsyncEventBus, event: string,
data: T): Future[void] {.
deprecated: "Implementation has unfixable flaws, please use " &
"AsyncEventQueue instead".} =
## Emit new event ``event`` to the eventbus ``bus`` with payload ``data`` and
## wait until all the subscribers/waiters have received the notification about
## the event.
emitWait(bus, event, data, getSrcLocation())
proc `==`(a, b: EventQueueKey): bool {.borrow.}
proc compact(ab: AsyncEventQueue) {.raises: [].} =
@ -680,8 +463,7 @@ proc compact(ab: AsyncEventQueue) {.raises: [].} =
else:
ab.queue.clear()
proc getReaderIndex(ab: AsyncEventQueue, key: EventQueueKey): int {.
raises: [].} =
proc getReaderIndex(ab: AsyncEventQueue, key: EventQueueKey): int =
for index, value in ab.readers.pairs():
if value.key == key:
return index
@ -735,14 +517,21 @@ proc close*(ab: AsyncEventQueue) {.raises: [].} =
ab.readers.reset()
ab.queue.clear()
proc closeWait*(ab: AsyncEventQueue): Future[void] {.raises: [].} =
var retFuture = newFuture[void]("AsyncEventQueue.closeWait()")
proc closeWait*(ab: AsyncEventQueue): Future[void] {.
async: (raw: true, raises: []).} =
let retFuture = newFuture[void]("AsyncEventQueue.closeWait()",
{FutureFlag.OwnCancelSchedule})
proc continuation(udata: pointer) {.gcsafe.} =
if not(retFuture.finished()):
retFuture.complete()
retFuture.complete()
proc cancellation(udata: pointer) {.gcsafe.} =
# We are not going to change the state of `retFuture` to cancelled, so we
# will prevent the entire sequence of Futures from being cancelled.
discard
ab.close()
# Schedule `continuation` to be called only after all the `reader`
# notifications have been scheduled and processed.
retFuture.cancelCallback = cancellation
callSoon(continuation)
retFuture
@ -750,7 +539,7 @@ template readerOverflow*(ab: AsyncEventQueue,
reader: EventQueueReader): bool =
ab.limit + (reader.offset - ab.offset) <= len(ab.queue)
proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [].} =
proc emit*[T](ab: AsyncEventQueue[T], data: T) =
if len(ab.readers) > 0:
# We enqueue `data` only if there is an active reader present.
var changesPresent = false
@ -787,7 +576,8 @@ proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [].} =
proc waitEvents*[T](ab: AsyncEventQueue[T],
key: EventQueueKey,
eventsCount = -1): Future[seq[T]] {.async.} =
eventsCount = -1): Future[seq[T]] {.
async: (raises: [AsyncEventQueueFullError, CancelledError]).} =
## Wait for events
var
events: seq[T]
@ -817,7 +607,8 @@ proc waitEvents*[T](ab: AsyncEventQueue[T],
doAssert(length >= ab.readers[index].offset)
if length == ab.readers[index].offset:
# We are at the end of the queue, which means we should wait for new events.
let waitFuture = newFuture[void]("AsyncEventQueue.waitEvents")
let waitFuture = Future[void].Raising([CancelledError]).init(
"AsyncEventQueue.waitEvents")
ab.readers[index].waiter = waitFuture
resetFuture = true
await waitFuture
@ -848,4 +639,4 @@ proc waitEvents*[T](ab: AsyncEventQueue[T],
if (eventsCount <= 0) or (len(events) == eventsCount):
break
return events
events
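# Illustrative usage sketch, assuming `chronos` is imported and the
# register()/unregister() helpers of AsyncEventQueue: readers register with
# the queue, producers emit() values, and waitEvents() returns the items
# published since the reader's current offset.
proc eventQueueExample() {.async.} =
  let eq = newAsyncEventQueue[int]()
  let key = eq.register()
  eq.emit(1)
  eq.emit(2)
  let items = await eq.waitEvents(key, 2)
  doAssert items == @[1, 2]
  eq.unregister(key)
  await eq.closeWait()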

View File

@ -11,70 +11,85 @@
## `chronosDebug` can be defined to enable several debugging helpers that come
## with a runtime cost - it is recommended to not enable these in production
## code.
when (NimMajor, NimMinor) >= (1, 4):
const
chronosStrictException* {.booldefine.}: bool = defined(chronosPreviewV4)
## Require that `async` code raises only derivatives of `CatchableError`
## and not `Exception` - forward declarations, methods and `proc` types
## used from within `async` code may need to be be explicitly annotated
## with `raises: [CatchableError]` when this mode is enabled.
const
chronosHandleException* {.booldefine.}: bool = false
## Remap `Exception` to `AsyncExceptionError` for all `async` functions.
##
## This mode provides backwards compatibility when using functions with
## inaccurate `{.raises.}` effects such as unannotated forward declarations,
## methods and `proc` types - it is recommended to annotate such code
## explicitly as the `Exception` handling mode may introduce surprising
## behavior in exception handlers, should `Exception` actually be raised.
##
## The setting provides the default for the per-function-based
## `handleException` parameter which has precedence over this global setting.
##
## `Exception` handling may be removed in future chronos versions.
chronosStrictFutureAccess* {.booldefine.}: bool = defined(chronosPreviewV4)
chronosStrictFutureAccess* {.booldefine.}: bool = defined(chronosPreviewV4)
chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug)
## Include stack traces in futures for creation and completion points
chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug)
## Include stack traces in futures for creation and completion points
chronosFutureId* {.booldefine.}: bool = defined(chronosDebug)
## Generate a unique `id` for every future - when disabled, the address of
## the future will be used instead
chronosFutureId* {.booldefine.}: bool = defined(chronosDebug)
## Generate a unique `id` for every future - when disabled, the address of
## the future will be used instead
chronosFutureTracking* {.booldefine.}: bool = defined(chronosDebug)
## Keep track of all pending futures and allow iterating over them -
## useful for detecting hung tasks
chronosFutureTracking* {.booldefine.}: bool = defined(chronosDebug)
## Keep track of all pending futures and allow iterating over them -
## useful for detecting hung tasks
chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync)
## Print code generated by {.async.} transformation
chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync)
## Print code generated by {.async.} transformation
chronosProcShell* {.strdefine.}: string =
when defined(windows):
"cmd.exe"
chronosProcShell* {.strdefine.}: string =
when defined(windows):
"cmd.exe"
else:
when defined(android):
"/system/bin/sh"
else:
when defined(android):
"/system/bin/sh"
else:
"/bin/sh"
## Default shell binary path.
##
## The shell is used as the command-line interpreter when a process is started
## using `AsyncProcessOption.EvalCommand` and API calls such as
## ``execCommand(command)`` and ``execCommandEx(command)``.
"/bin/sh"
## Default shell binary path.
##
## The shell is used as the command-line interpreter when a process is started
## using `AsyncProcessOption.EvalCommand` and API calls such as
## ``execCommand(command)`` and ``execCommandEx(command)``.
chronosProfiling* {.booldefine.} = defined(chronosProfiling)
## Enable instrumentation callbacks which are called at
## the start, pause, or end of a Future's lifetime.
## Useful for implementing metrics or other instrumentation.
chronosProfiling* {.booldefine.} = defined(chronosProfiling)
## Enable instrumentation callbacks which are called at
## the start, pause, or end of a Future's lifetime.
## Useful for implementing metrics or other instrumentation.
else:
# 1.2 doesn't support `booldefine` in `when` properly
const
chronosStrictException*: bool =
defined(chronosPreviewV4) or defined(chronosStrictException)
chronosStrictFutureAccess*: bool =
defined(chronosPreviewV4) or defined(chronosStrictFutureAccess)
chronosStackTrace*: bool = defined(chronosDebug) or defined(chronosStackTrace)
chronosFutureId*: bool = defined(chronosDebug) or defined(chronosFutureId)
chronosFutureTracking*: bool =
defined(chronosDebug) or defined(chronosFutureTracking)
chronosDumpAsync*: bool = defined(nimDumpAsync)
chronosProfiling*: bool = defined(chronosProfiling)
chronosProcShell* {.strdefine.}: string =
when defined(windows):
"cmd.exe"
else:
when defined(android):
"/system/bin/sh"
else:
"/bin/sh"
chronosEventsCount* {.intdefine.} = 64
## Number of OS poll events retrieved by syscall (epoll, kqueue, poll).
chronosInitialSize* {.intdefine.} = 64
## Initial size of Selector[T]'s array of file descriptors.
chronosEventEngine* {.strdefine.}: string =
when defined(nimdoc):
""
elif defined(linux) and not(defined(android) or defined(emscripten)):
"epoll"
elif defined(macosx) or defined(macos) or defined(ios) or
defined(freebsd) or defined(netbsd) or defined(openbsd) or
defined(dragonfly):
"kqueue"
elif defined(android) or defined(emscripten):
"poll"
elif defined(posix):
"poll"
else:
""
## OS polling engine type which is going to be used by chronos.
when defined(chronosStrictException):
{.warning: "-d:chronosStrictException has been deprecated in favor of handleException".}
# In chronos v3, this setting was used as the opposite of
# `chronosHandleException` - the setting is deprecated to encourage
# migration to the new mode.
when defined(debug) or defined(chronosConfig):
import std/macros
@ -83,9 +98,49 @@ when defined(debug) or defined(chronosConfig):
hint("Chronos configuration:")
template printOption(name: string, value: untyped) =
hint(name & ": " & $value)
printOption("chronosStrictException", chronosStrictException)
printOption("chronosHandleException", chronosHandleException)
printOption("chronosStackTrace", chronosStackTrace)
printOption("chronosFutureId", chronosFutureId)
printOption("chronosFutureTracking", chronosFutureTracking)
printOption("chronosDumpAsync", chronosDumpAsync)
printOption("chronosProcShell", chronosProcShell)
printOption("chronosEventEngine", chronosEventEngine)
printOption("chronosEventsCount", chronosEventsCount)
printOption("chronosInitialSize", chronosInitialSize)
# In nim 1.6, `sink` + local variable + `move` generates the best code for
# moving a proc parameter into a closure - this only works for closure
# procedures however - in closure iterators, the parameter is always copied
# into the closure (!) meaning that non-raw `{.async.}` functions always carry
# this overhead, sink or no. See usages of chronosMoveSink for examples.
# In addition, we need to work around https://github.com/nim-lang/Nim/issues/22175
# which has not been backported to 1.6.
# Long story short, the workaround is not needed in non-raw {.async.} because
# a copy of the literal is always made.
# TODO review the above for 2.0 / 2.0+refc
type
SeqHeader = object
length, reserved: int
proc isLiteral(s: string): bool {.inline.} =
when defined(gcOrc) or defined(gcArc):
false
else:
s.len > 0 and (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
proc isLiteral[T](s: seq[T]): bool {.inline.} =
when defined(gcOrc) or defined(gcArc):
false
else:
s.len > 0 and (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
template chronosMoveSink*(val: auto): untyped =
bind isLiteral
when not (defined(gcOrc) or defined(gcArc)) and val is seq|string:
if isLiteral(val):
val
else:
move(val)
else:
move(val)

View File

@ -17,11 +17,6 @@ export srcloc
when chronosStackTrace:
type StackTrace = string
when chronosStrictException:
{.pragma: closureIter, raises: [CatchableError], gcsafe.}
else:
{.pragma: closureIter, raises: [Exception], gcsafe.}
type
LocationKind* {.pure.} = enum
Create
@ -37,6 +32,11 @@ type
FutureState* {.pure.} = enum
Pending, Completed, Cancelled, Failed
FutureFlag* {.pure.} = enum
OwnCancelSchedule
FutureFlags* = set[FutureFlag]
InternalFutureBase* = object of RootObj
# Internal untyped future representation - the fields are not part of the
# public API and neither is `InternalFutureBase`, ie the inheritance
@ -47,9 +47,9 @@ type
internalCancelcb*: CallbackFunc
internalChild*: FutureBase
internalState*: FutureState
internalFlags*: FutureFlags
internalError*: ref CatchableError ## Stored exception
internalMustCancel*: bool
internalClosure*: iterator(f: FutureBase): FutureBase {.closureIter.}
internalClosure*: iterator(f: FutureBase): FutureBase {.raises: [], gcsafe.}
when chronosFutureId:
internalId*: uint
@ -73,10 +73,15 @@ type
cause*: FutureBase
FutureError* = object of CatchableError
future*: FutureBase
CancelledError* = object of FutureError
## Exception raised when accessing the value of a cancelled future
func raiseFutureDefect(msg: static string, fut: FutureBase) {.
noinline, noreturn.} =
raise (ref FutureDefect)(msg: msg, cause: fut)
when chronosFutureId:
var currentID* {.threadvar.}: uint
template id*(fut: FutureBase): uint = fut.internalId
@ -101,12 +106,11 @@ when chronosProfiling:
var onAsyncFutureEvent* {.threadvar.}: proc(fut: FutureBase, state: AsyncFutureState): void {.nimcall, gcsafe, raises: [].}
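# Illustrative sketch (assumes -d:chronosProfiling and the AsyncFutureState
# enum declared alongside this callback): instrumentation is installed by
# assigning the thread-local hook; the hypothetical counter below only shows
# the required shape of the handler.
when chronosProfiling:
  var futureEventCount: int
  onAsyncFutureEvent = proc(fut: FutureBase, state: AsyncFutureState) {.
      nimcall, gcsafe, raises: [].} =
    inc(futureEventCount)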
# Internal utilities - these are not part of the stable API
proc internalInitFutureBase*(
fut: FutureBase,
loc: ptr SrcLoc,
state: FutureState) =
proc internalInitFutureBase*(fut: FutureBase, loc: ptr SrcLoc,
state: FutureState, flags: FutureFlags) =
fut.internalState = state
fut.internalLocation[LocationKind.Create] = loc
fut.internalFlags = flags
if state != FutureState.Pending:
fut.internalLocation[LocationKind.Finish] = loc
@ -139,21 +143,34 @@ template init*[T](F: type Future[T], fromProc: static[string] = ""): Future[T] =
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
let res = Future[T]()
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending)
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {})
res
template init*[T](F: type Future[T], fromProc: static[string] = "",
flags: static[FutureFlags]): Future[T] =
## Creates a new pending future.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
let res = Future[T]()
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending,
flags)
res
template completed*(
F: type Future, fromProc: static[string] = ""): Future[void] =
## Create a new completed future
let res = Future[T]()
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed)
let res = Future[void]()
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed,
{})
res
template completed*[T: not void](
F: type Future, valueParam: T, fromProc: static[string] = ""): Future[T] =
## Create a new completed future
let res = Future[T](internalValue: valueParam)
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed)
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed,
{})
res
template failed*[T](
@ -161,19 +178,21 @@ template failed*[T](
fromProc: static[string] = ""): Future[T] =
## Create a new failed future
let res = Future[T](internalError: errorParam)
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Failed)
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Failed, {})
when chronosStackTrace:
res.internalErrorStackTrace =
if getStackTrace(res.error) == "":
getStackTrace()
else:
getStackTrace(res.error)
res
func state*(future: FutureBase): FutureState =
future.internalState
func flags*(future: FutureBase): FutureFlags =
future.internalFlags
func finished*(future: FutureBase): bool {.inline.} =
## Determines whether ``future`` has finished, i.e. ``future`` state changed
## from state ``Pending`` to one of the states (``Finished``, ``Cancelled``,
@ -195,20 +214,27 @@ func completed*(future: FutureBase): bool {.inline.} =
func location*(future: FutureBase): array[LocationKind, ptr SrcLoc] =
future.internalLocation
func value*[T](future: Future[T]): T =
func value*[T: not void](future: Future[T]): lent T =
## Return the value in a completed future - raises Defect when
## `fut.completed()` is `false`.
##
## See `read` for a version that raises an catchable error when future
## See `read` for a version that raises a catchable error when future
## has not completed.
when chronosStrictFutureAccess:
if not future.completed():
raise (ref FutureDefect)(
msg: "Future not completed while accessing value",
cause: future)
raiseFutureDefect("Future not completed while accessing value", future)
when T isnot void:
future.internalValue
future.internalValue
func value*(future: Future[void]) =
## Return the value in a completed future - raises Defect when
## `fut.completed()` is `false`.
##
## See `read` for a version that raises a catchable error when future
## has not completed.
when chronosStrictFutureAccess:
if not future.completed():
raiseFutureDefect("Future not completed while accessing value", future)
func error*(future: FutureBase): ref CatchableError =
## Return the error of `future`, or `nil` if future did not fail.
@ -217,9 +243,8 @@ func error*(future: FutureBase): ref CatchableError =
## future has not failed.
when chronosStrictFutureAccess:
if not future.failed() and not future.cancelled():
raise (ref FutureDefect)(
msg: "Future not failed/cancelled while accessing error",
cause: future)
raiseFutureDefect(
"Future not failed/cancelled while accessing error", future)
future.internalError
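# Illustrative sketch, assuming `chronos` is imported: manually creating,
# completing and reading a future.
proc futureInitExample() =
  let fut = Future[int].init("example.futureInitExample")
  doAssert not fut.finished()
  fut.complete(42)
  doAssert fut.completed()
  doAssert fut.value == 42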

View File

@ -10,7 +10,7 @@
{.push raises: [].}
import "."/[asyncloop, osdefs, osutils]
import stew/results
import results
from nativesockets import Domain, Protocol, SockType, toInt
export Domain, Protocol, SockType, results
@ -21,66 +21,113 @@ const
asyncInvalidSocket* = AsyncFD(osdefs.INVALID_SOCKET)
asyncInvalidPipe* = asyncInvalidSocket
proc setSocketBlocking*(s: SocketHandle, blocking: bool): bool =
proc setSocketBlocking*(s: SocketHandle, blocking: bool): bool {.
deprecated: "Please use setDescriptorBlocking() instead".} =
## Sets blocking mode on socket.
when defined(windows) or defined(nimdoc):
var mode = clong(ord(not blocking))
if osdefs.ioctlsocket(s, osdefs.FIONBIO, addr(mode)) == -1:
false
else:
true
else:
let x: int = osdefs.fcntl(s, osdefs.F_GETFL, 0)
if x == -1:
false
else:
let mode =
if blocking: x and not osdefs.O_NONBLOCK else: x or osdefs.O_NONBLOCK
if osdefs.fcntl(s, osdefs.F_SETFL, mode) == -1:
false
else:
true
setDescriptorBlocking(s, blocking).isOkOr:
return false
true
proc setSockOpt*(socket: AsyncFD, level, optname, optval: int): bool =
## `setsockopt()` for integer options.
## Returns ``true`` on success, ``false`` on error.
proc setSockOpt2*(socket: AsyncFD,
level, optname, optval: int): Result[void, OSErrorCode] =
var value = cint(optval)
osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname),
addr(value), SockLen(sizeof(value))) >= cint(0)
let res = osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname),
addr(value), SockLen(sizeof(value)))
if res == -1:
return err(osLastError())
ok()
proc setSockOpt*(socket: AsyncFD, level, optname: int, value: pointer,
valuelen: int): bool =
proc setSockOpt2*(socket: AsyncFD, level, optname: int, value: pointer,
valuelen: int): Result[void, OSErrorCode] =
## `setsockopt()` for custom options (pointer and length).
## Returns an empty ``Result`` on success or an ``OSErrorCode`` on error.
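##
## A minimal usage sketch for the Result-based setters (illustrative; `fd` is
## assumed to be an already registered `AsyncFD` socket):
##
## .. code-block:: nim
##   let res = setSockOpt2(fd, int(osdefs.SOL_SOCKET),
##                         int(osdefs.SO_REUSEADDR), 1)
##   doAssert res.isOk(), "setsockopt failed"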
osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname), value,
SockLen(valuelen)) >= cint(0)
let res = osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname),
value, SockLen(valuelen))
if res == -1:
return err(osLastError())
ok()
proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var int): bool =
proc setSockOpt*(socket: AsyncFD, level, optname, optval: int): bool {.
deprecated: "Please use setSockOpt2() instead".} =
## `setsockopt()` for integer options.
## Returns ``true`` on success, ``false`` on error.
setSockOpt2(socket, level, optname, optval).isOk
proc setSockOpt*(socket: AsyncFD, level, optname: int, value: pointer,
valuelen: int): bool {.
deprecated: "Please use setSockOpt2() instead".} =
## `setsockopt()` for custom options (pointer and length).
## Returns ``true`` on success, ``false`` on error.
setSockOpt2(socket, level, optname, value, valuelen).isOk
proc getSockOpt2*(socket: AsyncFD,
level, optname: int): Result[cint, OSErrorCode] =
var
value: cint
size = SockLen(sizeof(value))
let res = osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname),
addr(value), addr(size))
if res == -1:
return err(osLastError())
ok(value)
proc getSockOpt2*(socket: AsyncFD, level, optname: int,
T: type): Result[T, OSErrorCode] =
var
value = default(T)
size = SockLen(sizeof(value))
let res = osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname),
cast[ptr byte](addr(value)), addr(size))
if res == -1:
return err(osLastError())
ok(value)
proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var int): bool {.
deprecated: "Please use getSockOpt2() instead".} =
## `getsockopt()` for integer options.
## Returns ``true`` on success, ``false`` on error.
var res: cint
var size = SockLen(sizeof(res))
if osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname),
addr(res), addr(size)) >= cint(0):
value = int(res)
true
else:
false
value = getSockOpt2(socket, level, optname).valueOr:
return false
true
proc getSockOpt*(socket: AsyncFD, level, optname: int, value: pointer,
valuelen: var int): bool =
proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var pointer,
valuelen: var int): bool {.
deprecated: "Please use getSockOpt2() instead".} =
## `getsockopt()` for custom options (pointer and length).
## Returns ``true`` on success, ``false`` on error.
osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname),
value, cast[ptr SockLen](addr valuelen)) >= cint(0)
proc getSocketError*(socket: AsyncFD, err: var int): bool =
proc getSocketError*(socket: AsyncFD, err: var int): bool {.
deprecated: "Please use getSocketError() instead".} =
## Recover error code associated with socket handle ``socket``.
getSockOpt(socket, cint(osdefs.SOL_SOCKET), cint(osdefs.SO_ERROR), err)
err = getSockOpt2(socket, cint(osdefs.SOL_SOCKET),
cint(osdefs.SO_ERROR)).valueOr:
return false
true
proc getSocketError2*(socket: AsyncFD): Result[cint, OSErrorCode] =
getSockOpt2(socket, cint(osdefs.SOL_SOCKET), cint(osdefs.SO_ERROR))
proc isAvailable*(domain: Domain): bool =
when defined(windows):
let fd = wsaSocket(toInt(domain), toInt(SockType.SOCK_STREAM),
toInt(Protocol.IPPROTO_TCP), nil, GROUP(0), 0'u32)
if fd == osdefs.INVALID_SOCKET:
return if osLastError() == osdefs.WSAEAFNOSUPPORT: false else: true
discard closeFd(fd)
true
else:
let fd = osdefs.socket(toInt(domain), toInt(SockType.SOCK_STREAM),
toInt(Protocol.IPPROTO_TCP))
if fd == -1:
return if osLastError() == osdefs.EAFNOSUPPORT: false else: true
discard closeFd(fd)
true
proc createAsyncSocket2*(domain: Domain, sockType: SockType,
protocol: Protocol,
inherit = true): Result[AsyncFD, OSErrorCode] =
protocol: Protocol,
inherit = true): Result[AsyncFD, OSErrorCode] =
## Creates new asynchronous socket.
when defined(windows):
let flags =
@ -93,15 +140,12 @@ proc createAsyncSocket2*(domain: Domain, sockType: SockType,
if fd == osdefs.INVALID_SOCKET:
return err(osLastError())
let bres = setDescriptorBlocking(fd, false)
if bres.isErr():
setDescriptorBlocking(fd, false).isOkOr:
discard closeFd(fd)
return err(bres.error())
let res = register2(AsyncFD(fd))
if res.isErr():
return err(error)
register2(AsyncFD(fd)).isOkOr:
discard closeFd(fd)
return err(res.error())
return err(error)
ok(AsyncFD(fd))
else:
@ -114,23 +158,20 @@ proc createAsyncSocket2*(domain: Domain, sockType: SockType,
let fd = osdefs.socket(toInt(domain), socketType, toInt(protocol))
if fd == -1:
return err(osLastError())
let res = register2(AsyncFD(fd))
if res.isErr():
register2(AsyncFD(fd)).isOkOr:
discard closeFd(fd)
return err(res.error())
return err(error)
ok(AsyncFD(fd))
else:
let fd = osdefs.socket(toInt(domain), toInt(sockType), toInt(protocol))
if fd == -1:
return err(osLastError())
let bres = setDescriptorFlags(cint(fd), true, true)
if bres.isErr():
setDescriptorFlags(cint(fd), true, true).isOkOr:
discard closeFd(fd)
return err(bres.error())
let res = register2(AsyncFD(fd))
if res.isErr():
return err(error)
register2(AsyncFD(fd)).isOkOr:
discard closeFd(fd)
return err(bres.error())
return err(error)
ok(AsyncFD(fd))
proc wrapAsyncSocket2*(sock: cint|SocketHandle): Result[AsyncFD, OSErrorCode] =
@ -230,3 +271,26 @@ proc createAsyncPipe*(): tuple[read: AsyncFD, write: AsyncFD] =
else:
let pipes = res.get()
(read: AsyncFD(pipes.read), write: AsyncFD(pipes.write))
proc getDualstack*(fd: AsyncFD): Result[bool, OSErrorCode] =
## Returns `true` if the `IPV6_V6ONLY` socket option is set to `false`.
var
flag = cint(0)
size = SockLen(sizeof(flag))
let res = osdefs.getsockopt(SocketHandle(fd), cint(osdefs.IPPROTO_IPV6),
cint(osdefs.IPV6_V6ONLY), addr(flag), addr(size))
if res == -1:
return err(osLastError())
ok(flag == cint(0))
proc setDualstack*(fd: AsyncFD, value: bool): Result[void, OSErrorCode] =
## Sets the `IPV6_V6ONLY` socket option to `false` when `value == true`
## (dual-stack enabled) and to `true` when `value == false`.
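##
## A minimal sketch of toggling and inspecting the option (illustrative; `fd`
## is assumed to be an IPv6 `AsyncFD` socket created with
## `createAsyncSocket2`):
##
## .. code-block:: nim
##   setDualstack(fd, true).isOkOr:
##     raiseAssert "unable to enable dual-stack mode"
##   doAssert getDualstack(fd).get(false) == true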
var
flag = cint(if value: 0 else: 1)
size = SockLen(sizeof(flag))
let res = osdefs.setsockopt(SocketHandle(fd), cint(osdefs.IPPROTO_IPV6),
cint(osdefs.IPV6_V6ONLY), addr(flag), size)
if res == -1:
return err(osLastError())
ok()

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,590 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2015 Dominik Picheta
# (c) Copyright 2018-Present Status Research & Development GmbH
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
import
std/[macros],
../[futures, config],
./raisesfutures
proc processBody(node, setResultSym: NimNode): NimNode {.compileTime.} =
case node.kind
of nnkReturnStmt:
# `return ...` -> `setResult(...); return`
let
res = newNimNode(nnkStmtList, node)
if node[0].kind != nnkEmpty:
res.add newCall(setResultSym, processBody(node[0], setResultSym))
res.add newNimNode(nnkReturnStmt, node).add(newEmptyNode())
res
of RoutineNodes-{nnkTemplateDef}:
# Skip nested routines since they have their own return value distinct from
# the Future we inject
node
else:
if node.kind == nnkYieldStmt:
# asyncdispatch allows `yield` but this breaks cancellation
warning(
"`yield` in async procedures not supported - use `awaitne` instead",
node)
for i in 0 ..< node.len:
node[i] = processBody(node[i], setResultSym)
node
proc wrapInTryFinally(
fut, baseType, body, raises: NimNode,
handleException: bool): NimNode {.compileTime.} =
# creates:
# try: `body`
# [for raise in raises]:
# except `raise`: closureSucceeded = false; `castFutureSym`.fail(exc)
# finally:
# if closureSucceeded:
# `castFutureSym`.complete(result)
#
# Calling `complete` inside `finally` ensures that all success paths
# (including early returns and code inside nested finally statements and
# defer) are completed with the final contents of `result`
let
closureSucceeded = genSym(nskVar, "closureSucceeded")
nTry = nnkTryStmt.newTree(body)
excName = ident"exc"
# Depending on the exception type, we must have at most one of each of these
# "special" exception handlers that are needed to implement cancellation and
# Defect propagation
var
hasDefect = false
hasCancelledError = false
hasCatchableError = false
template addDefect =
if not hasDefect:
hasDefect = true
# When a Defect is raised, the program is in an undefined state, and
# continuing to run other tasks while the Future completion sits on the
# callback queue may lead to further damage, so we re-raise it eagerly.
nTry.add nnkExceptBranch.newTree(
nnkInfix.newTree(ident"as", ident"Defect", excName),
nnkStmtList.newTree(
nnkAsgn.newTree(closureSucceeded, ident"false"),
nnkRaiseStmt.newTree(excName)
)
)
template addCancelledError =
if not hasCancelledError:
hasCancelledError = true
nTry.add nnkExceptBranch.newTree(
ident"CancelledError",
nnkStmtList.newTree(
nnkAsgn.newTree(closureSucceeded, ident"false"),
newCall(ident "cancelAndSchedule", fut)
)
)
template addCatchableError =
if not hasCatchableError:
hasCatchableError = true
nTry.add nnkExceptBranch.newTree(
nnkInfix.newTree(ident"as", ident"CatchableError", excName),
nnkStmtList.newTree(
nnkAsgn.newTree(closureSucceeded, ident"false"),
newCall(ident "fail", fut, excName)
))
var raises = if raises == nil:
nnkTupleConstr.newTree(ident"CatchableError")
elif isNoRaises(raises):
nnkTupleConstr.newTree()
else:
raises.copyNimTree()
if handleException:
raises.add(ident"Exception")
for exc in raises:
if exc.eqIdent("Exception"):
addCancelledError
addCatchableError
addDefect
# Because we store `CatchableError` in the Future, we cannot re-raise the
# original exception
nTry.add nnkExceptBranch.newTree(
nnkInfix.newTree(ident"as", ident"Exception", excName),
newCall(ident "fail", fut,
nnkStmtList.newTree(
nnkAsgn.newTree(closureSucceeded, ident"false"),
quote do:
(ref AsyncExceptionError)(
msg: `excName`.msg, parent: `excName`)))
)
elif exc.eqIdent("CancelledError"):
addCancelledError
elif exc.eqIdent("CatchableError"):
# Ensure cancellations are re-routed to the cancellation handler even if
# not explicitly specified in the raises list
addCancelledError
addCatchableError
else:
nTry.add nnkExceptBranch.newTree(
nnkInfix.newTree(ident"as", exc, excName),
nnkStmtList.newTree(
nnkAsgn.newTree(closureSucceeded, ident"false"),
newCall(ident "fail", fut, excName)
))
addDefect # Must not complete future on defect
nTry.add nnkFinally.newTree(
nnkIfStmt.newTree(
nnkElifBranch.newTree(
closureSucceeded,
if baseType.eqIdent("void"): # shortcut for non-generic void
newCall(ident "complete", fut)
else:
nnkWhenStmt.newTree(
nnkElifExpr.newTree(
nnkInfix.newTree(ident "is", baseType, ident "void"),
newCall(ident "complete", fut)
),
nnkElseExpr.newTree(
newCall(ident "complete", fut, newCall(ident "move", ident "result"))
)
)
)
)
)
nnkStmtList.newTree(
newVarStmt(closureSucceeded, ident"true"),
nTry
)
proc getName(node: NimNode): string {.compileTime.} =
case node.kind
of nnkSym:
return node.strVal
of nnkPostfix:
return node[1].strVal
of nnkIdent:
return node.strVal
of nnkEmpty:
return "anonymous"
else:
error("Unknown name.")
macro unsupported(s: static[string]): untyped =
error s
proc params2(someProc: NimNode): NimNode {.compileTime.} =
# until https://github.com/nim-lang/Nim/pull/19563 is available
if someProc.kind == nnkProcTy:
someProc[0]
else:
params(someProc)
proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} =
# Replace every Call -> OpenSymChoice by a Bracket expr
# ref https://github.com/nim-lang/Nim/issues/11091
if node.kind in nnkCallKinds and
node[0].kind == nnkOpenSymChoice and node[0].eqIdent("[]"):
result = newNimNode(nnkBracketExpr)
for child in node[1..^1]:
result.add(cleanupOpenSymChoice(child))
else:
result = node.copyNimNode()
for child in node:
result.add(cleanupOpenSymChoice(child))
type
AsyncParams = tuple
raw: bool
raises: NimNode
handleException: bool
proc decodeParams(params: NimNode): AsyncParams =
# decodes the parameter tuple given in `async: (name: value, ...)` to its
# recognised parts
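#
# For example (illustrative), the pragma
# `{.async: (raises: [ValueError], handleException: true).}` reaches this
# proc as the tuple constructor
# `(raises: [ValueError], handleException: true)` and decodes to
# `raw = false`, `raises = (ValueError,)` and `handleException = true`.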
params.expectKind(nnkTupleConstr)
var
raw = false
raises: NimNode = nil
handleException = chronosHandleException
for param in params:
param.expectKind(nnkExprColonExpr)
if param[0].eqIdent("raises"):
param[1].expectKind(nnkBracket)
if param[1].len == 0:
raises = makeNoRaises()
else:
raises = nnkTupleConstr.newTree()
for possibleRaise in param[1]:
raises.add(possibleRaise)
elif param[0].eqIdent("raw"):
# boolVal doesn't work in untyped macros, it seems
raw = param[1].eqIdent("true")
elif param[0].eqIdent("handleException"):
handleException = param[1].eqIdent("true")
else:
warning("Unrecognised async parameter: " & repr(param[0]), param)
(raw, raises, handleException)
proc isEmpty(n: NimNode): bool {.compileTime.} =
# true iff node recursively contains only comments or empties
case n.kind
of nnkEmpty, nnkCommentStmt: true
of nnkStmtList:
for child in n:
if not isEmpty(child): return false
true
else:
false
proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} =
## This macro transforms a single procedure into a closure iterator.
## The ``async`` macro supports a stmtList holding multiple async procedures.
if prc.kind notin {nnkProcTy, nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}:
error("Cannot transform " & $prc.kind & " into an async proc." &
" proc/method definition or lambda node expected.", prc)
for pragma in prc.pragma():
if pragma.kind == nnkExprColonExpr and pragma[0].eqIdent("raises"):
warning("The raises pragma doesn't work on async procedures - use " &
"`async: (raises: [...]) instead.", prc)
let returnType = cleanupOpenSymChoice(prc.params2[0])
# Verify that the return type is a Future[T]
let baseType =
if returnType.kind == nnkEmpty:
ident "void"
elif not (
returnType.kind == nnkBracketExpr and
(eqIdent(returnType[0], "Future") or eqIdent(returnType[0], "InternalRaisesFuture"))):
error(
"Expected return type of 'Future' got '" & repr(returnType) & "'", prc)
return
else:
returnType[1]
let
# When the base type is known to be void (and not generic), we can simplify
# code generation - however, in the case of generic async procedures it
# could still end up being void, meaning void detection needs to happen
# post-macro-expansion.
baseTypeIsVoid = baseType.eqIdent("void")
(raw, raises, handleException) = decodeParams(params)
internalFutureType =
if baseTypeIsVoid:
newNimNode(nnkBracketExpr, prc).
add(newIdentNode("Future")).
add(baseType)
else:
returnType
internalReturnType = if raises == nil:
internalFutureType
else:
nnkBracketExpr.newTree(
newIdentNode("InternalRaisesFuture"),
baseType,
raises
)
prc.params2[0] = internalReturnType
if prc.kind notin {nnkProcTy, nnkLambda}:
prc.addPragma(newColonExpr(ident "stackTrace", ident "off"))
# The proc itself doesn't raise
prc.addPragma(
nnkExprColonExpr.newTree(newIdentNode("raises"), nnkBracket.newTree()))
# `gcsafe` isn't deduced even though we require async code to be gcsafe
# https://github.com/nim-lang/RFCs/issues/435
prc.addPragma(newIdentNode("gcsafe"))
if raw: # raw async = body is left as-is
if raises != nil and prc.kind notin {nnkProcTy, nnkLambda} and not isEmpty(prc.body):
# Inject `raises` type marker that causes `newFuture` to return a raise-
# tracking future instead of an ordinary future:
#
# type InternalRaisesFutureRaises = `raisesTuple`
# `body`
prc.body = nnkStmtList.newTree(
nnkTypeSection.newTree(
nnkTypeDef.newTree(
nnkPragmaExpr.newTree(
ident"InternalRaisesFutureRaises",
nnkPragma.newTree(ident "used")),
newEmptyNode(),
raises,
)
),
prc.body
)
elif prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo} and
not isEmpty(prc.body):
let
setResultSym = ident "setResult"
procBody = prc.body.processBody(setResultSym)
resultIdent = ident "result"
fakeResult = quote do:
template result: auto {.used.} =
{.fatal: "You should not reference the `result` variable inside" &
" a void async proc".}
resultDecl =
if baseTypeIsVoid: fakeResult
else: nnkWhenStmt.newTree(
# when `baseType` is void:
nnkElifExpr.newTree(
nnkInfix.newTree(ident "is", baseType, ident "void"),
fakeResult
),
# else:
nnkElseExpr.newTree(
newStmtList(
quote do: {.push warning[resultshadowed]: off.},
# var result {.used.}: `baseType`
# In the proc body, result may or may not end up being used
# depending on how the body is written - with implicit returns /
# expressions in particular, it is likely but not guaranteed that
# it is not used. Ideally, we would avoid emitting it in this
# case to avoid the default initialization. {.used.} typically
# works better than {.push.} which has a tendency to leak out of
# scope.
# TODO figure out if there's a way to detect `result` usage in
# the proc body _after_ template expansion, and therefore
# avoid creating this variable - one option is to create an
# additional when branch with a fake `result` and check
# `compiles(procBody)` - this is not without cost though
nnkVarSection.newTree(nnkIdentDefs.newTree(
nnkPragmaExpr.newTree(
resultIdent,
nnkPragma.newTree(ident "used")),
baseType, newEmptyNode())
),
quote do: {.pop.},
)
)
)
# ```nim
# template `setResultSym`(code: untyped) {.used.} =
# when typeof(code) is void: code
# else: `resultIdent` = code
# ```
#
# this is useful to handle implicit returns, but also
# to bind the `result` to the one we declare here
setResultDecl =
if baseTypeIsVoid: # shortcut for non-generic void
newEmptyNode()
else:
nnkTemplateDef.newTree(
setResultSym,
newEmptyNode(), newEmptyNode(),
nnkFormalParams.newTree(
newEmptyNode(),
nnkIdentDefs.newTree(
ident"code",
ident"untyped",
newEmptyNode(),
)
),
nnkPragma.newTree(ident"used"),
newEmptyNode(),
nnkWhenStmt.newTree(
nnkElifBranch.newTree(
nnkInfix.newTree(
ident"is", nnkTypeOfExpr.newTree(ident"code"), ident"void"),
ident"code"
),
nnkElse.newTree(
newAssignment(resultIdent, ident"code")
)
)
)
internalFutureSym = ident "chronosInternalRetFuture"
castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym)
# Wrapping in try/finally ensures that early returns are handled properly
# and that `defer` is processed in the right scope
completeDecl = wrapInTryFinally(
castFutureSym, baseType,
if baseTypeIsVoid: procBody # shortcut for non-generic `void`
else: newCall(setResultSym, procBody),
raises,
handleException
)
closureBody = newStmtList(resultDecl, setResultDecl, completeDecl)
internalFutureParameter = nnkIdentDefs.newTree(
internalFutureSym, newIdentNode("FutureBase"), newEmptyNode())
prcName = prc.name.getName
iteratorNameSym = genSym(nskIterator, $prcName)
closureIterator = newProc(
iteratorNameSym,
[newIdentNode("FutureBase"), internalFutureParameter],
closureBody, nnkIteratorDef)
iteratorNameSym.copyLineInfo(prc)
closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body)
closureIterator.addPragma(newIdentNode("closure"))
# `async` code must be gcsafe
closureIterator.addPragma(newIdentNode("gcsafe"))
# Exceptions are caught inside the iterator and stored in the future
closureIterator.addPragma(nnkExprColonExpr.newTree(
newIdentNode("raises"),
nnkBracket.newTree()
))
# The body of the original procedure (now moved to the iterator) is replaced
# with:
#
# ```nim
# let resultFuture = newFuture[T]()
# resultFuture.internalClosure = `iteratorNameSym`
# futureContinue(resultFuture)
# return resultFuture
# ```
#
# Declared at the end to be sure that the closure doesn't reference it,
# avoiding a cyclic ref (#203)
#
# Do not change this code to the `quote do` version, because
# `instantiationInfo` would be broken for the `newFuture()` call.
let
outerProcBody = newNimNode(nnkStmtList, prc.body)
# Copy comment for nimdoc
if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt:
outerProcBody.add(prc.body[0])
outerProcBody.add(closureIterator)
let
retFutureSym = ident "resultFuture"
newFutProc = if raises == nil:
nnkBracketExpr.newTree(ident "newFuture", baseType)
else:
nnkBracketExpr.newTree(ident "newInternalRaisesFuture", baseType, raises)
retFutureSym.copyLineInfo(prc)
outerProcBody.add(
newLetStmt(
retFutureSym,
newCall(newFutProc, newLit(prcName))
)
)
outerProcBody.add(
newAssignment(
newDotExpr(retFutureSym, newIdentNode("internalClosure")),
iteratorNameSym)
)
outerProcBody.add(
newCall(newIdentNode("futureContinue"), retFutureSym)
)
outerProcBody.add newNimNode(nnkReturnStmt, prc.body[^1]).add(retFutureSym)
prc.body = outerProcBody
when chronosDumpAsync:
echo repr prc
prc
template await*[T](f: Future[T]): T =
## Ensure that the given `Future` is finished, then return its value.
##
## If the `Future` failed or was cancelled, the corresponding exception will
## be raised instead.
##
## If the `Future` is pending, execution of the current `async` procedure
## will be suspended until the `Future` is finished.
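##
## A minimal illustrative sketch (proc names are hypothetical):
##
## .. code-block:: nim
##   proc getAnswer(): Future[int] {.async.} =
##     return 42
##
##   proc main() {.async.} =
##     let answer = await getAnswer()
##     echo answer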
when declared(chronosInternalRetFuture):
chronosInternalRetFuture.internalChild = f
# `futureContinue` calls the iterator generated by the `async`
# transformation - `yield` gives control back to `futureContinue` which is
# responsible for resuming execution once the yielded future is finished
yield chronosInternalRetFuture.internalChild
# `child` released by `futureContinue`
cast[type(f)](chronosInternalRetFuture.internalChild).internalCheckComplete()
when T isnot void:
cast[type(f)](chronosInternalRetFuture.internalChild).value()
else:
unsupported "await is only available within {.async.}"
template await*[T, E](fut: InternalRaisesFuture[T, E]): T =
## Ensure that the given `Future` is finished, then return its value.
##
## If the `Future` failed or was cancelled, the corresponding exception will
## be raised instead.
##
## If the `Future` is pending, execution of the current `async` procedure
## will be suspended until the `Future` is finished.
when declared(chronosInternalRetFuture):
chronosInternalRetFuture.internalChild = fut
# `futureContinue` calls the iterator generated by the `async`
# transformation - `yield` gives control back to `futureContinue` which is
# responsible for resuming execution once the yielded future is finished
yield chronosInternalRetFuture.internalChild
# `child` released by `futureContinue`
cast[type(fut)](
chronosInternalRetFuture.internalChild).internalCheckComplete(E)
when T isnot void:
cast[type(fut)](chronosInternalRetFuture.internalChild).value()
else:
unsupported "await is only available within {.async.}"
template awaitne*[T](f: Future[T]): Future[T] =
when declared(chronosInternalRetFuture):
chronosInternalRetFuture.internalChild = f
yield chronosInternalRetFuture.internalChild
cast[type(f)](chronosInternalRetFuture.internalChild)
else:
unsupported "awaitne is only available within {.async.}"
macro async*(params, prc: untyped): untyped =
## Macro which processes async procedures into the appropriate
## iterators and yield statements.
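##
## A short illustrative sketch of both forms (proc names are hypothetical):
##
## .. code-block:: nim
##   proc plain(): Future[int] {.async.} =
##     return 1
##
##   proc checked(): Future[int] {.
##       async: (raises: [CancelledError, ValueError]).} =
##     return 2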
if prc.kind == nnkStmtList:
result = newStmtList()
for oneProc in prc:
result.add asyncSingleProc(oneProc, params)
else:
result = asyncSingleProc(prc, params)
macro async*(prc: untyped): untyped =
## Macro which processes async procedures into the appropriate
## iterators and yield statements.
if prc.kind == nnkStmtList:
result = newStmtList()
for oneProc in prc:
result.add asyncSingleProc(oneProc, nnkTupleConstr.newTree())
else:
result = asyncSingleProc(prc, nnkTupleConstr.newTree())

View File

@ -0,0 +1,9 @@
type
AsyncError* = object of CatchableError
## Generic async exception
AsyncTimeoutError* = object of AsyncError
## Timeout exception
AsyncExceptionError* = object of AsyncError
## Error raised in `handleException` mode - the original exception is
## available from the `parent` field.

View File

@ -0,0 +1,205 @@
import
std/[macros, sequtils],
../futures
type
InternalRaisesFuture*[T, E] = ref object of Future[T]
## Future with a tuple of possible exception types
## e.g. InternalRaisesFuture[void, (ValueError, OSError)]
##
## This type gets injected by `async: (raises: ...)` and similar utilities
## and should not be used manually as the internal exception representation
## is subject to change in future chronos versions.
proc makeNoRaises*(): NimNode {.compileTime.} =
# An empty tuple would have been easier but...
# https://github.com/nim-lang/Nim/issues/22863
# https://github.com/nim-lang/Nim/issues/22865
ident"void"
macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped =
## Given a Future type instance, return a type storing `{.raises.}`
## information
##
## Note: this type may change in the future
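##
## Illustrative sketch of declaring a field whose future may only fail with a
## restricted set of exceptions (the `Job` type is hypothetical):
##
## .. code-block:: nim
##   type Job = object
##     fut: Future[int].Raising([CancelledError, ValueError])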
E.expectKind(nnkBracket)
let raises = if E.len == 0:
makeNoRaises()
else:
nnkTupleConstr.newTree(E.mapIt(it))
nnkBracketExpr.newTree(
ident "InternalRaisesFuture",
nnkDotExpr.newTree(F, ident"T"),
raises
)
template init*[T, E](
F: type InternalRaisesFuture[T, E], fromProc: static[string] = ""): F =
## Creates a new pending future.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
let res = F()
internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {})
res
template init*[T, E](
F: type InternalRaisesFuture[T, E], fromProc: static[string] = "",
flags: static[FutureFlags]): F =
## Creates a new pending future.
##
## Specifying ``fromProc``, which is a string specifying the name of the proc
## that this future belongs to, is a good habit as it helps with debugging.
let res = F()
internalInitFutureBase(
res, getSrcLocation(fromProc), FutureState.Pending, flags)
res
proc dig(n: NimNode): NimNode {.compileTime.} =
# Dig through the layers of type to find the raises list
if n.eqIdent("void"):
n
elif n.kind == nnkBracketExpr:
if n[0].eqIdent("tuple"):
n
elif n[0].eqIdent("typeDesc"):
dig(getType(n[1]))
else:
echo astGenRepr(n)
raiseAssert "Unkown bracket"
elif n.kind == nnkTupleConstr:
n
else:
dig(getType(getTypeInst(n)))
proc isNoRaises*(n: NimNode): bool {.compileTime.} =
dig(n).eqIdent("void")
iterator members(tup: NimNode): NimNode =
# Given a typedesc[tuple] = (A, B, C), yields the tuple members (A, B, C)
if not isNoRaises(tup):
for n in getType(getTypeInst(tup)[1])[1..^1]:
yield n
proc members(tup: NimNode): seq[NimNode] {.compileTime.} =
for t in tup.members():
result.add(t)
proc containsSignature(members: openArray[NimNode], typ: NimNode): bool {.compileTime.} =
let typHash = signatureHash(typ)
for err in members:
if signatureHash(err) == typHash:
return true
false
# Utilities for working with the E part of InternalRaisesFuture - unstable
macro prepend*(tup: typedesc, typs: varargs[typed]): typedesc =
result = nnkTupleConstr.newTree()
for err in typs:
if not tup.members().containsSignature(err):
result.add err
for err in tup.members():
result.add err
if result.len == 0:
result = makeNoRaises()
macro remove*(tup: typedesc, typs: varargs[typed]): typedesc =
result = nnkTupleConstr.newTree()
for err in tup.members():
if not typs[0..^1].containsSignature(err):
result.add err
if result.len == 0:
result = makeNoRaises()
macro union*(tup0: typedesc, tup1: typedesc): typedesc =
## Join the types of the two tuples deduplicating the entries
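##
## For example (illustrative), joining `(ValueError, IOError)` with
## `(IOError, OSError)` yields roughly `(ValueError, IOError, OSError)`.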
result = nnkTupleConstr.newTree()
for err in tup0.members():
var found = false
for err2 in tup1.members():
if signatureHash(err) == signatureHash(err2):
found = true
if not found:
result.add err
for err2 in getType(getTypeInst(tup1)[1])[1..^1]:
result.add err2
if result.len == 0:
result = makeNoRaises()
proc getRaisesTypes*(raises: NimNode): NimNode =
let typ = getType(raises)
case typ.typeKind
of ntyTypeDesc: typ[1]
else: typ
macro checkRaises*[T: CatchableError](
future: InternalRaisesFuture, raises: typed, error: ref T,
warn: static bool = true): untyped =
## Generate code that checks that the given error is compatible with the
## raises restrictions of `future`.
##
## This check is done either at compile time or runtime depending on the
## information available at compile time - in particular, if the types in the
## raises list inherit from `error`, we end up with the equivalent of a
## downcast, which raises a Defect if it fails.
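##
## Illustrative sketch (assuming the `fail` overload for raising futures,
## which routes through this check):
##
## .. code-block:: nim
##   let fut = Future[void].Raising([ValueError]).init()
##   fut.fail(newException(ValueError, "boom"))   # accepted
##   # fut.fail(newException(IOError, "boom"))    # rejected at compile time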
let
raises = getRaisesTypes(raises)
expectKind(getTypeInst(error), nnkRefTy)
let toMatch = getTypeInst(error)[0]
if isNoRaises(raises):
error(
"`fail`: `" & repr(toMatch) & "` incompatible with `raises: []`", future)
return
var
typeChecker = ident"false"
maybeChecker = ident"false"
runtimeChecker = ident"false"
for errorType in raises[1..^1]:
typeChecker = infix(typeChecker, "or", infix(toMatch, "is", errorType))
maybeChecker = infix(maybeChecker, "or", infix(errorType, "is", toMatch))
runtimeChecker = infix(
runtimeChecker, "or",
infix(error, "of", nnkBracketExpr.newTree(ident"typedesc", errorType)))
let
errorMsg = "`fail`: `" & repr(toMatch) & "` incompatible with `raises: " & repr(raises[1..^1]) & "`"
warningMsg = "Can't verify `fail` exception type at compile time - expected one of " & repr(raises[1..^1]) & ", got `" & repr(toMatch) & "`"
# A warning from this line means the exception type will be verified at runtime
warning = if warn:
quote do: {.warning: `warningMsg`.}
else: newEmptyNode()
# Cannot check inheritance in a macro, so we let `static` do the heavy lifting
quote do:
when not(`typeChecker`):
when not(`maybeChecker`):
static:
{.error: `errorMsg`.}
else:
`warning`
assert(`runtimeChecker`, `errorMsg`)
proc error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {.
raises: [].} =
static:
warning("No exceptions possible with this operation, `error` always returns nil")
nil
proc readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {.
raises: [ValueError].} =
static:
warning("No exceptions possible with this operation, `readError` always raises")
raise newException(ValueError, "No error in future.")

View File

@ -97,12 +97,12 @@ proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] =
var nmask: Sigset
if sigemptyset(nmask) < 0:
return err(osLastError())
let epollFd = epoll_create(asyncEventsCount)
let epollFd = epoll_create(chronosEventsCount)
if epollFd < 0:
return err(osLastError())
let selector = Selector[T](
epollFd: epollFd,
fds: initTable[int32, SelectorKey[T]](asyncInitialSize),
fds: initTable[int32, SelectorKey[T]](chronosInitialSize),
signalMask: nmask,
virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1
childrenExited: false,
@ -411,7 +411,7 @@ proc registerProcess*[T](s: Selector, pid: int, data: T): SelectResult[cint] =
s.freeKey(fdi32)
s.freeProcess(int32(pid))
return err(res.error())
s.pidFd = Opt.some(cast[cint](res.get()))
s.pidFd = Opt.some(res.get())
ok(cint(fdi32))
@ -627,7 +627,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
readyKeys: var openArray[ReadyKey]
): SelectResult[int] =
var
queueEvents: array[asyncEventsCount, EpollEvent]
queueEvents: array[chronosEventsCount, EpollEvent]
k: int = 0
verifySelectParams(timeout, -1, int(high(cint)))
@ -668,7 +668,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
ok(k)
proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] =
var res = newSeq[ReadyKey](asyncEventsCount)
var res = newSeq[ReadyKey](chronosEventsCount)
let count = ? selectInto2(s, timeout, res)
res.setLen(count)
ok(res)

View File

@ -110,7 +110,7 @@ proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] =
let selector = Selector[T](
kqFd: kqFd,
fds: initTable[int32, SelectorKey[T]](asyncInitialSize),
fds: initTable[int32, SelectorKey[T]](chronosInitialSize),
virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1
virtualHoles: initDeque[int32]()
)
@ -559,7 +559,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
): SelectResult[int] =
var
tv: Timespec
queueEvents: array[asyncEventsCount, KEvent]
queueEvents: array[chronosEventsCount, KEvent]
verifySelectParams(timeout, -1, high(int))
@ -575,7 +575,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
addr tv
else:
nil
maxEventsCount = cint(min(asyncEventsCount, len(readyKeys)))
maxEventsCount = cint(min(chronosEventsCount, len(readyKeys)))
eventsCount =
block:
var res = 0
@ -601,7 +601,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
proc select2*[T](s: Selector[T],
timeout: int): Result[seq[ReadyKey], OSErrorCode] =
var res = newSeq[ReadyKey](asyncEventsCount)
var res = newSeq[ReadyKey](chronosEventsCount)
let count = ? selectInto2(s, timeout, res)
res.setLen(count)
ok(res)

View File

@ -16,7 +16,7 @@ import stew/base10
type
SelectorImpl[T] = object
fds: Table[int32, SelectorKey[T]]
pollfds: seq[TPollFd]
pollfds: seq[TPollfd]
Selector*[T] = ref SelectorImpl[T]
type
@ -50,7 +50,7 @@ proc freeKey[T](s: Selector[T], key: int32) =
proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] =
let selector = Selector[T](
fds: initTable[int32, SelectorKey[T]](asyncInitialSize)
fds: initTable[int32, SelectorKey[T]](chronosInitialSize)
)
ok(selector)
@ -72,7 +72,7 @@ proc trigger2*(event: SelectEvent): SelectResult[void] =
if res == -1:
err(osLastError())
elif res != sizeof(uint64):
err(OSErrorCode(osdefs.EINVAL))
err(osdefs.EINVAL)
else:
ok()
@ -98,13 +98,14 @@ template toPollEvents(events: set[Event]): cshort =
res
template pollAdd[T](s: Selector[T], sock: cint, events: set[Event]) =
s.pollfds.add(TPollFd(fd: sock, events: toPollEvents(events), revents: 0))
s.pollfds.add(TPollfd(fd: sock, events: toPollEvents(events), revents: 0))
template pollUpdate[T](s: Selector[T], sock: cint, events: set[Event]) =
var updated = false
for mitem in s.pollfds.mitems():
if mitem.fd == sock:
mitem.events = toPollEvents(events)
updated = true
break
if not(updated):
raiseAssert "Descriptor [" & $sock & "] is not registered in the queue!"
@ -177,7 +178,6 @@ proc unregister2*[T](s: Selector[T], event: SelectEvent): SelectResult[void] =
proc prepareKey[T](s: Selector[T], event: var TPollfd): Opt[ReadyKey] =
let
defaultKey = SelectorKey[T](ident: InvalidIdent)
fdi32 = int32(event.fd)
revents = event.revents
@ -224,7 +224,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
eventsCount =
if maxEventsCount > 0:
let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount),
timeout))
cint(timeout)))
if res < 0:
return err(osLastError())
res
@ -241,7 +241,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
ok(k)
proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] =
var res = newSeq[ReadyKey](asyncEventsCount)
var res = newSeq[ReadyKey](chronosEventsCount)
let count = ? selectInto2(s, timeout, res)
res.setLen(count)
ok(res)

View File

@ -122,6 +122,7 @@ when defined(windows):
SO_UPDATE_ACCEPT_CONTEXT* = 0x700B
SO_CONNECT_TIME* = 0x700C
SO_UPDATE_CONNECT_CONTEXT* = 0x7010
SO_PROTOCOL_INFOW* = 0x2005
FILE_FLAG_FIRST_PIPE_INSTANCE* = 0x00080000'u32
FILE_FLAG_OPEN_NO_RECALL* = 0x00100000'u32
@ -258,6 +259,9 @@ when defined(windows):
FIONBIO* = WSAIOW(102, 126)
HANDLE_FLAG_INHERIT* = 1'u32
IPV6_V6ONLY* = 27
MAX_PROTOCOL_CHAIN* = 7
WSAPROTOCOL_LEN* = 255
type
LONG* = int32
@ -441,6 +445,32 @@ when defined(windows):
prefix*: SOCKADDR_INET
prefixLength*: uint8
WSAPROTOCOLCHAIN* {.final, pure.} = object
chainLen*: int32
chainEntries*: array[MAX_PROTOCOL_CHAIN, DWORD]
WSAPROTOCOL_INFO* {.final, pure.} = object
dwServiceFlags1*: uint32
dwServiceFlags2*: uint32
dwServiceFlags3*: uint32
dwServiceFlags4*: uint32
dwProviderFlags*: uint32
providerId*: GUID
dwCatalogEntryId*: DWORD
protocolChain*: WSAPROTOCOLCHAIN
iVersion*: int32
iAddressFamily*: int32
iMaxSockAddr*: int32
iMinSockAddr*: int32
iSocketType*: int32
iProtocol*: int32
iProtocolMaxOffset*: int32
iNetworkByteOrder*: int32
iSecurityScheme*: int32
dwMessageSize*: uint32
dwProviderReserved*: uint32
szProtocol*: array[WSAPROTOCOL_LEN + 1, WCHAR]
MibIpForwardRow2* {.final, pure.} = object
interfaceLuid*: uint64
interfaceIndex*: uint32
@ -708,7 +738,7 @@ when defined(windows):
res: var ptr AddrInfo): cint {.
stdcall, dynlib: "ws2_32", importc: "getaddrinfo", sideEffect.}
proc freeaddrinfo*(ai: ptr AddrInfo) {.
proc freeAddrInfo*(ai: ptr AddrInfo) {.
stdcall, dynlib: "ws2_32", importc: "freeaddrinfo", sideEffect.}
proc createIoCompletionPort*(fileHandle: HANDLE,
@ -880,7 +910,7 @@ elif defined(macos) or defined(macosx):
sigemptyset, sigaddset, sigismember, fcntl, accept,
pipe, write, signal, read, setsockopt, getsockopt,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr,
SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6,
Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet,
@ -890,7 +920,7 @@ elif defined(macos) or defined(macosx):
O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM,
SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6,
IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE,
SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
@ -905,7 +935,7 @@ elif defined(macos) or defined(macosx):
sigemptyset, sigaddset, sigismember, fcntl, accept,
pipe, write, signal, read, setsockopt, getsockopt,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr,
SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6,
Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet,
@ -915,7 +945,7 @@ elif defined(macos) or defined(macosx):
O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM,
SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6,
IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE,
SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
@ -929,6 +959,21 @@ elif defined(macos) or defined(macosx):
numer*: uint32
denom*: uint32
TPollfd* {.importc: "struct pollfd", pure, final,
header: "<poll.h>".} = object
fd*: cint
events*: cshort
revents*: cshort
Tnfds* {.importc: "nfds_t", header: "<poll.h>".} = cuint
const
POLLIN* = 0x0001
POLLOUT* = 0x0004
POLLERR* = 0x0008
POLLHUP* = 0x0010
POLLNVAL* = 0x0020
proc posix_gettimeofday*(tp: var Timeval, unused: pointer = nil) {.
importc: "gettimeofday", header: "<sys/time.h>".}
@ -938,6 +983,9 @@ elif defined(macos) or defined(macosx):
proc mach_absolute_time*(): uint64 {.
importc, header: "<mach/mach_time.h>".}
proc poll*(a1: ptr TPollfd, a2: Tnfds, a3: cint): cint {.
importc, header: "<poll.h>", sideEffect.}
elif defined(linux):
from std/posix import close, shutdown, sigemptyset, sigaddset, sigismember,
sigdelset, write, read, waitid, getaddrinfo,
@ -947,20 +995,22 @@ elif defined(linux):
unlink, listen, sendmsg, recvmsg, getpid, fcntl,
pthread_sigmask, sigprocmask, clock_gettime, signal,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode,
SigInfo, Id, Tmsghdr, IOVec, RLimit, Timeval, TFdSet,
SockAddr, SockLen, Sockaddr_storage, Sockaddr_in,
Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle,
Suseconds,
Suseconds, TPollfd, Tnfds,
FD_CLR, FD_ISSET, FD_SET, FD_ZERO,
CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD,
FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK,
SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL,
MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT,
SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS,
SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6,
IPV6_MULTICAST_HOPS,
SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR,
POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2,
SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP,
@ -974,20 +1024,21 @@ elif defined(linux):
unlink, listen, sendmsg, recvmsg, getpid, fcntl,
pthread_sigmask, sigprocmask, clock_gettime, signal,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode,
SigInfo, Id, Tmsghdr, IOVec, RLimit, TFdSet, Timeval,
SockAddr, SockLen, Sockaddr_storage, Sockaddr_in,
Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle,
Suseconds,
Suseconds, TPollfd, Tnfds,
FD_CLR, FD_ISSET, FD_SET, FD_ZERO,
CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD,
FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK,
SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL,
MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT,
SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS,
SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR,
POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2,
SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP,
@ -1097,20 +1148,21 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or
sigaddset, sigismember, fcntl, accept, pipe, write,
signal, read, setsockopt, getsockopt, clock_gettime,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr,
SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6,
Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet,
Suseconds,
Suseconds, TPollfd, Tnfds,
FD_CLR, FD_ISSET, FD_SET, FD_ZERO,
F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC,
O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM,
SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6,
IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE,
SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC,
SHUT_RD, SHUT_WR, SHUT_RDWR,
POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2,
SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP,
@ -1123,20 +1175,21 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or
sigaddset, sigismember, fcntl, accept, pipe, write,
signal, read, setsockopt, getsockopt, clock_gettime,
getcwd, chdir, waitpid, kill, select, pselect,
socketpair,
socketpair, poll, freeAddrInfo,
Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr,
SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6,
Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet,
Suseconds,
Suseconds, TPollfd, Tnfds,
FD_CLR, FD_ISSET, FD_SET, FD_ZERO,
F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC,
O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM,
SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK,
AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP,
SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6,
IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE,
SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC,
SHUT_RD, SHUT_WR, SHUT_RDWR,
POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL,
SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2,
SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP,
@ -1160,47 +1213,52 @@ when defined(linux):
SOCK_CLOEXEC* = 0x80000
TCP_NODELAY* = cint(1)
IPPROTO_TCP* = 6
elif defined(freebsd) or defined(netbsd) or defined(dragonfly):
O_CLOEXEC* = 0x80000
POSIX_SPAWN_USEVFORK* = 0x40
IPV6_V6ONLY* = 26
elif defined(freebsd):
const
SOCK_NONBLOCK* = 0x20000000
SOCK_CLOEXEC* = 0x10000000
TCP_NODELAY* = cint(1)
IPPROTO_TCP* = 6
O_CLOEXEC* = 0x00100000
POSIX_SPAWN_USEVFORK* = 0x00
IPV6_V6ONLY* = 27
elif defined(netbsd):
const
SOCK_NONBLOCK* = 0x20000000
SOCK_CLOEXEC* = 0x10000000
TCP_NODELAY* = cint(1)
IPPROTO_TCP* = 6
O_CLOEXEC* = 0x00400000
POSIX_SPAWN_USEVFORK* = 0x00
IPV6_V6ONLY* = 27
elif defined(dragonfly):
const
SOCK_NONBLOCK* = 0x20000000
SOCK_CLOEXEC* = 0x10000000
TCP_NODELAY* = cint(1)
IPPROTO_TCP* = 6
O_CLOEXEC* = 0x00020000
POSIX_SPAWN_USEVFORK* = 0x00
IPV6_V6ONLY* = 27
elif defined(openbsd):
const
SOCK_CLOEXEC* = 0x8000
SOCK_NONBLOCK* = 0x4000
TCP_NODELAY* = cint(1)
IPPROTO_TCP* = 6
O_CLOEXEC* = 0x10000
POSIX_SPAWN_USEVFORK* = 0x00
IPV6_V6ONLY* = 27
elif defined(macos) or defined(macosx):
const
TCP_NODELAY* = cint(1)
IP_MULTICAST_TTL* = cint(10)
IPPROTO_TCP* = 6
when defined(linux):
const
O_CLOEXEC* = 0x80000
POSIX_SPAWN_USEVFORK* = 0x40
elif defined(freebsd):
const
O_CLOEXEC* = 0x00100000
POSIX_SPAWN_USEVFORK* = 0x00
elif defined(openbsd):
const
O_CLOEXEC* = 0x10000
POSIX_SPAWN_USEVFORK* = 0x00
elif defined(netbsd):
const
O_CLOEXEC* = 0x00400000
POSIX_SPAWN_USEVFORK* = 0x00
elif defined(dragonfly):
const
O_CLOEXEC* = 0x00020000
POSIX_SPAWN_USEVFORK* = 0x00
elif defined(macos) or defined(macosx):
const
POSIX_SPAWN_USEVFORK* = 0x00
IPV6_V6ONLY* = 27
when defined(linux) or defined(macos) or defined(macosx) or defined(freebsd) or
defined(openbsd) or defined(netbsd) or defined(dragonfly):
@ -1468,6 +1526,8 @@ when defined(posix):
INVALID_HANDLE_VALUE* = cint(-1)
proc `==`*(x: SocketHandle, y: int): bool = int(x) == y
when defined(nimdoc):
proc `==`*(x: SocketHandle, y: SocketHandle): bool {.borrow.}
when defined(macosx) or defined(macos) or defined(bsd):
const
@ -1595,6 +1655,8 @@ elif defined(linux):
# RTA_PRIORITY* = 6'u16
RTA_PREFSRC* = 7'u16
# RTA_METRICS* = 8'u16
RTM_NEWLINK* = 16'u16
RTM_NEWROUTE* = 24'u16
RTM_F_LOOKUP_TABLE* = 0x1000

View File

@ -1328,6 +1328,7 @@ elif defined(windows):
ERROR_CONNECTION_REFUSED* = OSErrorCode(1225)
ERROR_CONNECTION_ABORTED* = OSErrorCode(1236)
WSAEMFILE* = OSErrorCode(10024)
WSAEAFNOSUPPORT* = OSErrorCode(10047)
WSAENETDOWN* = OSErrorCode(10050)
WSAENETRESET* = OSErrorCode(10052)
WSAECONNABORTED* = OSErrorCode(10053)

View File

@ -6,8 +6,8 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import stew/results
import osdefs, oserrno
import results
import "."/[osdefs, oserrno]
export results
@ -346,6 +346,10 @@ else:
return err(osLastError())
ok()
proc setDescriptorBlocking*(s: SocketHandle,
value: bool): Result[void, OSErrorCode] =
setDescriptorBlocking(cint(s), value)
proc setDescriptorInheritance*(s: cint,
value: bool): Result[void, OSErrorCode] =
let flags = handleEintr(osdefs.fcntl(s, osdefs.F_GETFD))

View File

@ -88,8 +88,8 @@ proc worker(bucket: TokenBucket) {.async.} =
#buckets
sleeper = sleepAsync(milliseconds(timeToTarget))
await sleeper or eventWaiter
sleeper.cancel()
eventWaiter.cancel()
sleeper.cancelSoon()
eventWaiter.cancelSoon()
else:
await eventWaiter

View File

@ -31,32 +31,11 @@
# support - changes could potentially be backported to nim but are not
# backwards-compatible.
import stew/results
import osdefs, osutils, oserrno
import results
import "."/[config, osdefs, osutils, oserrno]
export results, oserrno
const
asyncEventsCount* {.intdefine.} = 64
## Number of epoll events retrieved by syscall.
asyncInitialSize* {.intdefine.} = 64
## Initial size of Selector[T]'s array of file descriptors.
asyncEventEngine* {.strdefine.} =
when defined(linux):
"epoll"
elif defined(macosx) or defined(macos) or defined(ios) or
defined(freebsd) or defined(netbsd) or defined(openbsd) or
defined(dragonfly):
"kqueue"
elif defined(posix):
"poll"
else:
""
## Engine type which is going to be used by module.
hasThreadSupport = compileOption("threads")
when defined(nimdoc):
type
Selector*[T] = ref object
## An object which holds descriptors to be checked for read/write status
@ -281,7 +260,9 @@ else:
var err = newException(IOSelectorsException, msg)
raise err
when asyncEventEngine in ["epoll", "kqueue"]:
when chronosEventEngine in ["epoll", "kqueue"]:
const hasThreadSupport = compileOption("threads")
proc blockSignals(newmask: Sigset,
oldmask: var Sigset): Result[void, OSErrorCode] =
var nmask = newmask
@ -324,11 +305,11 @@ else:
doAssert((timeout >= min) and (timeout <= max),
"Cannot select with incorrect timeout value, got " & $timeout)
when asyncEventEngine == "epoll":
include ./ioselects/ioselectors_epoll
elif asyncEventEngine == "kqueue":
include ./ioselects/ioselectors_kqueue
elif asyncEventEngine == "poll":
include ./ioselects/ioselectors_poll
else:
{.fatal: "Event engine `" & asyncEventEngine & "` is not supported!".}
when chronosEventEngine == "epoll":
include ./ioselects/ioselectors_epoll
elif chronosEventEngine == "kqueue":
include ./ioselects/ioselectors_kqueue
elif chronosEventEngine == "poll":
include ./ioselects/ioselectors_poll
else:
{.fatal: "Event engine `" & chronosEventEngine & "` is not supported!".}

View File

@ -38,8 +38,12 @@ when defined(nimdoc):
## be prepared to retry the call if there were unsent bytes.
##
## On error, ``-1`` is returned.
elif defined(emscripten):
elif defined(linux) or defined(android):
proc sendfile*(outfd, infd: int, offset: int, count: var int): int =
raiseAssert "sendfile() is not implemented yet"
elif (defined(linux) or defined(android)) and not(defined(emscripten)):
proc osSendFile*(outfd, infd: cint, offset: ptr int, count: int): int
{.importc: "sendfile", header: "<sys/sendfile.h>".}

View File

@ -24,15 +24,13 @@ const
## AsyncStreamWriter leaks tracker name
type
AsyncStreamError* = object of CatchableError
AsyncStreamError* = object of AsyncError
AsyncStreamIncorrectDefect* = object of Defect
AsyncStreamIncompleteError* = object of AsyncStreamError
AsyncStreamLimitError* = object of AsyncStreamError
AsyncStreamUseClosedError* = object of AsyncStreamError
AsyncStreamReadError* = object of AsyncStreamError
par*: ref CatchableError
AsyncStreamWriteError* = object of AsyncStreamError
par*: ref CatchableError
AsyncStreamWriteEOFError* = object of AsyncStreamWriteError
AsyncBuffer* = object
@ -53,7 +51,7 @@ type
dataStr*: string
size*: int
offset*: int
future*: Future[void]
future*: Future[void].Raising([CancelledError, AsyncStreamError])
AsyncStreamState* = enum
Running, ## Stream is online and working
@ -64,10 +62,10 @@ type
Closed ## Stream was closed
StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {.
gcsafe, raises: [].}
async: (raises: []).}
## Main read loop for read streams.
StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {.
gcsafe, raises: [].}
async: (raises: []).}
## Main write loop for write streams.
AsyncStreamReader* = ref object of RootRef
@ -124,12 +122,12 @@ proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} =
proc update*(sb: var AsyncBuffer, size: int) {.inline.} =
sb.offset += size
proc wait*(sb: var AsyncBuffer): Future[void] =
template wait*(sb: var AsyncBuffer): untyped =
sb.events[0].clear()
sb.events[1].fire()
sb.events[0].wait()
proc transfer*(sb: var AsyncBuffer): Future[void] =
template transfer*(sb: var AsyncBuffer): untyped =
sb.events[1].clear()
sb.events[0].fire()
sb.events[1].wait()
@ -150,7 +148,8 @@ proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} =
unsafeAddr sb.buffer[0], length)
proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte,
nbytes: int): Future[void] {.async.} =
nbytes: int): Future[void] {.
async: (raises: [CancelledError]).} =
## Any number of bytes can be uploaded to the buffer. If the internal
## buffer is not large enough to fit all the data at once, the data will be
## uploaded in chunks of up to the internal buffer size.
@ -186,18 +185,20 @@ template copyOut*(dest: pointer, item: WriteItem, length: int) =
elif item.kind == String:
copyMem(dest, unsafeAddr item.dataStr[item.offset], length)
proc newAsyncStreamReadError(p: ref CatchableError): ref AsyncStreamReadError {.
noinline.} =
proc newAsyncStreamReadError(
p: ref TransportError
): ref AsyncStreamReadError {.noinline.} =
var w = newException(AsyncStreamReadError, "Read stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
w.parent = p
w
proc newAsyncStreamWriteError(p: ref CatchableError): ref AsyncStreamWriteError {.
noinline.} =
proc newAsyncStreamWriteError(
p: ref TransportError
): ref AsyncStreamWriteError {.noinline.} =
var w = newException(AsyncStreamWriteError, "Write stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
w.parent = p
w
proc newAsyncStreamIncompleteError*(): ref AsyncStreamIncompleteError {.
@ -344,7 +345,8 @@ template readLoop(body: untyped): untyped =
await rstream.buffer.wait()
proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
nbytes: int) {.async.} =
nbytes: int) {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store
## them in ``pbytes``.
##
@ -365,7 +367,7 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
raise exc
except TransportIncompleteError:
raise newAsyncStreamIncompleteError()
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -384,7 +386,8 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
(consumed: count, done: index == nbytes)
proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
nbytes: int): Future[int] {.async.} =
nbytes: int): Future[int] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Perform one read operation on read-only stream ``rstream``.
##
## If internal buffer is not empty, ``nbytes`` bytes will be transferred from
@ -398,7 +401,7 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
return await readOnce(rstream.tsource, pbytes, nbytes)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -415,7 +418,8 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
return count
proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
sep: seq[byte]): Future[int] {.async.} =
sep: seq[byte]): Future[int] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read data from the read-only stream ``rstream`` until separator ``sep`` is
## found.
##
@ -446,7 +450,7 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
raise newAsyncStreamIncompleteError()
except TransportLimitError:
raise newAsyncStreamLimitError()
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -476,7 +480,8 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
return k
proc readLine*(rstream: AsyncStreamReader, limit = 0,
sep = "\r\n"): Future[string] {.async.} =
sep = "\r\n"): Future[string] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read one line from read-only stream ``rstream``, where ``"line"`` is a
## sequence of bytes ending with ``sep`` (default is ``"\r\n"``).
##
@ -495,7 +500,7 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0,
return await readLine(rstream.tsource, limit, sep)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -530,7 +535,8 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0,
(index, (state == len(sep)) or (lim == len(res)))
return res
proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} =
proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read all bytes from read-only stream ``rstream``.
##
## This procedure allocates a ``seq[byte]`` buffer and returns it as the result.
@ -543,7 +549,7 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} =
raise exc
except TransportLimitError:
raise newAsyncStreamLimitError()
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -559,7 +565,8 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} =
(count, false)
return res
proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} =
proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read all bytes (n <= 0) or exactly `n` bytes from read-only stream
## ``rstream``.
##
@ -571,7 +578,7 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} =
return await read(rstream.tsource, n)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -590,7 +597,8 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} =
(count, len(res) == n)
return res
proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} =
proc consume*(rstream: AsyncStreamReader): Future[int] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Consume (discard) all bytes from read-only stream ``rstream``.
##
## Return number of bytes actually consumed (discarded).
@ -603,7 +611,7 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} =
raise exc
except TransportLimitError:
raise newAsyncStreamLimitError()
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -618,7 +626,8 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} =
(rstream.buffer.dataLen(), false)
return res
proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} =
proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Consume (discard) all bytes (n <= 0) or ``n`` bytes from read-only stream
## ``rstream``.
##
@ -632,7 +641,7 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} =
raise exc
except TransportLimitError:
raise newAsyncStreamLimitError()
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -652,7 +661,7 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} =
return res
proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
async.} =
async: (raises: [CancelledError, AsyncStreamError]).} =
## Read all bytes from stream ``rstream`` until the ``predicate`` callback
## is satisfied.
##
@ -673,7 +682,7 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
await readMessage(rstream.tsource, pred)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamReadError(exc)
else:
if isNil(rstream.readerLoop):
@ -691,7 +700,8 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
pred(rstream.buffer.buffer.toOpenArray(0, count - 1))
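A minimal sketch (hypothetical helper, not from this patch) of a `ReadMessagePredicate` in use, assuming the predicate signature `proc(data: openArray[byte]): tuple[consumed: int, done: bool]` and that a zero-length slice signals EOF:

import chronos, chronos/streams/asyncstream

proc collectUpTo(rstream: AsyncStreamReader,
                 maxBytes: int): Future[seq[byte]] {.
    async: (raises: [CancelledError, AsyncStreamError]).} =
  ## Accumulate at most `maxBytes` bytes using `readMessage`.
  doAssert(maxBytes > 0)
  var res: seq[byte]
  proc predicate(data: openArray[byte]): tuple[consumed: int, done: bool] {.
      gcsafe, raises: [].} =
    if len(data) == 0:
      # Zero-length data is assumed to mean the stream reached EOF.
      return (0, true)
    let toTake = min(len(data), maxBytes - len(res))
    res.add(data.toOpenArray(0, toTake - 1))
    (toTake, len(res) >= maxBytes)
  await rstream.readMessage(predicate)
  res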
proc write*(wstream: AsyncStreamWriter, pbytes: pointer,
nbytes: int) {.async.} =
nbytes: int) {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Write sequence of bytes pointed to by ``pbytes`` of length ``nbytes`` to
## writer stream ``wstream``.
##
@ -708,9 +718,7 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer,
res = await write(wstream.tsource, pbytes, nbytes)
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamWriteError(exc)
if res != nbytes:
raise newAsyncStreamIncompleteError()
@ -720,23 +728,17 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer,
await write(wstream.wsource, pbytes, nbytes)
wstream.bytesCount = wstream.bytesCount + uint64(nbytes)
else:
var item = WriteItem(kind: Pointer)
item.dataPtr = pbytes
item.size = nbytes
item.future = newFuture[void]("async.stream.write(pointer)")
try:
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raise exc
except CatchableError as exc:
raise newAsyncStreamWriteError(exc)
let item = WriteItem(
kind: Pointer, dataPtr: pbytes, size: nbytes,
future: Future[void].Raising([CancelledError, AsyncStreamError])
.init("async.stream.write(pointer)"))
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte],
msglen = -1) {.async.} =
msglen = -1) {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Write sequence of bytes ``sbytes`` of length ``msglen`` to writer
## stream ``wstream``.
##
@ -758,7 +760,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte],
res = await write(wstream.tsource, sbytes, length)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamWriteError(exc)
if res != length:
raise newAsyncStreamIncompleteError()
@ -768,29 +770,17 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte],
await write(wstream.wsource, sbytes, length)
wstream.bytesCount = wstream.bytesCount + uint64(length)
else:
var item = WriteItem(kind: Sequence)
when declared(shallowCopy):
if not(isLiteral(sbytes)):
shallowCopy(item.dataSeq, sbytes)
else:
item.dataSeq = sbytes
else:
item.dataSeq = sbytes
item.size = length
item.future = newFuture[void]("async.stream.write(seq)")
try:
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raise exc
except CatchableError as exc:
raise newAsyncStreamWriteError(exc)
let item = WriteItem(
kind: Sequence, dataSeq: move(sbytes), size: length,
future: Future[void].Raising([CancelledError, AsyncStreamError])
.init("async.stream.write(seq)"))
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
proc write*(wstream: AsyncStreamWriter, sbytes: sink string,
msglen = -1) {.async.} =
msglen = -1) {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Write string ``sbytes`` of length ``msglen`` to writer stream ``wstream``.
##
## String ``sbytes`` must not be zero-length.
@ -811,7 +801,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string,
res = await write(wstream.tsource, sbytes, length)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except TransportError as exc:
raise newAsyncStreamWriteError(exc)
if res != length:
raise newAsyncStreamIncompleteError()
@ -821,28 +811,16 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string,
await write(wstream.wsource, sbytes, length)
wstream.bytesCount = wstream.bytesCount + uint64(length)
else:
var item = WriteItem(kind: String)
when declared(shallowCopy):
if not(isLiteral(sbytes)):
shallowCopy(item.dataStr, sbytes)
else:
item.dataStr = sbytes
else:
item.dataStr = sbytes
item.size = length
item.future = newFuture[void]("async.stream.write(string)")
try:
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raise exc
except CatchableError as exc:
raise newAsyncStreamWriteError(exc)
let item = WriteItem(
kind: String, dataStr: move(sbytes), size: length,
future: Future[void].Raising([CancelledError, AsyncStreamError])
.init("async.stream.write(string)"))
await wstream.queue.put(item)
await item.future
wstream.bytesCount = wstream.bytesCount + uint64(item.size)
proc finish*(wstream: AsyncStreamWriter) {.async.} =
proc finish*(wstream: AsyncStreamWriter) {.
async: (raises: [CancelledError, AsyncStreamError]).} =
## Finish write stream ``wstream``.
checkStreamClosed(wstream)
# For AsyncStreamWriter Finished state could be set manually or by stream's
@ -852,20 +830,15 @@ proc finish*(wstream: AsyncStreamWriter) {.async.} =
if isNil(wstream.writerLoop):
await wstream.wsource.finish()
else:
var item = WriteItem(kind: Pointer)
item.size = 0
item.future = newFuture[void]("async.stream.finish")
try:
await wstream.queue.put(item)
await item.future
except CancelledError as exc:
raise exc
except AsyncStreamError as exc:
raise exc
except CatchableError as exc:
raise newAsyncStreamWriteError(exc)
let item = WriteItem(
kind: Pointer, size: 0,
future: Future[void].Raising([CancelledError, AsyncStreamError])
.init("async.stream.finish"))
await wstream.queue.put(item)
await item.future
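A brief writer-side sketch (hypothetical helper, arbitrary payload) showing `write` and `finish` under the new annotations; write failures surface as subtypes of `AsyncStreamError`:

import chronos, chronos/streams/asyncstream

proc sendGreeting(wstream: AsyncStreamWriter) {.
    async: (raises: [CancelledError, AsyncStreamError]).} =
  ## Queue a text line and a small binary blob, then finish the stream so
  ## that any layered writer can flush its trailer and complete.
  await wstream.write("hello\r\n")
  await wstream.write(@[byte 0x01, 0x02, 0x03])
  await wstream.finish()

The `finish` call matters for layered writers such as the chunked encoder further below, which emits its terminating marker from it.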
proc join*(rw: AsyncStreamRW): Future[void] =
proc join*(rw: AsyncStreamRW): Future[void] {.
async: (raw: true, raises: [CancelledError]).} =
## Get Future[void] which will be completed when the stream becomes finished
## or closed.
when rw is AsyncStreamReader:
@ -873,10 +846,10 @@ proc join*(rw: AsyncStreamRW): Future[void] =
else:
var retFuture = newFuture[void]("async.stream.writer.join")
proc continuation(udata: pointer) {.gcsafe.} =
proc continuation(udata: pointer) {.gcsafe, raises:[].} =
retFuture.complete()
proc cancellation(udata: pointer) {.gcsafe.} =
proc cancellation(udata: pointer) {.gcsafe, raises:[].} =
rw.future.removeCallback(continuation, cast[pointer](retFuture))
if not(rw.future.finished()):
@ -913,7 +886,7 @@ proc close*(rw: AsyncStreamRW) =
callSoon(continuation)
else:
rw.future.addCallback(continuation)
rw.future.cancel()
rw.future.cancelSoon()
elif rw is AsyncStreamWriter:
if isNil(rw.wsource) or isNil(rw.writerLoop) or isNil(rw.future):
callSoon(continuation)
@ -922,12 +895,32 @@ proc close*(rw: AsyncStreamRW) =
callSoon(continuation)
else:
rw.future.addCallback(continuation)
rw.future.cancel()
rw.future.cancelSoon()
proc closeWait*(rw: AsyncStreamRW): Future[void] =
proc closeWait*(rw: AsyncStreamRW): Future[void] {.
async: (raw: true, raises: []).} =
## Close and free resources of stream ``rw``.
const FutureName =
when rw is AsyncStreamReader:
"async.stream.reader.closeWait"
else:
"async.stream.writer.closeWait"
let retFuture = Future[void].Raising([]).init(FutureName)
if rw.closed():
retFuture.complete()
return retFuture
proc continuation(udata: pointer) {.gcsafe, raises:[].} =
retFuture.complete()
rw.close()
rw.join()
if rw.future.finished():
retFuture.complete()
else:
rw.future.addCallback(continuation, cast[pointer](retFuture))
retFuture
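The `raw: true` form used by `join` and `closeWait` skips the async transformation and builds the returned future by hand. A standalone sketch of that pattern (the helper name `whenDone` is hypothetical), using the `Future[T].Raising([...]).init(...)` constructor seen throughout this diff:

import chronos

proc whenDone(fut: FutureBase): Future[void] {.
    async: (raw: true, raises: []).} =
  ## Return a future that completes, and never fails, once `fut` finishes,
  ## regardless of `fut`'s outcome.
  let retFuture = Future[void].Raising([]).init("example.whenDone")
  proc continuation(udata: pointer) {.gcsafe, raises: [].} =
    retFuture.complete()
  if fut.finished():
    retFuture.complete()
  else:
    fut.addCallback(continuation)
  retFuture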
proc startReader(rstream: AsyncStreamReader) =
rstream.state = Running

View File

@ -14,7 +14,10 @@
##
## For stream writing it means that you should write exactly the bounded
## number of bytes.
import stew/results
{.push raises: [].}
import results
import ../asyncloop, ../timer
import asyncstream, ../transports/stream, ../transports/common
export asyncloop, asyncstream, stream, timer, common
@ -52,7 +55,8 @@ template newBoundedStreamOverflowError(): ref BoundedStreamOverflowError =
newException(BoundedStreamOverflowError, "Stream boundary exceeded")
proc readUntilBoundary(rstream: AsyncStreamReader, pbytes: pointer,
nbytes: int, sep: seq[byte]): Future[int] {.async.} =
nbytes: int, sep: seq[byte]): Future[int] {.
async: (raises: [CancelledError, AsyncStreamError]).} =
doAssert(not(isNil(pbytes)), "pbytes must not be nil")
doAssert(nbytes >= 0, "nbytes must be non-negative value")
checkStreamClosed(rstream)
@ -96,7 +100,7 @@ func endsWith(s, suffix: openArray[byte]): bool =
inc(i)
if i >= len(suffix): return true
proc boundedReadLoop(stream: AsyncStreamReader) {.async.} =
proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
var rstream = BoundedStreamReader(stream)
rstream.state = AsyncStreamState.Running
var buffer = newSeq[byte](rstream.buffer.bufferLen())
@ -186,12 +190,16 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async.} =
break
of AsyncStreamState.Finished:
# Send `EOF` state to the consumer and wait until it is received.
await rstream.buffer.transfer()
try:
await rstream.buffer.transfer()
except CancelledError:
rstream.state = AsyncStreamState.Error
rstream.error = newBoundedStreamIncompleteError()
break
of AsyncStreamState.Closing, AsyncStreamState.Closed:
break
proc boundedWriteLoop(stream: AsyncStreamWriter) {.async.} =
proc boundedWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} =
var error: ref AsyncStreamError
var wstream = BoundedStreamWriter(stream)
@ -255,7 +263,11 @@ proc boundedWriteLoop(stream: AsyncStreamWriter) {.async.} =
doAssert(not(isNil(error)))
while not(wstream.queue.empty()):
let item = wstream.queue.popFirstNoWait()
let item =
try:
wstream.queue.popFirstNoWait()
except AsyncQueueEmptyError:
raiseAssert "AsyncQueue should not be empty at this moment"
if not(item.future.finished()):
item.future.fail(error)
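The same guard appears wherever a queue is drained without awaiting. A generic sketch of the pattern on a plain `AsyncQueue` (helper name is illustrative):

import chronos

proc drainNoWait[T](queue: AsyncQueue[T]): seq[T] =
  ## Empty `queue` without suspending. `popFirstNoWait` is declared to raise
  ## `AsyncQueueEmptyError`, but the emptiness check right before it makes
  ## that impossible, so the exception is converted into an assertion defect.
  var res: seq[T]
  while not(queue.empty()):
    let item =
      try:
        queue.popFirstNoWait()
      except AsyncQueueEmptyError:
        raiseAssert "AsyncQueue should not be empty at this moment"
    res.add(item)
  res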

View File

@ -8,9 +8,12 @@
# MIT license (LICENSE-MIT)
## This module implements HTTP/1.1 chunked-encoded stream reading and writing.
{.push raises: [].}
import ../asyncloop, ../timer
import asyncstream, ../transports/stream, ../transports/common
import stew/results
import results
export asyncloop, asyncstream, stream, timer, common, results
const
@ -95,7 +98,7 @@ proc setChunkSize(buffer: var openArray[byte], length: int64): int =
buffer[c + 1] = byte(0x0A)
(c + 2)
proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} =
proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
var rstream = ChunkedStreamReader(stream)
var buffer = newSeq[byte](MaxChunkHeaderSize)
rstream.state = AsyncStreamState.Running
@ -156,6 +159,10 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} =
if rstream.state == AsyncStreamState.Running:
rstream.state = AsyncStreamState.Error
rstream.error = exc
except AsyncStreamError as exc:
if rstream.state == AsyncStreamState.Running:
rstream.state = AsyncStreamState.Error
rstream.error = exc
if rstream.state != AsyncStreamState.Running:
# We need to notify consumer about error/close, but we do not care about
@ -163,7 +170,7 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} =
rstream.buffer.forget()
break
proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async.} =
proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} =
var wstream = ChunkedStreamWriter(stream)
var buffer: array[16, byte]
var error: ref AsyncStreamError
@ -220,7 +227,11 @@ proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async.} =
if not(item.future.finished()):
item.future.fail(error)
while not(wstream.queue.empty()):
let pitem = wstream.queue.popFirstNoWait()
let pitem =
try:
wstream.queue.popFirstNoWait()
except AsyncQueueEmptyError:
raiseAssert "AsyncQueue should not be empty at this moment"
if not(pitem.future.finished()):
pitem.future.fail(error)
break
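For context, a hedged sketch of how the chunked pair is typically layered over plain async streams; the constructor names `newChunkedStreamReader`/`newChunkedStreamWriter` are assumed from this module, `transp` is assumed to be a connected `StreamTransport`, and an unconstrained `{.async.}` keeps the example short:

import chronos, chronos/streams/chunkstream

proc chunkedRoundTrip(transp: StreamTransport) {.async.} =
  let
    reader = newAsyncStreamReader(transp)
    writer = newAsyncStreamWriter(transp)
    creader = newChunkedStreamReader(reader)
    cwriter = newChunkedStreamWriter(writer)
  # Write one chunked-encoded message; `finish` emits the terminating
  # zero-size chunk.
  await cwriter.write("hello")
  await cwriter.finish()
  # Read the peer's chunked message up to its terminating chunk.
  let msg = await creader.read()
  echo "received ", len(msg), " bytes"
  await cwriter.closeWait()
  await creader.closeWait()
  await writer.closeWait()
  await reader.closeWait()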

View File

@ -9,10 +9,13 @@
## This module implements Transport Layer Security (TLS) stream. This module
## uses sources of BearSSL <https://www.bearssl.org> by Thomas Pornin.
{.push raises: [].}
import
bearssl/[brssl, ec, errors, pem, rsa, ssl, x509],
bearssl/certs/cacert
import ../asyncloop, ../timer, ../asyncsync
import ".."/[asyncloop, asyncsync, config, timer]
import asyncstream, ../transports/stream, ../transports/common
export asyncloop, asyncsync, timer, asyncstream
@ -59,7 +62,7 @@ type
PEMContext = ref object
data: seq[byte]
TrustAnchorStore* = ref object
anchors: seq[X509TrustAnchor]
@ -71,7 +74,7 @@ type
scontext: ptr SslServerContext
stream*: TLSAsyncStream
handshaked*: bool
handshakeFut*: Future[void]
handshakeFut*: Future[void].Raising([CancelledError, AsyncStreamError])
TLSStreamReader* = ref object of AsyncStreamReader
case kind: TLSStreamKind
@ -81,7 +84,7 @@ type
scontext: ptr SslServerContext
stream*: TLSAsyncStream
handshaked*: bool
handshakeFut*: Future[void]
handshakeFut*: Future[void].Raising([CancelledError, AsyncStreamError])
TLSAsyncStream* = ref object of RootRef
xwc*: X509NoanchorContext
@ -91,18 +94,17 @@ type
x509*: X509MinimalContext
reader*: TLSStreamReader
writer*: TLSStreamWriter
mainLoop*: Future[void]
mainLoop*: Future[void].Raising([])
trustAnchors: TrustAnchorStore
SomeTLSStreamType* = TLSStreamReader|TLSStreamWriter|TLSAsyncStream
SomeTrustAnchorType* = TrustAnchorStore | openArray[X509TrustAnchor]
TLSStreamError* = object of AsyncStreamError
TLSStreamHandshakeError* = object of TLSStreamError
TLSStreamInitError* = object of TLSStreamError
TLSStreamReadError* = object of TLSStreamError
par*: ref AsyncStreamError
TLSStreamWriteError* = object of TLSStreamError
par*: ref AsyncStreamError
TLSStreamProtocolError* = object of TLSStreamError
errCode*: int
@ -110,7 +112,7 @@ proc newTLSStreamWriteError(p: ref AsyncStreamError): ref TLSStreamWriteError {.
noinline.} =
var w = newException(TLSStreamWriteError, "Write stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
w.parent = p
w
template newTLSStreamProtocolImpl[T](message: T): ref TLSStreamProtocolError =
@ -136,38 +138,41 @@ template newTLSUnexpectedProtocolError(): ref TLSStreamProtocolError =
proc newTLSStreamProtocolError[T](message: T): ref TLSStreamProtocolError =
newTLSStreamProtocolImpl(message)
proc raiseTLSStreamProtocolError[T](message: T) {.noreturn, noinline.} =
proc raiseTLSStreamProtocolError[T](message: T) {.
noreturn, noinline, raises: [TLSStreamProtocolError].} =
raise newTLSStreamProtocolImpl(message)
proc new*(T: typedesc[TrustAnchorStore], anchors: openArray[X509TrustAnchor]): TrustAnchorStore =
proc new*(T: typedesc[TrustAnchorStore],
anchors: openArray[X509TrustAnchor]): TrustAnchorStore =
var res: seq[X509TrustAnchor]
for anchor in anchors:
res.add(anchor)
doAssert(unsafeAddr(anchor) != unsafeAddr(res[^1]), "Anchors should be copied")
return TrustAnchorStore(anchors: res)
doAssert(unsafeAddr(anchor) != unsafeAddr(res[^1]),
"Anchors should be copied")
TrustAnchorStore(anchors: res)
proc tlsWriteRec(engine: ptr SslEngineContext,
writer: TLSStreamWriter): Future[TLSResult] {.async.} =
writer: TLSStreamWriter): Future[TLSResult] {.
async: (raises: []).} =
try:
var length = 0'u
var buf = sslEngineSendrecBuf(engine[], length)
doAssert(length != 0 and not isNil(buf))
await writer.wsource.write(buf, int(length))
await writer.wsource.write(chronosMoveSink(buf), int(length))
sslEngineSendrecAck(engine[], length)
return TLSResult.Success
TLSResult.Success
except AsyncStreamError as exc:
writer.state = AsyncStreamState.Error
writer.error = exc
return TLSResult.Error
TLSResult.Error
except CancelledError:
if writer.state == AsyncStreamState.Running:
writer.state = AsyncStreamState.Stopped
return TLSResult.Stopped
return TLSResult.Error
TLSResult.Stopped
proc tlsWriteApp(engine: ptr SslEngineContext,
writer: TLSStreamWriter): Future[TLSResult] {.async.} =
writer: TLSStreamWriter): Future[TLSResult] {.
async: (raises: []).} =
try:
var item = await writer.queue.get()
if item.size > 0:
@ -179,7 +184,6 @@ proc tlsWriteApp(engine: ptr SslEngineContext,
# (and discarded).
writer.state = AsyncStreamState.Finished
return TLSResult.WriteEof
let toWrite = min(int(length), item.size)
copyOut(buf, item, toWrite)
if int(length) >= item.size:
@ -187,28 +191,29 @@ proc tlsWriteApp(engine: ptr SslEngineContext,
sslEngineSendappAck(engine[], uint(item.size))
sslEngineFlush(engine[], 0)
item.future.complete()
return TLSResult.Success
else:
# BearSSL is not ready to accept whole item, so we will send
# only part of item and adjust offset.
item.offset = item.offset + int(length)
item.size = item.size - int(length)
writer.queue.addFirstNoWait(item)
try:
writer.queue.addFirstNoWait(item)
except AsyncQueueFullError:
raiseAssert "AsyncQueue should not be full at this moment"
sslEngineSendappAck(engine[], length)
return TLSResult.Success
TLSResult.Success
else:
sslEngineClose(engine[])
item.future.complete()
return TLSResult.Success
TLSResult.Success
except CancelledError:
if writer.state == AsyncStreamState.Running:
writer.state = AsyncStreamState.Stopped
return TLSResult.Stopped
return TLSResult.Error
TLSResult.Stopped
proc tlsReadRec(engine: ptr SslEngineContext,
reader: TLSStreamReader): Future[TLSResult] {.async.} =
reader: TLSStreamReader): Future[TLSResult] {.
async: (raises: []).} =
try:
var length = 0'u
var buf = sslEngineRecvrecBuf(engine[], length)
@ -216,38 +221,35 @@ proc tlsReadRec(engine: ptr SslEngineContext,
sslEngineRecvrecAck(engine[], uint(res))
if res == 0:
sslEngineClose(engine[])
return TLSResult.ReadEof
TLSResult.ReadEof
else:
return TLSResult.Success
TLSResult.Success
except AsyncStreamError as exc:
reader.state = AsyncStreamState.Error
reader.error = exc
return TLSResult.Error
TLSResult.Error
except CancelledError:
if reader.state == AsyncStreamState.Running:
reader.state = AsyncStreamState.Stopped
return TLSResult.Stopped
return TLSResult.Error
TLSResult.Stopped
proc tlsReadApp(engine: ptr SslEngineContext,
reader: TLSStreamReader): Future[TLSResult] {.async.} =
reader: TLSStreamReader): Future[TLSResult] {.
async: (raises: []).} =
try:
var length = 0'u
var buf = sslEngineRecvappBuf(engine[], length)
await upload(addr reader.buffer, buf, int(length))
sslEngineRecvappAck(engine[], length)
return TLSResult.Success
TLSResult.Success
except CancelledError:
if reader.state == AsyncStreamState.Running:
reader.state = AsyncStreamState.Stopped
return TLSResult.Stopped
return TLSResult.Error
TLSResult.Stopped
template readAndReset(fut: untyped) =
if fut.finished():
let res = fut.read()
let res = fut.value()
case res
of TLSResult.Success, TLSResult.WriteEof, TLSResult.Stopped:
fut = nil
@ -263,22 +265,6 @@ template readAndReset(fut: untyped) =
loopState = AsyncStreamState.Finished
break
proc cancelAndWait*(a, b, c, d: Future[TLSResult]): Future[void] =
var waiting: seq[Future[TLSResult]]
if not(isNil(a)) and not(a.finished()):
a.cancel()
waiting.add(a)
if not(isNil(b)) and not(b.finished()):
b.cancel()
waiting.add(b)
if not(isNil(c)) and not(c.finished()):
c.cancel()
waiting.add(c)
if not(isNil(d)) and not(d.finished()):
d.cancel()
waiting.add(d)
allFutures(waiting)
proc dumpState*(state: cuint): string =
var res = ""
if (state and SSL_CLOSED) == SSL_CLOSED:
@ -298,10 +284,10 @@ proc dumpState*(state: cuint): string =
res.add("SSL_RECVAPP")
"{" & res & "}"
proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
proc tlsLoop*(stream: TLSAsyncStream) {.async: (raises: []).} =
var
sendRecFut, sendAppFut: Future[TLSResult]
recvRecFut, recvAppFut: Future[TLSResult]
sendRecFut, sendAppFut: Future[TLSResult].Raising([])
recvRecFut, recvAppFut: Future[TLSResult].Raising([])
let engine =
case stream.reader.kind
@ -313,7 +299,7 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
var loopState = AsyncStreamState.Running
while true:
var waiting: seq[Future[TLSResult]]
var waiting: seq[Future[TLSResult].Raising([])]
var state = sslEngineCurrentState(engine[])
if (state and SSL_CLOSED) == SSL_CLOSED:
@ -364,6 +350,8 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
if len(waiting) > 0:
try:
discard await one(waiting)
except ValueError:
raiseAssert "array should not be empty at this moment"
except CancelledError:
if loopState == AsyncStreamState.Running:
loopState = AsyncStreamState.Stopped
@ -371,8 +359,18 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
if loopState != AsyncStreamState.Running:
break
# Cancelling and waiting all the pending operations
await cancelAndWait(sendRecFut, sendAppFut, recvRecFut, recvAppFut)
# Cancelling and waiting for all the pending operations
var pending: seq[FutureBase]
if not(isNil(sendRecFut)) and not(sendRecFut.finished()):
pending.add(sendRecFut.cancelAndWait())
if not(isNil(sendAppFut)) and not(sendAppFut.finished()):
pending.add(sendAppFut.cancelAndWait())
if not(isNil(recvRecFut)) and not(recvRecFut.finished()):
pending.add(recvRecFut.cancelAndWait())
if not(isNil(recvAppFut)) and not(recvAppFut.finished()):
pending.add(recvAppFut.cancelAndWait())
await noCancel(allFutures(pending))
# Calculating error
let error =
case loopState
@ -406,7 +404,11 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
if not(isNil(error)):
# Completing all pending writes
while(not(stream.writer.queue.empty())):
let item = stream.writer.queue.popFirstNoWait()
let item =
try:
stream.writer.queue.popFirstNoWait()
except AsyncQueueEmptyError:
raiseAssert "AsyncQueue should not be empty at this moment"
if not(item.future.finished()):
item.future.fail(error)
# Completing handshake
@ -426,18 +428,18 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} =
# Completing readers
stream.reader.buffer.forget()
proc tlsWriteLoop(stream: AsyncStreamWriter) {.async.} =
proc tlsWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} =
var wstream = TLSStreamWriter(stream)
wstream.state = AsyncStreamState.Running
await stepsAsync(1)
await noCancel(sleepAsync(0.milliseconds))
if isNil(wstream.stream.mainLoop):
wstream.stream.mainLoop = tlsLoop(wstream.stream)
await wstream.stream.mainLoop
proc tlsReadLoop(stream: AsyncStreamReader) {.async.} =
proc tlsReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
var rstream = TLSStreamReader(stream)
rstream.state = AsyncStreamState.Running
await stepsAsync(1)
await noCancel(sleepAsync(0.milliseconds))
if isNil(rstream.stream.mainLoop):
rstream.stream.mainLoop = tlsLoop(rstream.stream)
await rstream.stream.mainLoop
@ -453,15 +455,16 @@ proc getSignerAlgo(xc: X509Certificate): int =
else:
int(x509DecoderGetSignerKeyType(dc))
proc newTLSClientAsyncStream*(rsource: AsyncStreamReader,
wsource: AsyncStreamWriter,
serverName: string,
bufferSize = SSL_BUFSIZE_BIDI,
minVersion = TLSVersion.TLS12,
maxVersion = TLSVersion.TLS12,
flags: set[TLSFlags] = {},
trustAnchors: TrustAnchorStore | openArray[X509TrustAnchor] = MozillaTrustAnchors
): TLSAsyncStream =
proc newTLSClientAsyncStream*(
rsource: AsyncStreamReader,
wsource: AsyncStreamWriter,
serverName: string,
bufferSize = SSL_BUFSIZE_BIDI,
minVersion = TLSVersion.TLS12,
maxVersion = TLSVersion.TLS12,
flags: set[TLSFlags] = {},
trustAnchors: SomeTrustAnchorType = MozillaTrustAnchors
): TLSAsyncStream {.raises: [TLSStreamInitError].} =
## Create new TLS asynchronous stream for outbound (client) connections
## using reading stream ``rsource`` and writing stream ``wsource``.
##
@ -478,13 +481,14 @@ proc newTLSClientAsyncStream*(rsource: AsyncStreamReader,
## ``minVersion`` bigger than ``maxVersion`` you will get an error.
##
## ``flags`` - custom TLS connection flags.
##
##
## ``trustAnchors`` - use this if you want to use certificate trust
## anchors other than the default Mozilla trust anchors. If you pass
## a ``TrustAnchorStore`` you should reuse the same instance for
## every call to avoid making a copy of the trust anchors per call.
when trustAnchors is TrustAnchorStore:
doAssert(len(trustAnchors.anchors) > 0, "Empty trust anchor list is invalid")
doAssert(len(trustAnchors.anchors) > 0,
"Empty trust anchor list is invalid")
else:
doAssert(len(trustAnchors) > 0, "Empty trust anchor list is invalid")
var res = TLSAsyncStream()
@ -524,7 +528,7 @@ proc newTLSClientAsyncStream*(rsource: AsyncStreamReader,
uint16(maxVersion))
if TLSFlags.NoVerifyServerName in flags:
let err = sslClientReset(res.ccontext, "", 0)
let err = sslClientReset(res.ccontext, nil, 0)
if err == 0:
raise newException(TLSStreamInitError, "Could not initialize TLS layer")
else:
@ -550,7 +554,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader,
minVersion = TLSVersion.TLS11,
maxVersion = TLSVersion.TLS12,
cache: TLSSessionCache = nil,
flags: set[TLSFlags] = {}): TLSAsyncStream =
flags: set[TLSFlags] = {}): TLSAsyncStream {.
raises: [TLSStreamInitError, TLSStreamProtocolError].} =
## Create new TLS asynchronous stream for inbound (server) connections
## using reading stream ``rsource`` and writing stream ``wsource``.
##
@ -618,10 +623,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader,
if err == 0:
raise newException(TLSStreamInitError, "Could not initialize TLS layer")
init(AsyncStreamWriter(res.writer), wsource, tlsWriteLoop,
bufferSize)
init(AsyncStreamReader(res.reader), rsource, tlsReadLoop,
bufferSize)
init(AsyncStreamWriter(res.writer), wsource, tlsWriteLoop, bufferSize)
init(AsyncStreamReader(res.reader), rsource, tlsReadLoop, bufferSize)
res
proc copyKey(src: RsaPrivateKey): TLSPrivateKey =
@ -662,7 +665,8 @@ proc copyKey(src: EcPrivateKey): TLSPrivateKey =
res.eckey.curve = src.curve
res
proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey =
proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey {.
raises: [TLSStreamProtocolError].} =
## Initialize TLS private key from array of bytes ``data``.
##
## This procedure initializes private key using raw, DER-encoded format,
@ -685,7 +689,8 @@ proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey =
raiseTLSStreamProtocolError("Unknown key type (" & $keyType & ")")
res
proc pemDecode*(data: openArray[char]): seq[PEMElement] =
proc pemDecode*(data: openArray[char]): seq[PEMElement] {.
raises: [TLSStreamProtocolError].} =
## Decode PEM encoded string and get array of binary blobs.
if len(data) == 0:
raiseTLSStreamProtocolError("Empty PEM message")
@ -726,7 +731,8 @@ proc pemDecode*(data: openArray[char]): seq[PEMElement] =
raiseTLSStreamProtocolError("Invalid PEM encoding")
res
proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey =
proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey {.
raises: [TLSStreamProtocolError].} =
## Initialize TLS private key from string ``data``.
##
## This procedure initializes private key using unencrypted PKCS#8 PEM
@ -744,7 +750,8 @@ proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey =
res
proc init*(tt: typedesc[TLSCertificate],
data: openArray[char]): TLSCertificate =
data: openArray[char]): TLSCertificate {.
raises: [TLSStreamProtocolError].} =
## Initialize TLS certificates from string ``data``.
##
## This procedure initializes array of certificates from PEM encoded string.
@ -779,9 +786,11 @@ proc init*(tt: typedesc[TLSSessionCache], size: int = 4096): TLSSessionCache =
sslSessionCacheLruInit(addr res.context, addr res.storage[0], rsize)
res
proc handshake*(rws: SomeTLSStreamType): Future[void] =
proc handshake*(rws: SomeTLSStreamType): Future[void] {.
async: (raw: true, raises: [CancelledError, AsyncStreamError]).} =
## Wait until initial TLS handshake will be successfully performed.
var retFuture = newFuture[void]("tlsstream.handshake")
let retFuture = Future[void].Raising([CancelledError, AsyncStreamError])
.init("tlsstream.handshake")
when rws is TLSStreamReader:
if rws.handshaked:
retFuture.complete()
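A hedged end-to-end sketch of the client-side flow: wrap a connected transport, create the TLS stream, wait for `handshake`, then use the `reader`/`writer` pair. The host name, port and request text are placeholders, and an unconstrained `{.async.}` is used for brevity:

import chronos, chronos/streams/tlsstream

proc fetchStatusLine(serverName: string) {.async.} =
  ## Connect, perform the TLS handshake, send a minimal request and print
  ## the first response line.
  let
    address = resolveTAddress(serverName, Port(443))[0]
    transp = await connect(address)
    reader = newAsyncStreamReader(transp)
    writer = newAsyncStreamWriter(transp)
    tls = newTLSClientAsyncStream(reader, writer, serverName)
  await handshake(tls)
  await tls.writer.write("HEAD / HTTP/1.1\r\nHost: " & serverName &
                         "\r\nConnection: close\r\n\r\n")
  let status = await tls.reader.readLine()
  echo status
  await tls.writer.finish()
  await tls.reader.closeWait()
  await tls.writer.closeWait()
  await reader.closeWait()
  await writer.closeWait()
  await transp.closeWait()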

View File

@ -8,7 +8,7 @@
# MIT license (LICENSE-MIT)
## This module implements some core async thread synchronization primitives.
import stew/results
import results
import "."/[timer, asyncloop]
export results

View File

@ -11,7 +11,7 @@
import std/[strutils]
import stew/[base10, byteutils]
import ".."/[asyncloop, osdefs, oserrno]
import ".."/[asyncloop, osdefs, oserrno, handles]
from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress,
SockType, Protocol, Port, `$`
@ -31,6 +31,9 @@ type
ReuseAddr, ReusePort, TcpNoDelay, NoAutoRead, GCUserData, FirstPipe,
NoPipeFlash, Broadcast
DualStackType* {.pure.} = enum
Auto, Enabled, Disabled, Default
AddressFamily* {.pure.} = enum
None, IPv4, IPv6, Unix
@ -76,6 +79,7 @@ when defined(windows) or defined(nimdoc):
asock*: AsyncFD # Current AcceptEx() socket
errorCode*: OSErrorCode # Current error code
abuffer*: array[128, byte] # Windows AcceptEx() buffer
dualstack*: DualStackType # IPv4/IPv6 dualstack parameters
when defined(windows):
aovl*: CustomOverlapped # AcceptEx OVERLAPPED structure
else:
@ -90,6 +94,7 @@ else:
bufferSize*: int # Size of internal transports' buffer
loopFuture*: Future[void] # Server's main Future
errorCode*: OSErrorCode # Current error code
dualstack*: DualStackType # IPv4/IPv6 dualstack parameters
type
TransportError* = object of AsyncError
@ -108,6 +113,8 @@ type
## Transport's capability not supported exception
TransportUseClosedError* = object of TransportError
## Usage after transport close exception
TransportUseEofError* = object of TransportError
## Usage after transport half-close exception
TransportTooManyError* = object of TransportError
## Too many open file descriptors exception
TransportAbortedError* = object of TransportError
@ -194,7 +201,7 @@ proc `$`*(address: TransportAddress): string =
"None"
proc toHex*(address: TransportAddress): string =
## Returns hexadecimal representation of ``address`.
## Returns hexadecimal representation of ``address``.
case address.family
of AddressFamily.IPv4:
"0x" & address.address_v4.toHex()
@ -298,6 +305,9 @@ proc getAddrInfo(address: string, port: Port, domain: Domain,
raises: [TransportAddressError].} =
## We have this one copy of ``getAddrInfo()`` because of AI_V4MAPPED in
## ``net.nim:getAddrInfo()``, which is not cross-platform.
##
## Warning: `ptr AddrInfo` returned by `getAddrInfo()` needs to be freed by
## calling `freeAddrInfo()`.
var hints: AddrInfo
var res: ptr AddrInfo = nil
hints.ai_family = toInt(domain)
@ -420,6 +430,7 @@ proc resolveTAddress*(address: string, port: Port,
if ta notin res:
res.add(ta)
it = it.ai_next
freeAddrInfo(aiList)
res
proc resolveTAddress*(address: string, domain: Domain): seq[TransportAddress] {.
@ -558,11 +569,11 @@ template checkClosed*(t: untyped, future: untyped) =
template checkWriteEof*(t: untyped, future: untyped) =
if (WriteEof in (t).state):
future.fail(newException(TransportError,
future.fail(newException(TransportUseEofError,
"Transport connection is already dropped!"))
return future
template getError*(t: untyped): ref CatchableError =
template getError*(t: untyped): ref TransportError =
var err = (t).error
(t).error = nil
err
@ -585,22 +596,6 @@ proc raiseTransportOsError*(err: OSErrorCode) {.
## Raises transport specific OS error.
raise getTransportOsError(err)
type
SeqHeader = object
length, reserved: int
proc isLiteral*(s: string): bool {.inline.} =
when defined(gcOrc) or defined(gcArc):
false
else:
(cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
proc isLiteral*[T](s: seq[T]): bool {.inline.} =
when defined(gcOrc) or defined(gcArc):
false
else:
(cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
template getTransportTooManyError*(
code = OSErrorCode(0)
): ref TransportTooManyError =
@ -716,3 +711,75 @@ proc raiseTransportError*(ecode: OSErrorCode) {.
raise getTransportTooManyError(ecode)
else:
raise getTransportOsError(ecode)
proc isAvailable*(family: AddressFamily): bool =
case family
of AddressFamily.None:
raiseAssert "Invalid address family"
of AddressFamily.IPv4:
isAvailable(Domain.AF_INET)
of AddressFamily.IPv6:
isAvailable(Domain.AF_INET6)
of AddressFamily.Unix:
isAvailable(Domain.AF_UNIX)
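A small sketch of using the new `isAvailable` helper to pick a wildcard listening address at startup (the helper name and port are arbitrary):

import chronos

proc pickListenAddress(): TransportAddress =
  ## Prefer the IPv6 wildcard when the platform exposes an IPv6 stack and
  ## fall back to IPv4 otherwise.
  if AddressFamily.IPv6.isAvailable():
    initTAddress("[::]:8080")
  else:
    initTAddress("0.0.0.0:8080")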
proc getDomain*(socket: AsyncFD): Result[AddressFamily, OSErrorCode] =
## Returns the address family which was used to create socket ``socket``.
##
## Note: `chronos` supports only `AF_INET`, `AF_INET6` and `AF_UNIX` sockets.
## For all other types of sockets this procedure returns
## `EAFNOSUPPORT/WSAEAFNOSUPPORT` error.
when defined(windows):
let protocolInfo = ? getSockOpt2(socket, cint(osdefs.SOL_SOCKET),
cint(osdefs.SO_PROTOCOL_INFOW),
WSAPROTOCOL_INFO)
if protocolInfo.iAddressFamily == toInt(Domain.AF_INET):
ok(AddressFamily.IPv4)
elif protocolInfo.iAddressFamily == toInt(Domain.AF_INET6):
ok(AddressFamily.IPv6)
else:
err(WSAEAFNOSUPPORT)
else:
var
saddr = Sockaddr_storage()
slen = SockLen(sizeof(saddr))
if getsockname(SocketHandle(socket), cast[ptr SockAddr](addr saddr),
addr slen) != 0:
return err(osLastError())
if int(saddr.ss_family) == toInt(Domain.AF_INET):
ok(AddressFamily.IPv4)
elif int(saddr.ss_family) == toInt(Domain.AF_INET6):
ok(AddressFamily.IPv6)
elif int(saddr.ss_family) == toInt(Domain.AF_UNIX):
ok(AddressFamily.Unix)
else:
err(EAFNOSUPPORT)
proc setDualstack*(socket: AsyncFD, family: AddressFamily,
flag: DualStackType): Result[void, OSErrorCode] =
if family == AddressFamily.IPv6:
case flag
of DualStackType.Auto:
# In case of `Auto` we are going to ignore all errors.
discard setDualstack(socket, true)
ok()
of DualStackType.Enabled:
? setDualstack(socket, true)
ok()
of DualStackType.Disabled:
? setDualstack(socket, false)
ok()
of DualStackType.Default:
ok()
else:
ok()
proc setDualstack*(socket: AsyncFD,
flag: DualStackType): Result[void, OSErrorCode] =
let family =
case flag
of DualStackType.Auto:
getDomain(socket).get(AddressFamily.IPv6)
else:
? getDomain(socket)
setDualstack(socket, family, flag)
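A minimal sketch of calling the Result-based helper from application code; `fd` is assumed to be an already-registered socket and the wrapper name is hypothetical:

import chronos, results

proc applyDualstack(fd: AsyncFD, flag: DualStackType) =
  ## Apply the requested dual-stack mode to an already-registered socket,
  ## turning a failure into the transport's usual exception. Note that
  ## `DualStackType.Auto` already ignores unsupported-option errors
  ## internally.
  setDualstack(fd, flag).isOkOr:
    raiseTransportOsError(error)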

View File

@ -11,7 +11,7 @@
import std/deques
when not(defined(windows)): import ".."/selectors2
import ".."/[asyncloop, osdefs, oserrno, handles]
import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles]
import "."/common
type
@ -27,7 +27,10 @@ type
DatagramCallback* = proc(transp: DatagramTransport,
remote: TransportAddress): Future[void] {.
gcsafe, raises: [].}
async: (raises: []).}
UnsafeDatagramCallback* = proc(transp: DatagramTransport,
remote: TransportAddress): Future[void] {.async.}
DatagramTransport* = ref object of RootRef
fd*: AsyncFD # File descriptor
@ -35,7 +38,7 @@ type
flags: set[ServerFlags] # Flags
buffer: seq[byte] # Reading buffer
buflen: int # Reading buffer effective size
error: ref CatchableError # Current error
error: ref TransportError # Current error
queue: Deque[GramVector] # Writer queue
local: TransportAddress # Local address
remote: TransportAddress # Remote address
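The safe callback type requires a `raises: []` annotation. A hedged echo-style sketch (port number and payload handling are arbitrary) of wiring such a callback into `newDatagramTransport`, including the new `dualstack` parameter:

import chronos

proc onDatagram(transp: DatagramTransport,
                remote: TransportAddress): Future[void] {.
    async: (raises: []).} =
  ## Echo each datagram back to its sender, swallowing transport errors so
  ## that the callback honours its empty raises list.
  try:
    let msg = transp.getMessage()
    await transp.sendTo(remote, msg)
  except TransportError:
    discard
  except CancelledError:
    discard

when isMainModule:
  let server = newDatagramTransport(
    onDatagram,
    local = initTAddress("127.0.0.1:9000"),
    dualstack = DualStackType.Auto)
  waitFor(server.join())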
@ -247,57 +250,65 @@ when defined(windows):
udata: pointer,
child: DatagramTransport,
bufferSize: int,
ttl: int): DatagramTransport {.
ttl: int,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
var localSock: AsyncFD
doAssert(remote.family == local.family)
doAssert(not isNil(cbproc))
doAssert(remote.family in {AddressFamily.IPv4, AddressFamily.IPv6})
var res = if isNil(child): DatagramTransport() else: child
if sock == asyncInvalidSocket:
localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM,
Protocol.IPPROTO_UDP)
if localSock == asyncInvalidSocket:
raiseTransportOsError(osLastError())
else:
if not setSocketBlocking(SocketHandle(sock), false):
raiseTransportOsError(osLastError())
localSock = sock
let bres = register2(localSock)
if bres.isErr():
raiseTransportOsError(bres.error())
let localSock =
if sock == asyncInvalidSocket:
let proto =
if local.family == AddressFamily.Unix:
Protocol.IPPROTO_IP
else:
Protocol.IPPROTO_UDP
let res = createAsyncSocket2(local.getDomain(), SockType.SOCK_DGRAM,
proto)
if res.isErr():
raiseTransportOsError(res.error)
res.get()
else:
setDescriptorBlocking(SocketHandle(sock), false).isOkOr:
raiseTransportOsError(error)
register2(sock).isOkOr:
raiseTransportOsError(error)
sock
## Apply ServerFlags here
if ServerFlags.ReuseAddr in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEADDR, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ServerFlags.ReusePort in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ServerFlags.Broadcast in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_BROADCAST, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ttl > 0:
if not setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IP_TTL, ttl):
let err = osLastError()
setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IP_TTL, ttl).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
## IPV6_V6ONLY
if sock == asyncInvalidSocket:
setDualstack(localSock, local.family, dualstack).isOkOr:
closeSocket(localSock)
raiseTransportOsError(error)
else:
setDualstack(localSock, dualstack).isOkOr:
raiseTransportOsError(error)
## Fix for Q263823.
var bytesRet: DWORD
@ -457,70 +468,75 @@ else:
udata: pointer,
child: DatagramTransport,
bufferSize: int,
ttl: int): DatagramTransport {.
ttl: int,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
var localSock: AsyncFD
doAssert(remote.family == local.family)
doAssert(not isNil(cbproc))
var res = if isNil(child): DatagramTransport() else: child
if sock == asyncInvalidSocket:
var proto = Protocol.IPPROTO_UDP
if local.family == AddressFamily.Unix:
# `Protocol` enum is missing `0` value, so we making here cast, until
# `Protocol` enum will not support IPPROTO_IP == 0.
proto = cast[Protocol](0)
localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM,
proto)
if localSock == asyncInvalidSocket:
raiseTransportOsError(osLastError())
else:
if not setSocketBlocking(SocketHandle(sock), false):
raiseTransportOsError(osLastError())
localSock = sock
let bres = register2(localSock)
if bres.isErr():
raiseTransportOsError(bres.error())
let localSock =
if sock == asyncInvalidSocket:
let proto =
if local.family == AddressFamily.Unix:
Protocol.IPPROTO_IP
else:
Protocol.IPPROTO_UDP
let res = createAsyncSocket2(local.getDomain(), SockType.SOCK_DGRAM,
proto)
if res.isErr():
raiseTransportOsError(res.error)
res.get()
else:
setDescriptorBlocking(SocketHandle(sock), false).isOkOr:
raiseTransportOsError(error)
register2(sock).isOkOr:
raiseTransportOsError(error)
sock
## Apply ServerFlags here
if ServerFlags.ReuseAddr in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEADDR, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ServerFlags.ReusePort in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ServerFlags.Broadcast in flags:
if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1):
let err = osLastError()
setSockOpt2(localSock, SOL_SOCKET, SO_BROADCAST, 1).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
raiseTransportOsError(error)
if ttl > 0:
let tres =
if local.family == AddressFamily.IPv4:
setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IP_MULTICAST_TTL,
cint(ttl))
elif local.family == AddressFamily.IPv6:
setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IPV6_MULTICAST_HOPS,
cint(ttl))
else:
raiseAssert "Unsupported address bound to local socket"
if local.family == AddressFamily.IPv4:
setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IP_MULTICAST_TTL,
cint(ttl)).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(error)
elif local.family == AddressFamily.IPv6:
setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IPV6_MULTICAST_HOPS,
cint(ttl)).isOkOr:
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(error)
else:
raiseAssert "Unsupported address bound to local socket"
if not tres:
let err = osLastError()
if sock == asyncInvalidSocket:
closeSocket(localSock)
raiseTransportOsError(err)
## IPV6_V6ONLY
if sock == asyncInvalidSocket:
setDualstack(localSock, local.family, dualstack).isOkOr:
closeSocket(localSock)
raiseTransportOsError(error)
else:
setDualstack(localSock, dualstack).isOkOr:
raiseTransportOsError(error)
if local.family != AddressFamily.None:
var saddr: Sockaddr_storage
@ -586,6 +602,41 @@ proc close*(transp: DatagramTransport) =
transp.state.incl({WriteClosed, ReadClosed})
closeSocket(transp.fd, continuation)
proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback,
remote: TransportAddress,
local: TransportAddress,
sock: AsyncFD,
flags: set[ServerFlags],
udata: pointer,
child: DatagramTransport,
bufferSize: int,
ttl: int,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
## Create new UDP datagram transport.
##
## ``cbproc`` - callback which will be called when a new datagram is received.
## ``remote`` - bind transport to remote address (optional).
## ``local`` - bind transport to local address (for serving incoming
## datagrams, optional).
## ``sock`` - application-driven socket to use.
## ``flags`` - flags that will be applied to the socket.
## ``udata`` - custom argument which will be passed to ``cbproc``.
## ``bufferSize`` - size of the internal buffer.
## ``ttl`` - TTL for UDP datagram packets (only usable when ``flags``
## contains the ``Broadcast`` option).
proc wrap(transp: DatagramTransport,
remote: TransportAddress) {.async: (raises: []).} =
try:
cbproc(transp, remote)
except CatchableError as exc:
raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg
newDatagramTransportCommon(wrap, remote, local, sock, flags, udata, child,
bufferSize, ttl, dualstack)
proc newDatagramTransport*(cbproc: DatagramCallback,
remote: TransportAddress = AnyAddress,
local: TransportAddress = AnyAddress,
@ -594,8 +645,9 @@ proc newDatagramTransport*(cbproc: DatagramCallback,
udata: pointer = nil,
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0
): DatagramTransport {.
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
## Create new UDP datagram transport (IPv4).
##
@ -610,7 +662,7 @@ proc newDatagramTransport*(cbproc: DatagramCallback,
## ``ttl`` - TTL for UDP datagram packets (only usable when ``flags``
## contains the ``Broadcast`` option).
newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child,
bufSize, ttl)
bufSize, ttl, dualstack)
proc newDatagramTransport*[T](cbproc: DatagramCallback,
udata: ref T,
@ -620,13 +672,15 @@ proc newDatagramTransport*[T](cbproc: DatagramCallback,
flags: set[ServerFlags] = {},
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0
): DatagramTransport {.
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
var fflags = flags + {GCUserData}
GC_ref(udata)
newDatagramTransportCommon(cbproc, remote, local, sock, fflags,
cast[pointer](udata), child, bufSize, ttl)
cast[pointer](udata), child, bufSize, ttl,
dualstack)
proc newDatagramTransport6*(cbproc: DatagramCallback,
remote: TransportAddress = AnyAddress6,
@ -636,8 +690,9 @@ proc newDatagramTransport6*(cbproc: DatagramCallback,
udata: pointer = nil,
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0
): DatagramTransport {.
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
## Create new UDP datagram transport (IPv6).
##
@ -652,7 +707,7 @@ proc newDatagramTransport6*(cbproc: DatagramCallback,
## ``ttl`` - TTL for UDP datagram packets (only usable when ``flags``
## contains the ``Broadcast`` option).
newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child,
bufSize, ttl)
bufSize, ttl, dualstack)
proc newDatagramTransport6*[T](cbproc: DatagramCallback,
udata: ref T,
@ -662,15 +717,112 @@ proc newDatagramTransport6*[T](cbproc: DatagramCallback,
flags: set[ServerFlags] = {},
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0
): DatagramTransport {.
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError].} =
var fflags = flags + {GCUserData}
GC_ref(udata)
newDatagramTransportCommon(cbproc, remote, local, sock, fflags,
cast[pointer](udata), child, bufSize, ttl)
cast[pointer](udata), child, bufSize, ttl,
dualstack)
proc join*(transp: DatagramTransport): Future[void] =
proc newDatagramTransport*(cbproc: UnsafeDatagramCallback,
remote: TransportAddress = AnyAddress,
local: TransportAddress = AnyAddress,
sock: AsyncFD = asyncInvalidSocket,
flags: set[ServerFlags] = {},
udata: pointer = nil,
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError],
deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} =
## Create new UDP datagram transport (IPv4).
##
## ``cbproc`` - callback which will be called when a new datagram is received.
## ``remote`` - bind transport to remote address (optional).
## ``local`` - bind transport to local address (for serving incoming
## datagrams, optional).
## ``sock`` - application-driven socket to use.
## ``flags`` - flags that will be applied to the socket.
## ``udata`` - custom argument which will be passed to ``cbproc``.
## ``bufSize`` - size of the internal buffer.
## ``ttl`` - TTL for UDP datagram packets (only usable when ``flags``
## contains the ``Broadcast`` option).
newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child,
bufSize, ttl, dualstack)
proc newDatagramTransport*[T](cbproc: UnsafeDatagramCallback,
udata: ref T,
remote: TransportAddress = AnyAddress,
local: TransportAddress = AnyAddress,
sock: AsyncFD = asyncInvalidSocket,
flags: set[ServerFlags] = {},
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError],
deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} =
var fflags = flags + {GCUserData}
GC_ref(udata)
newDatagramTransportCommon(cbproc, remote, local, sock, fflags,
cast[pointer](udata), child, bufSize, ttl,
dualstack)
proc newDatagramTransport6*(cbproc: UnsafeDatagramCallback,
remote: TransportAddress = AnyAddress6,
local: TransportAddress = AnyAddress6,
sock: AsyncFD = asyncInvalidSocket,
flags: set[ServerFlags] = {},
udata: pointer = nil,
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError],
deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} =
## Create new UDP datagram transport (IPv6).
##
## ``cbproc`` - callback which will be called when a new datagram is received.
## ``remote`` - bind transport to remote address (optional).
## ``local`` - bind transport to local address (for serving incoming
## datagrams, optional).
## ``sock`` - application-driven socket to use.
## ``flags`` - flags that will be applied to the socket.
## ``udata`` - custom argument which will be passed to ``cbproc``.
## ``bufSize`` - size of the internal buffer.
## ``ttl`` - TTL for UDP datagram packets (only usable when ``flags``
## contains the ``Broadcast`` option).
newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child,
bufSize, ttl, dualstack)
proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback,
udata: ref T,
remote: TransportAddress = AnyAddress6,
local: TransportAddress = AnyAddress6,
sock: AsyncFD = asyncInvalidSocket,
flags: set[ServerFlags] = {},
child: DatagramTransport = nil,
bufSize: int = DefaultDatagramBufferSize,
ttl: int = 0,
dualstack = DualStackType.Auto
): DatagramTransport {.
raises: [TransportOsError],
deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} =
var fflags = flags + {GCUserData}
GC_ref(udata)
newDatagramTransportCommon(cbproc, remote, local, sock, fflags,
cast[pointer](udata), child, bufSize, ttl,
dualstack)
proc join*(transp: DatagramTransport): Future[void] {.
async: (raw: true, raises: [CancelledError]).} =
## Wait until the transport ``transp`` is closed.
var retFuture = newFuture[void]("datagram.transport.join")
@ -688,13 +840,35 @@ proc join*(transp: DatagramTransport): Future[void] =
return retFuture
proc closeWait*(transp: DatagramTransport): Future[void] =
proc closeWait*(transp: DatagramTransport): Future[void] {.
async: (raw: true, raises: []).} =
## Close transport ``transp`` and release all resources.
let retFuture = newFuture[void](
"datagram.transport.closeWait", {FutureFlag.OwnCancelSchedule})
if {ReadClosed, WriteClosed} * transp.state != {}:
retFuture.complete()
return retFuture
proc continuation(udata: pointer) {.gcsafe.} =
retFuture.complete()
proc cancellation(udata: pointer) {.gcsafe.} =
# We are not going to change the state of `retFuture` to cancelled, so we
# will prevent the entire sequence of Futures from being cancelled.
discard
transp.close()
transp.join()
if transp.future.finished():
retFuture.complete()
else:
transp.future.addCallback(continuation, cast[pointer](retFuture))
retFuture.cancelCallback = cancellation
retFuture
proc send*(transp: DatagramTransport, pbytes: pointer,
nbytes: int): Future[void] =
nbytes: int): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport
## ``transp`` to the remote destination address bound to the transport.
var retFuture = newFuture[void]("datagram.transport.send(pointer)")
@ -712,22 +886,21 @@ proc send*(transp: DatagramTransport, pbytes: pointer,
return retFuture
proc send*(transp: DatagramTransport, msg: sink string,
msglen = -1): Future[void] =
msglen = -1): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send string ``msg`` using transport ``transp`` to remote destination
## address which was bound to the transport.
var retFuture = newFutureStr[void]("datagram.transport.send(string)")
var retFuture = newFuture[void]("datagram.transport.send(string)")
transp.checkClosed(retFuture)
when declared(shallowCopy):
if not(isLiteral(msg)):
shallowCopy(retFuture.gcholder, msg)
else:
retFuture.gcholder = msg
else:
retFuture.gcholder = msg
let length = if msglen <= 0: len(msg) else: msglen
let vector = GramVector(kind: WithoutAddress, buf: addr retFuture.gcholder[0],
var localCopy = chronosMoveSink(msg)
retFuture.addCallback(proc(_: pointer) = reset(localCopy))
let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0],
buflen: length,
writer: cast[Future[void]](retFuture))
writer: retFuture)
transp.queue.addLast(vector)
if WritePaused in transp.state:
let wres = transp.resumeWrite()
@ -736,22 +909,20 @@ proc send*(transp: DatagramTransport, msg: sink string,
return retFuture
proc send*[T](transp: DatagramTransport, msg: sink seq[T],
msglen = -1): Future[void] =
msglen = -1): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send sequence ``msg`` using transport ``transp`` to remote destination
## address which was bound to the transport.
var retFuture = newFutureSeq[void, T]("datagram.transport.send(seq)")
var retFuture = newFuture[void]("datagram.transport.send(seq)")
transp.checkClosed(retFuture)
when declared(shallowCopy):
if not(isLiteral(msg)):
shallowCopy(retFuture.gcholder, msg)
else:
retFuture.gcholder = msg
else:
retFuture.gcholder = msg
let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T))
let vector = GramVector(kind: WithoutAddress, buf: addr retFuture.gcholder[0],
var localCopy = chronosMoveSink(msg)
retFuture.addCallback(proc(_: pointer) = reset(localCopy))
let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0],
buflen: length,
writer: cast[Future[void]](retFuture))
writer: retFuture)
transp.queue.addLast(vector)
if WritePaused in transp.state:
let wres = transp.resumeWrite()
@ -760,7 +931,8 @@ proc send*[T](transp: DatagramTransport, msg: sink seq[T],
return retFuture
proc sendTo*(transp: DatagramTransport, remote: TransportAddress,
pbytes: pointer, nbytes: int): Future[void] =
pbytes: pointer, nbytes: int): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport
## ``transp`` to remote destination address ``remote``.
var retFuture = newFuture[void]("datagram.transport.sendTo(pointer)")
@ -775,22 +947,20 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress,
return retFuture
proc sendTo*(transp: DatagramTransport, remote: TransportAddress,
msg: sink string, msglen = -1): Future[void] =
msg: sink string, msglen = -1): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send string ``msg`` using transport ``transp`` to remote destination
## address ``remote``.
var retFuture = newFutureStr[void]("datagram.transport.sendTo(string)")
var retFuture = newFuture[void]("datagram.transport.sendTo(string)")
transp.checkClosed(retFuture)
when declared(shallowCopy):
if not(isLiteral(msg)):
shallowCopy(retFuture.gcholder, msg)
else:
retFuture.gcholder = msg
else:
retFuture.gcholder = msg
let length = if msglen <= 0: len(msg) else: msglen
let vector = GramVector(kind: WithAddress, buf: addr retFuture.gcholder[0],
var localCopy = chronosMoveSink(msg)
retFuture.addCallback(proc(_: pointer) = reset(localCopy))
let vector = GramVector(kind: WithAddress, buf: addr localCopy[0],
buflen: length,
writer: cast[Future[void]](retFuture),
writer: retFuture,
address: remote)
transp.queue.addLast(vector)
if WritePaused in transp.state:
@ -800,20 +970,17 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress,
return retFuture
proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress,
msg: sink seq[T], msglen = -1): Future[void] =
msg: sink seq[T], msglen = -1): Future[void] {.
async: (raw: true, raises: [TransportError, CancelledError]).} =
## Send sequence ``msg`` using transport ``transp`` to remote destination
## address ``remote``.
var retFuture = newFutureSeq[void, T]("datagram.transport.sendTo(seq)")
var retFuture = newFuture[void]("datagram.transport.sendTo(seq)")
transp.checkClosed(retFuture)
when declared(shallowCopy):
if not(isLiteral(msg)):
shallowCopy(retFuture.gcholder, msg)
else:
retFuture.gcholder = msg
else:
retFuture.gcholder = msg
let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T))
let vector = GramVector(kind: WithAddress, buf: addr retFuture.gcholder[0],
var localCopy = chronosMoveSink(msg)
retFuture.addCallback(proc(_: pointer) = reset(localCopy))
let vector = GramVector(kind: WithAddress, buf: addr localCopy[0],
buflen: length,
writer: cast[Future[void]](retFuture),
address: remote)
@ -825,7 +992,7 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress,
return retFuture
proc peekMessage*(transp: DatagramTransport, msg: var seq[byte],
msglen: var int) {.raises: [CatchableError].} =
msglen: var int) {.raises: [TransportError].} =
## Get access to internal message buffer and length of incoming datagram.
if ReadError in transp.state:
transp.state.excl(ReadError)
@ -837,7 +1004,7 @@ proc peekMessage*(transp: DatagramTransport, msg: var seq[byte],
msglen = transp.buflen
proc getMessage*(transp: DatagramTransport): seq[byte] {.
raises: [CatchableError].} =
raises: [TransportError].} =
## Copy data from internal message buffer and return result.
var default: seq[byte]
if ReadError in transp.state:

View File

@ -677,10 +677,10 @@ when defined(linux):
var msg = cast[ptr NlMsgHeader](addr data[0])
var endflag = false
while NLMSG_OK(msg, length):
if msg.nlmsg_type == NLMSG_ERROR:
if msg.nlmsg_type in [uint16(NLMSG_DONE), uint16(NLMSG_ERROR)]:
endflag = true
break
else:
elif msg.nlmsg_type == RTM_NEWROUTE:
res = processRoute(msg)
endflag = true
break

File diff suppressed because it is too large Load Diff

View File

@ -21,9 +21,9 @@ template asyncTest*(name: string, body: untyped): untyped =
template checkLeaks*(name: string): untyped =
let counter = getTrackerCounter(name)
if counter.opened != counter.closed:
echo "[" & name & "] opened = ", counter.opened,
", closed = ", counter.closed
checkpoint:
"[" & name & "] opened = " & $counter.opened &
", closed = " & $ counter.closed
check counter.opened == counter.closed
template checkLeaks*(): untyped =

1
docs/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
book

20
docs/book.toml Normal file
View File

@ -0,0 +1,20 @@
[book]
authors = ["Jacek Sieka"]
language = "en"
multilingual = false
src = "src"
title = "Chronos"
[preprocessor.toc]
command = "mdbook-toc"
renderer = ["html"]
max-level = 2
[preprocessor.open-on-gh]
command = "mdbook-open-on-gh"
renderer = ["html"]
[output.html]
git-repository-url = "https://github.com/status-im/nim-chronos/"
git-branch = "master"
additional-css = ["open-in.css"]

View File

@ -0,0 +1,21 @@
## Simple cancellation example
import chronos
proc someTask() {.async.} = await sleepAsync(10.minutes)
proc cancellationExample() {.async.} =
# Start a task but don't wait for it to finish
let future = someTask()
future.cancelSoon()
# `cancelSoon` schedules but does not wait for the future to get cancelled -
# it might still be pending here
let future2 = someTask() # Start another task concurrently
await future2.cancelAndWait()
# Using `cancelAndWait`, we can be sure that `future2` is either
# complete, failed or cancelled at this point. `future` could still be
# pending!
assert future2.finished()
waitFor(cancellationExample())

View File

@ -0,0 +1,28 @@
## The peculiarities of `discard` in `async` procedures
import chronos
proc failingOperation() {.async.} =
echo "Raising!"
raise (ref ValueError)(msg: "My error")
proc myApp() {.async.} =
# This style of discard causes the `ValueError` to be discarded, hiding the
# failure of the operation - avoid!
discard failingOperation()
proc runAsTask(fut: Future[void]): Future[void] {.async: (raises: []).} =
# runAsTask uses `raises: []` to ensure at compile-time that no exceptions
# escape it!
try:
await fut
except CatchableError as exc:
echo "The task failed! ", exc.msg
# asyncSpawn ensures that errors don't leak unnoticed from tasks without
# blocking:
asyncSpawn runAsTask(failingOperation())
# If we didn't catch the exception with `runAsTask`, the program will crash:
asyncSpawn failingOperation()
waitFor myApp()

15
docs/examples/httpget.nim Normal file
View File

@ -0,0 +1,15 @@
import chronos/apps/http/httpclient
proc retrievePage*(uri: string): Future[string] {.async.} =
# Create a new HTTP session
let httpSession = HttpSessionRef.new()
try:
# Fetch page contents
let resp = await httpSession.fetch(parseUri(uri))
# Convert response to a string, assuming its encoding matches the terminal!
bytesToString(resp.data)
finally: # Close the session
await noCancel(httpSession.closeWait())
echo waitFor retrievePage(
"https://raw.githubusercontent.com/status-im/nim-chronos/master/README.md")

1
docs/examples/nim.cfg Normal file
View File

@ -0,0 +1 @@
path = "../.."

View File

@ -0,0 +1,25 @@
## Single timeout for several operations
import chronos
proc shortTask {.async.} =
try:
await sleepAsync(1.seconds)
except CancelledError as exc:
echo "Short task was cancelled!"
raise exc # Propagate cancellation to the next operation
proc composedTimeout() {.async.} =
let
# Common timeout for several sub-tasks
timeout = sleepAsync(10.seconds)
while not timeout.finished():
let task = shortTask() # Start a task but don't `await` it
if (await race(task, timeout)) == task:
echo "Ran one more task"
else:
# This cancellation may or may not happen, as the task might have finished
# right at the timeout!
task.cancelSoon()
waitFor composedTimeout()

View File

@ -0,0 +1,20 @@
## Simple timeouts
import chronos
proc longTask {.async.} =
try:
await sleepAsync(10.minutes)
except CancelledError as exc:
echo "Long task was cancelled!"
raise exc # Propagate cancellation to the next operation
proc simpleTimeout() {.async.} =
let
task = longTask() # Start a task but don't `await` it
if not await task.withTimeout(1.seconds):
echo "Timeout reached - withTimeout should have cancelled the task"
else:
echo "Task completed"
waitFor simpleTimeout()

24
docs/examples/twogets.nim Normal file
View File

@ -0,0 +1,24 @@
## Make two http requests concurrently and output the one that wins
import chronos
import ./httpget
proc twoGets() {.async.} =
let
futs = @[
# Both pages will start downloading concurrently...
httpget.retrievePage("https://duckduckgo.com/?q=chronos"),
httpget.retrievePage("https://www.google.fr/search?q=chronos")
]
# Wait for at least one request to finish..
let winner = await one(futs)
# ..and cancel the others since we won't need them
for fut in futs:
# Trying to cancel an already-finished future is harmless
fut.cancelSoon()
# An exception could be raised here if the winning request failed!
echo "Result: ", winner.read()
waitFor(twoGets())

7
docs/open-in.css Normal file
View File

@ -0,0 +1,7 @@
footer {
font-size: 0.8em;
text-align: center;
border-top: 1px solid black;
padding: 5px 0;
}

14
docs/src/SUMMARY.md Normal file
View File

@ -0,0 +1,14 @@
- [Introduction](./introduction.md)
- [Examples](./examples.md)
# User guide
- [Core concepts](./concepts.md)
- [`async` functions](async_procs.md)
- [Errors and exceptions](./error_handling.md)
- [Tips, tricks and best practices](./tips.md)
- [Porting code to `chronos`](./porting.md)
# Developer guide
- [Updating this book](./book.md)

123
docs/src/async_procs.md Normal file
View File

@ -0,0 +1,123 @@
# Async procedures
Async procedures are those that interact with `chronos` to cooperatively
suspend and resume their execution depending on the completion of other
async procedures, timers, tasks on other threads or asynchronous I/O scheduled
with the operating system.
Async procedures are marked with the `{.async.}` pragma and return a `Future`
indicating the state of the operation.
<!-- toc -->
## The `async` pragma
The `{.async.}` pragma will transform a procedure (or a method) returning a
`Future` into a closure iterator. If there is no return type specified,
`Future[void]` is returned.
```nim
proc p() {.async.} =
await sleepAsync(100.milliseconds)
echo p().type # prints "Future[system.void]"
```
## `await` keyword
The `await` keyword operates on `Future` instances typically returned from an
`async` procedure.
Whenever `await` is encountered inside an async procedure, control is given
back to the dispatcher for as many steps as necessary for the awaited
future to complete, fail or be cancelled. `await` calls the
equivalent of `Future.read()` on the completed future to return the
encapsulated value when the operation finishes.
```nim
proc p1() {.async.} =
await sleepAsync(1.seconds)
proc p2() {.async.} =
await sleepAsync(1.seconds)
proc p3() {.async.} =
let
fut1 = p1()
fut2 = p2()
# Just by executing the async procs, both resulting futures entered the
# dispatcher queue and their "clocks" started ticking.
await fut1
await fut2
# Only one second passed while awaiting them both, not two.
waitFor p3()
```
```admonish warning
Because `async` procedures are executed concurrently, they are subject to many
of the same risks that typically accompany multithreaded programming.
In particular, if two `async` procedures have access to the same mutable state,
the value observed before and after an `await` might not be the same, since the order of execution is not guaranteed!
```
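The following is a minimal sketch of how such a data race between two `async` procedures can look in practice - the `demo` and `bump` names are made up for illustration and are not part of the example set:
```nim
import chronos
proc demo() {.async.} =
  var counter = 0
  proc bump() {.async.} =
    let before = counter              # read the shared state..
    await sleepAsync(10.milliseconds) # ..suspend; the other task runs here..
    counter = before + 1              # ..and write back a possibly stale value
  let
    fut1 = bump()
    fut2 = bump()
  await fut1
  await fut2
  # Both tasks read `counter` as 0 before either wrote it back, so this
  # prints 1 rather than the 2 one might expect.
  echo "counter = ", counter
waitFor demo()
```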
## Raw async procedures
Raw async procedures are those that interact with `chronos` via the `Future`
type but whose body does not go through the async transformation.
Such functions are created by adding `raw: true` to the `async` parameters:
```nim
proc rawAsync(): Future[void] {.async: (raw: true).} =
let fut = newFuture[void]("rawAsync")
fut.complete()
fut
```
Raw functions must not raise exceptions directly - they are implicitly declared
as `raises: []` - instead they should store exceptions in the returned `Future`:
```nim
proc rawFailure(): Future[void] {.async: (raw: true).} =
let fut = newFuture[void]("rawAsync")
fut.fail((ref ValueError)(msg: "Oh no!"))
fut
```
Raw procedures can also use checked exceptions:
```nim
proc rawAsyncRaises(): Future[void] {.async: (raw: true, raises: [IOError]).} =
let fut = newFuture[void]()
assert not (compiles do: fut.fail((ref ValueError)(msg: "uh-uh")))
fut.fail((ref IOError)(msg: "IO"))
fut
```
## Callbacks and closures
Callback/closure types are declared using the `async` annotation as usual:
```nim
type MyCallback = proc(): Future[void] {.async.}
proc runCallback(cb: MyCallback) {.async: (raises: []).} =
try:
await cb()
except CatchableError:
discard # handle errors as usual
```
When calling a callback, it is important to remember that it may raise exceptions that need to be handled.
Checked exceptions can be used to limit the exceptions that a callback can
raise:
```nim
type MyEasyCallback = proc(): Future[void] {.async: (raises: []).}
proc runCallback(cb: MyEasyCallback) {.async: (raises: [])} =
await cb()
```

134
docs/src/concepts.md Normal file
View File

@ -0,0 +1,134 @@
# Concepts
Async/await is a programming model that relies on cooperative multitasking to
coordinate the concurrent execution of procedures, using event notifications
from the operating system or other threads to resume execution.
<!-- toc -->
## The dispatcher
The event handler loop is called a "dispatcher" and a single instance per
thread is created, as soon as one is needed.
Scheduling is done by calling [async procedures](./async_procs.md) that return
`Future` objects - each time a procedure is unable to make further
progress, for example because it's waiting for some data to arrive, it hands
control back to the dispatcher which ensures that the procedure is resumed when
ready.
A single thread, and thus a single dispatcher, is typically able to handle
thousands of concurrent in-progress requests.
## The `Future` type
`Future` objects encapsulate the outcome of executing an `async` procedure. The
`Future` may be `pending` meaning that the outcome is not yet known or
`finished` meaning that the return value is available, the operation failed
with an exception or was cancelled.
Inside an async procedure, you can `await` the outcome of another async
procedure - if the `Future` representing that operation is still `pending`, a
callback representing where to resume execution will be added to it and the
dispatcher will be given back control to deal with other tasks.
When a `Future` is `finished`, all its callbacks are scheduled to be run by
the dispatcher, thus continuing any operations that were waiting for an outcome.
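Although `await` normally takes care of this for you, the callback machinery can also be used directly via `addCallback` - a minimal sketch, where the future's name string is made up for illustration:
```nim
import chronos
let fut = newFuture[void]("docs.callback.demo")
# The callback is scheduled by the dispatcher once the future is finished
fut.addCallback(proc(udata: pointer) = echo "future finished!")
fut.complete() # finishing the future schedules its callbacks..
poll()         # ..and a single dispatcher step runs them
```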
## The `poll` call
To trigger the processing step of the dispatcher, we need to call `poll()` -
either directly or through a wrapper like `runForever()` or `waitFor()`.
Each call to poll handles any file descriptors, timers and callbacks that are
ready to be processed.
Using `waitFor`, the result of a single asynchronous operation can be obtained:
```nim
proc myApp() {.async.} =
echo "Waiting for a second..."
await sleepAsync(1.seconds)
echo "done!"
waitFor myApp()
```
It is also possible to keep running the event loop forever using `runForever`:
```nim
proc myApp() {.async.} =
while true:
await sleepAsync(1.seconds)
echo "A bit more than a second passed!"
let future = myApp()
runForever()
```
Such an application never terminates, thus applications are rarely
structured this way.
```admonish warning
Both `waitFor` and `runForever` call `poll` which offers fine-grained
control over the event loop steps.
Nested calls to `poll` - directly or indirectly via `waitFor` and `runForever` -
are not allowed.
```
## Cancellation
Any pending `Future` can be cancelled. This can be used for timeouts, to start
multiple parallel operations and cancel the rest as soon as one finishes,
to initiate the orderly shutdown of an application, etc.
```nim
{{#include ../examples/cancellation.nim}}
```
Even if cancellation is initiated, it is not guaranteed that the operation gets
cancelled - the future might still be completed or fail depending on the
order of events in the dispatcher and the specifics of the operation.
If the future indeed gets cancelled, `await` will raise a
`CancelledError` as is likely to happen in the following example:
```nim
proc c1 {.async.} =
echo "Before sleep"
try:
await sleepAsync(10.minutes)
echo "After sleep" # not reach due to cancellation
except CancelledError as exc:
echo "We got cancelled!"
# `CancelledError` is typically re-raised to notify the caller that the
# operation is being cancelled
raise exc
proc c2 {.async.} =
await c1()
echo "Never reached, since the CancelledError got re-raised"
let work = c2()
waitFor(work.cancelAndWait())
```
The `CancelledError` will now travel up the stack like any other exception.
It can be caught, for instance to free some resources, and is then typically
re-raised so that the whole chain of operations gets cancelled.
Alternatively, the cancellation request can be translated to a regular outcome of the operation - for example, a `read` operation might return an empty result.
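A sketch of this pattern might look as follows - `readOrEmpty` is a hypothetical helper, not part of chronos:
```nim
import chronos
proc readOrEmpty(fut: Future[string]): Future[string] {.async.} =
  # Translate cancellation of `fut` into a regular, empty result instead of
  # letting `CancelledError` reach the caller
  try:
    return await fut
  except CancelledError:
    return ""
proc demo() {.async.} =
  let source = newFuture[string]("docs.cancellation.source")
  let reader = readOrEmpty(source)
  await source.cancelAndWait() # cancel the underlying operation..
  let res = await reader       # ..the reader still completes normally
  echo "empty result: ", res.len == 0
waitFor demo()
```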
Cancelling an already-finished `Future` has no effect, as the following example
of downloading two web pages concurrently shows:
```nim
{{#include ../examples/twogets.nim}}
```
## Compile-time configuration
`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant.
Strictness options will generally become the default in future chronos releases, allowing existing code to be adapted ahead of the switch - see the [`config.nim`](./chronos/config.nim) module for more information.
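For instance, the event engine selected by these options can be inspected at compile time from the `config` module - a small sketch, mirroring how the test suite in this changeset checks the engine, and not itself part of the manual:
```nim
import chronos/config
# `chronosEventEngine` is fixed at compile time, e.g. via -d:chronosEventEngine=poll
when chronosEventEngine in ["epoll", "kqueue"]:
  echo "compiled with a native event engine: ", chronosEventEngine
elif chronosEventEngine == "poll":
  echo "compiled with the portable poll(2) engine"
else:
  echo "compiled for Windows IOCP or another backend"
```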

149
docs/src/error_handling.md Normal file
View File

@ -0,0 +1,149 @@
# Errors and exceptions
<!-- toc -->
## Exceptions
Exceptions inheriting from [`CatchableError`](https://nim-lang.org/docs/system.html#CatchableError)
interrupt execution of an `async` procedure. The exception is placed in the
`Future.error` field, the status of the `Future` changes to `Failed` and its
callbacks are scheduled.
When a future is read or awaited, the exception is re-raised, traversing the
`async` execution chain until it is handled.
```nim
proc p1() {.async.} =
await sleepAsync(1.seconds)
raise newException(ValueError, "ValueError inherits from CatchableError")
proc p2() {.async.} =
await sleepAsync(1.seconds)
proc p3() {.async.} =
let
fut1 = p1()
fut2 = p2()
await fut1
echo "unreachable code here"
await fut2
# `waitFor()` would call `Future.read()` unconditionally, which would raise the
# exception in `Future.error`.
let fut3 = p3()
while not(fut3.finished()):
poll()
echo "fut3.state = ", fut3.state # "Failed"
if fut3.failed():
echo "p3() failed: ", fut3.error.name, ": ", fut3.error.msg
# prints "p3() failed: ValueError: ValueError inherits from CatchableError"
```
You can put the `await` in a `try` block, to deal with that exception sooner:
```nim
proc p3() {.async.} =
let
fut1 = p1()
fut2 = p2()
try:
await fut1
except CatchableError:
echo "p1() failed: ", fut1.error.name, ": ", fut1.error.msg
echo "reachable code here"
await fut2
```
Because `chronos` ensures that all exceptions are re-routed to the `Future`,
`poll` will not itself raise exceptions.
`poll` may still panic / raise `Defect` if such are raised in user code due to
undefined behavior.
## Checked exceptions
By specifying a `raises` list for an async procedure, you can constrain which
exceptions it can raise:
```nim
proc p1(): Future[void] {.async: (raises: [IOError]).} =
assert not (compiles do: raise newException(ValueError, "uh-uh"))
raise newException(IOError, "works") # Or any child of IOError
proc p2(): Future[void] {.async: (raises: [IOError]).} =
await p1() # Works, because await knows that p1
# can only raise IOError
```
Under the hood, the return type of `p1` will be rewritten to an internal type
which conveys the `raises` information to `await`.
```admonition note
Most `async` procedures include `CancelledError` in the list of `raises`,
indicating that the operation they implement might get cancelled, resulting in
neither value nor error!
```
When using checked exceptions, the `Future` type is modified to include
`raises` information - it can be constructed with the `Raising` helper:
```nim
# Create a variable of the type that will be returned by an async function
# raising `[CancelledError]`:
var fut: Future[int].Raising([CancelledError])
```
```admonition note
`Raising` creates a specialization of `InternalRaisesFuture` type - as the name
suggests, this is an internal type whose implementation details are likely to
change in future `chronos` versions.
```
## The `Exception` type
Exceptions deriving from `Exception` are not caught by default as these may
include `Defect` and other forms of undefined or uncatchable behavior.
Because exception effect tracking is turned on for `async` functions, this may
sometimes lead to compile errors around forward declarations, methods and
closures, as Nim conservatively assumes that any `Exception` might be raised
from those.
Make sure to explicitly annotate these with `{.raises.}`:
```nim
# Forward declarations need to explicitly include a raises list:
proc myfunction() {.raises: [ValueError].}
# ... as do `proc` types
type MyClosure = proc() {.raises: [ValueError].}
proc myfunction() =
raise (ref ValueError)(msg: "Implementation here")
let closure: MyClosure = myfunction
```
For compatibility, `async` functions can be instructed to handle `Exception` as
well by specifying `handleException: true`. Any `Exception` that is not a `Defect`
and not a `CatchableError` will then be caught and remapped to
`AsyncExceptionError`:
```nim
proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} =
raise (ref Exception)(msg: "Raising Exception is UB")
proc callRaiseException() {.async: (raises: []).} =
try:
raiseException()
except AsyncExceptionError as exc:
# The original Exception is available from the `parent` field
echo exc.parent.msg
```
This mode can be enabled globally with `-d:chronosHandleException` as a help
when porting code to `chronos`, but should generally be avoided, as global
configuration settings may interfere with libraries that use `chronos`, leading
to unexpected behavior.

18
docs/src/examples.md Normal file
View File

@ -0,0 +1,18 @@
# Examples
Examples are available in the [`docs/examples/`](https://github.com/status-im/nim-chronos/tree/master/docs/examples/) folder.
## Basic concepts
* [cancellation](https://github.com/status-im/nim-chronos/tree/master/docs/examples/cancellation.nim) - Cancellation primer
* [timeoutsimple](https://github.com/status-im/nim-chronos/tree/master/docs/examples/timeoutsimple.nim) - Simple timeouts
* [timeoutcomposed](https://github.com/status-im/nim-chronos/tree/master/docs/examples/timeoutcomposed.nim) - Shared timeout of multiple tasks
## TCP
* [tcpserver](https://github.com/status-im/nim-chronos/tree/master/docs/examples/tcpserver.nim) - Simple TCP/IP v4/v6 echo server
## HTTP
* [httpget](https://github.com/status-im/nim-chronos/tree/master/docs/examples/httpget.nim) - Downloading a web page using the http client
* [twogets](https://github.com/status-im/nim-chronos/tree/master/docs/examples/twogets.nim) - Download two pages concurrently

View File

@ -0,0 +1,19 @@
## Getting started
Install `chronos` using `nimble`:
```text
nimble install chronos
```
or add a dependency to your `.nimble` file:
```text
requires "chronos"
```
and start using it:
```nim
{{#include ../examples/httpget.nim}}
```

50
docs/src/introduction.md Normal file
View File

@ -0,0 +1,50 @@
# Introduction
Chronos implements the [async/await](https://en.wikipedia.org/wiki/Async/await)
paradigm in a self-contained library using macro and closure iterator
transformation features provided by Nim.
Features include:
* Asynchronous socket and process I/O
* HTTP client / server with SSL/TLS support out of the box (no OpenSSL needed)
* Synchronization primitives like queues, events and locks
* [Cancellation](./concepts.md#cancellation)
* Efficient dispatch pipeline with excellent multi-platform support
* Exception [effect support](./error_handling.md)
## Installation
Install `chronos` using `nimble`:
```text
nimble install chronos
```
or add a dependency to your `.nimble` file:
```text
requires "chronos"
```
and start using it:
```nim
{{#include ../examples/httpget.nim}}
```
There are more [examples](./examples.md) throughout the manual!
## Platform support
Several platforms are supported, with different backend [options](./concepts.md#compile-time-configuration):
* Windows: [`IOCP`](https://learn.microsoft.com/en-us/windows/win32/fileio/i-o-completion-ports)
* Linux: [`epoll`](https://en.wikipedia.org/wiki/Epoll) / `poll`
* OSX / BSD: [`kqueue`](https://en.wikipedia.org/wiki/Kqueue) / `poll`
* Android / Emscripten / posix: `poll`
## API documentation
This guide covers basic usage of chronos - for details, see the
[API reference](./api/chronos.html).

59
docs/src/porting.md Normal file
View File

@ -0,0 +1,59 @@
# Porting code to `chronos` v4
<!-- toc -->
Thanks to its macro support, Nim allows `async`/`await` to be implemented in
libraries with only minimal support from the language - as such, multiple
`async` libraries exist, including `chronos` and `asyncdispatch`, and more may
be developed in the future.
## Chronos v3
Chronos v4 introduces new features for IPv6, exception effects, a stand-alone
`Future` type as well as several other changes - when upgrading from chronos v3,
here are several things to consider:
* Exception handling is now strict by default - see the [error handling](./error_handling.md)
chapter for how to deal with `raises` effects
* `AsyncEventBus` was removed - use `AsyncEventQueue` instead
* `Future.value` and `Future.error` panic when accessed in the wrong state
* `Future.read` and `Future.readError` raise `FutureError` instead of
`ValueError` when accessed in the wrong state
## `asyncdispatch`
Code written for `asyncdispatch` and `chronos` looks similar but there are
several differences to be aware of:
* `chronos` has its own dispatch loop - you can typically not mix `chronos` and
`asyncdispatch` in the same thread
* `import chronos` instead of `import asyncdispatch`
* cleanup is important - make sure to use `closeWait` to release any resources
you're using, or file descriptor and other leaks will ensue
* cancellation support means that `CancelledError` may be raised from most
`{.async.}` functions
* Calling `yield` directly in tasks is not supported - instead, use `awaitne`.
* `asyncSpawn` is used instead of `asyncCheck` - note that exceptions raised
in tasks that are `asyncSpawn`:ed cause a panic
## Supporting multiple backends
Libraries built on top of `async`/`await` may wish to support multiple async
backends - the best way to do so is to create separate modules for each backend
that may be imported side-by-side - see [nim-metrics](https://github.com/status-im/nim-metrics/blob/master/metrics/)
for an example.
An alternative way is to select the backend using a global compile flag - this
method makes it difficult to compose applications that use both backends, as may
happen with transitive dependencies, but may be appropriate in some cases -
libraries choosing this path should call the flag `asyncBackend`, allowing
applications to choose the backend with `-d:asyncBackend=<backend_name>`.
Known `async` backends include:
* `chronos` - this library (`-d:asyncBackend=chronos`)
* `asyncdispatch` - the standard library `asyncdispatch` [module](https://nim-lang.org/docs/asyncdispatch.html) (`-d:asyncBackend=asyncdispatch`)
* `none` - ``-d:asyncBackend=none`` - disable ``async`` support completely
``none`` can be used when a library supports both a synchronous and
asynchronous API, to disable the latter.
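As a rough, hypothetical sketch of the compile-flag approach (the module layout and file name are made up for illustration, not part of chronos), the flag typically maps to a `strdefine` constant and a `when` switch:
```nim
# mylib/asyncbackend.nim - hypothetical backend selection for a library
const asyncBackend {.strdefine.} = "chronos"
when asyncBackend == "chronos":
  import chronos
  export chronos
elif asyncBackend == "asyncdispatch":
  import std/asyncdispatch
  export asyncdispatch
elif asyncBackend == "none":
  discard # the asynchronous API is not compiled in at all
else:
  {.error: "unknown asyncBackend selected".}
```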

34
docs/src/tips.md Normal file
View File

@ -0,0 +1,34 @@
# Tips, tricks and best practices
## Timeouts
To prevent a single task from taking too long, `withTimeout` can be used:
```nim
{{#include ../examples/timeoutsimple.nim}}
```
When several tasks should share a single timeout, a common timer can be created
with `sleepAsync`:
```nim
{{#include ../examples/timeoutcomposed.nim}}
```
## `discard`
When calling an asynchronous procedure without `await`, the operation is started
but its result is not processed until the corresponding `Future` is `read`.
It is therefore important to never `discard` futures directly - instead, one
can discard the result of awaiting the future or use `asyncSpawn` to monitor
the outcome of the future as if it were running in a separate thread.
Similar to threads, tasks managed by `asyncSpawn` may cause the application to
crash if any exceptions leak out of them - use
[checked exceptions](./error_handling.md#checked-exceptions) to avoid this
problem.
```nim
{{#include ../examples/discards.nim}}
```

1
nim.cfg Normal file
View File

@ -0,0 +1 @@
nimcache = "build/nimcache/$projectName"

View File

@ -5,10 +5,22 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import testmacro, testsync, testsoon, testtime, testfut, testsignal,
testaddress, testdatagram, teststream, testserver, testbugs, testnet,
testasyncstream, testhttpserver, testshttpserver, testhttpclient,
testproc, testratelimit, testfutures, testthreadsync, testprofiler
import ".."/chronos/config
# Must be imported last to check for Pending futures
import testutils
when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows):
import testmacro, testsync, testsoon, testtime, testfut, testsignal,
testaddress, testdatagram, teststream, testserver, testbugs, testnet,
testasyncstream, testhttpserver, testshttpserver, testhttpclient,
testproc, testratelimit, testfutures, testthreadsync, testprofiler
# Must be imported last to check for Pending futures
import testutils
elif chronosEventEngine == "poll":
# The `poll` engine does not support signals and processes
import testmacro, testsync, testsoon, testtime, testfut, testaddress,
testdatagram, teststream, testserver, testbugs, testnet,
testasyncstream, testhttpserver, testshttpserver, testhttpclient,
testratelimit, testfutures, testthreadsync, testprofiler
# Must be imported last to check for Pending futures
import testutils

63
tests/testasyncstream.c Normal file
View File

@ -0,0 +1,63 @@
#include <brssl.h>
// This is the X509TrustAnchor for the SelfSignedRsaCert above
// Generated by doing the following:
// 1. Compile `brssl` from BearSSL
// 2. Run `brssl ta filewithSelfSignedRsaCert.pem`
// 3. Paste the output in the emit block below
// 4. Rename `TAs` to `SelfSignedTAs`
static const unsigned char TA0_DN[] = {
0x30, 0x5F, 0x31, 0x0B, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13,
0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08,
0x0C, 0x0A, 0x53, 0x6F, 0x6D, 0x65, 0x2D, 0x53, 0x74, 0x61, 0x74, 0x65,
0x31, 0x21, 0x30, 0x1F, 0x06, 0x03, 0x55, 0x04, 0x0A, 0x0C, 0x18, 0x49,
0x6E, 0x74, 0x65, 0x72, 0x6E, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67,
0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4C, 0x74, 0x64, 0x31,
0x18, 0x30, 0x16, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0C, 0x0F, 0x31, 0x32,
0x37, 0x2E, 0x30, 0x2E, 0x30, 0x2E, 0x31, 0x3A, 0x34, 0x33, 0x38, 0x30,
0x38
};
static const unsigned char TA0_RSA_N[] = {
0xA7, 0xEE, 0xD5, 0xC6, 0x2C, 0xA3, 0x08, 0x33, 0x33, 0x86, 0xB5, 0x5C,
0xD4, 0x8B, 0x16, 0xB1, 0xD7, 0xF7, 0xED, 0x95, 0x22, 0xDC, 0xA4, 0x40,
0x24, 0x64, 0xC3, 0x91, 0xBA, 0x20, 0x82, 0x9D, 0x88, 0xED, 0x20, 0x98,
0x46, 0x65, 0xDC, 0xD1, 0x15, 0x90, 0xBC, 0x7C, 0x19, 0x5F, 0x00, 0x96,
0x69, 0x2C, 0x80, 0x0E, 0x7D, 0x7D, 0x8B, 0xD9, 0xFD, 0x49, 0x66, 0xEC,
0x29, 0xC0, 0x39, 0x0E, 0x22, 0xF3, 0x6A, 0x28, 0xC0, 0x6B, 0x97, 0x93,
0x2F, 0x92, 0x5E, 0x5A, 0xCC, 0xF4, 0xF4, 0xAE, 0xD9, 0xE3, 0xBB, 0x0A,
0xDC, 0xA8, 0xDE, 0x4D, 0x16, 0xD6, 0xE6, 0x64, 0xF2, 0x85, 0x62, 0xF6,
0xE3, 0x7B, 0x1D, 0x9A, 0x5C, 0x6A, 0xA3, 0x97, 0x93, 0x16, 0x9D, 0x02,
0x2C, 0xFD, 0x90, 0x3E, 0xF8, 0x35, 0x44, 0x5E, 0x66, 0x8D, 0xF6, 0x80,
0xF1, 0x71, 0x9B, 0x2F, 0x44, 0xC0, 0xCA, 0x7E, 0xB1, 0x90, 0x7F, 0xD8,
0x8B, 0x7A, 0x85, 0x4B, 0xE3, 0xB1, 0xB1, 0xF4, 0xAA, 0x6A, 0x36, 0xA0,
0xFF, 0x24, 0xB2, 0x27, 0xE0, 0xBA, 0x62, 0x7A, 0xE9, 0x95, 0xC9, 0x88,
0x9D, 0x9B, 0xAB, 0xA4, 0x4C, 0xEA, 0x87, 0x46, 0xFA, 0xD6, 0x9B, 0x7E,
0xB2, 0xE9, 0x5B, 0xCA, 0x5B, 0x84, 0xC4, 0xF7, 0xB4, 0xC7, 0x69, 0xC5,
0x0B, 0x9A, 0x47, 0x9A, 0x86, 0xD4, 0xDF, 0xF3, 0x30, 0xC9, 0x6D, 0xB8,
0x78, 0x10, 0xEF, 0xA0, 0x89, 0xF8, 0x30, 0x80, 0x9D, 0x96, 0x05, 0x44,
0xB4, 0xFB, 0x98, 0x4C, 0x71, 0x6B, 0xBC, 0xD7, 0x5D, 0x66, 0x5E, 0x66,
0xA7, 0x94, 0xE5, 0x65, 0x72, 0x85, 0xBC, 0x7C, 0x7F, 0x11, 0x98, 0xF8,
0xCB, 0xD5, 0xE2, 0xB5, 0x67, 0x78, 0xF7, 0x49, 0x51, 0xC4, 0x7F, 0xBA,
0x16, 0x66, 0xD2, 0x15, 0x5B, 0x98, 0x06, 0x03, 0x48, 0xD0, 0x9D, 0xF0,
0x38, 0x2B, 0x9D, 0x51
};
static const unsigned char TA0_RSA_E[] = {
0x01, 0x00, 0x01
};
const br_x509_trust_anchor SelfSignedTAs[1] = {
{
{ (unsigned char *)TA0_DN, sizeof TA0_DN },
BR_X509_TA_CA,
{
BR_KEYTYPE_RSA,
{ .rsa = {
(unsigned char *)TA0_RSA_N, sizeof TA0_RSA_N,
(unsigned char *)TA0_RSA_E, sizeof TA0_RSA_E,
} }
}
}
};

File diff suppressed because it is too large Load Diff

View File

@ -14,23 +14,26 @@ suite "Asynchronous issues test suite":
const HELLO_PORT = 45679
const TEST_MSG = "testmsg"
const MSG_LEN = TEST_MSG.len()
const TestsCount = 500
const TestsCount = 100
type
CustomData = ref object
test: string
proc udp4DataAvailable(transp: DatagramTransport,
remote: TransportAddress) {.async, gcsafe.} =
var udata = getUserData[CustomData](transp)
var expect = TEST_MSG
var data: seq[byte]
var datalen: int
transp.peekMessage(data, datalen)
if udata.test == "CHECK" and datalen == MSG_LEN and
equalMem(addr data[0], addr expect[0], datalen):
udata.test = "OK"
transp.close()
remote: TransportAddress) {.async: (raises: []).} =
try:
var udata = getUserData[CustomData](transp)
var expect = TEST_MSG
var data: seq[byte]
var datalen: int
transp.peekMessage(data, datalen)
if udata.test == "CHECK" and datalen == MSG_LEN and
equalMem(addr data[0], addr expect[0], datalen):
udata.test = "OK"
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc issue6(): Future[bool] {.async.} =
var myself = initTAddress("127.0.0.1:" & $HELLO_PORT)

View File

@ -6,6 +6,7 @@
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import std/[strutils, net]
import stew/byteutils
import ".."/chronos/unittest2/asynctests
import ".."/chronos
@ -29,286 +30,319 @@ suite "Datagram Transport test suite":
" clients x " & $MessagesCount & " messages)"
proc client1(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
await transp.sendTo(raddr, addr ans[0], len(ans))
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
await transp.sendTo(raddr, addr ans[0], len(ans))
else:
var err = "ERROR"
await transp.sendTo(raddr, addr err[0], len(err))
else:
var err = "ERROR"
await transp.sendTo(raddr, addr err[0], len(err))
else:
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client2(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var ta = initTAddress("127.0.0.1:33336")
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(ta, addr req[0], len(req))
else:
var ta = initTAddress("127.0.0.1:33336")
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(ta, addr req[0], len(req))
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client3(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(addr req[0], len(req))
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(addr req[0], len(req))
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client4(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == MessagesCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == MessagesCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(addr req[0], len(req))
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(addr req[0], len(req))
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client5(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == MessagesCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == MessagesCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(raddr, addr req[0], len(req))
else:
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(raddr, addr req[0], len(req))
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client6(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
await transp.sendTo(raddr, ans)
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
await transp.sendTo(raddr, ans)
else:
var err = "ERROR"
await transp.sendTo(raddr, err)
else:
var err = "ERROR"
await transp.sendTo(raddr, err)
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client7(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(raddr, req)
else:
var req = "REQUEST" & $counterPtr[]
await transp.sendTo(raddr, req)
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client8(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(req)
else:
var req = "REQUEST" & $counterPtr[]
await transp.send(req)
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client9(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
var ansseq = newSeq[byte](len(ans))
copyMem(addr ansseq[0], addr ans[0], len(ans))
await transp.sendTo(raddr, ansseq)
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("REQUEST"):
var numstr = data[7..^1]
var num = parseInt(numstr)
var ans = "ANSWER" & $num
var ansseq = newSeq[byte](len(ans))
copyMem(addr ansseq[0], addr ans[0], len(ans))
await transp.sendTo(raddr, ansseq)
else:
var err = "ERROR"
var errseq = newSeq[byte](len(err))
copyMem(addr errseq[0], addr err[0], len(err))
await transp.sendTo(raddr, errseq)
else:
var err = "ERROR"
var errseq = newSeq[byte](len(err))
copyMem(addr errseq[0], addr err[0], len(err))
await transp.sendTo(raddr, errseq)
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client10(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
var reqseq = newSeq[byte](len(req))
copyMem(addr reqseq[0], addr req[0], len(req))
await transp.sendTo(raddr, reqseq)
else:
var req = "REQUEST" & $counterPtr[]
var reqseq = newSeq[byte](len(req))
copyMem(addr reqseq[0], addr req[0], len(req))
await transp.sendTo(raddr, reqseq)
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc client11(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var pbytes = transp.getMessage()
var nbytes = len(pbytes)
if nbytes > 0:
var data = newString(nbytes + 1)
copyMem(addr data[0], addr pbytes[0], nbytes)
data.setLen(nbytes)
if data.startsWith("ANSWER"):
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = counterPtr[] + 1
if counterPtr[] == TestsCount:
transp.close()
else:
var req = "REQUEST" & $counterPtr[]
var reqseq = newSeq[byte](len(req))
copyMem(addr reqseq[0], addr req[0], len(req))
await transp.send(reqseq)
else:
var req = "REQUEST" & $counterPtr[]
var reqseq = newSeq[byte](len(req))
copyMem(addr reqseq[0], addr req[0], len(req))
await transp.send(reqseq)
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
else:
## Read operation failed with error
var counterPtr = cast[ptr int](transp.udata)
counterPtr[] = -1
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
proc testPointerSendTo(): Future[int] {.async.} =
## sendTo(pointer) test
@ -438,7 +472,7 @@ suite "Datagram Transport test suite":
var ta = initTAddress("127.0.0.1:0")
var counter = 0
proc clientMark(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
counter = 1
transp.close()
var dgram1 = newDatagramTransport(client1, local = ta)
@ -456,7 +490,7 @@ suite "Datagram Transport test suite":
proc testTransportClose(): Future[bool] {.async.} =
var ta = initTAddress("127.0.0.1:45000")
proc clientMark(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
discard
var dgram = newDatagramTransport(clientMark, local = ta)
dgram.close()
@ -472,12 +506,15 @@ suite "Datagram Transport test suite":
var bta = initTAddress("255.255.255.255:45010")
var res = 0
proc clientMark(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var bmsg = transp.getMessage()
var smsg = cast[string](bmsg)
if smsg == expectMessage:
inc(res)
transp.close()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var bmsg = transp.getMessage()
var smsg = string.fromBytes(bmsg)
if smsg == expectMessage:
inc(res)
transp.close()
except CatchableError as exc:
raiseAssert exc.msg
var dgram1 = newDatagramTransport(clientMark, local = ta1,
flags = {Broadcast}, ttl = 2)
await dgram1.sendTo(bta, expectMessage)
@ -486,21 +523,25 @@ suite "Datagram Transport test suite":
proc testAnyAddress(): Future[int] {.async.} =
var expectStr = "ANYADDRESS MESSAGE"
var expectSeq = cast[seq[byte]](expectStr)
var expectSeq = expectStr.toBytes()
let ta = initTAddress("0.0.0.0:0")
var res = 0
var event = newAsyncEvent()
proc clientMark1(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
var bmsg = transp.getMessage()
var smsg = cast[string](bmsg)
if smsg == expectStr:
inc(res)
event.fire()
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var bmsg = transp.getMessage()
var smsg = string.fromBytes(bmsg)
if smsg == expectStr:
inc(res)
event.fire()
except CatchableError as exc:
raiseAssert exc.msg
proc clientMark2(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async.} =
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
discard
var dgram1 = newDatagramTransport(clientMark1, local = ta)
@ -533,6 +574,57 @@ suite "Datagram Transport test suite":
result = res
proc performDualstackTest(
sstack: DualStackType, saddr: TransportAddress,
cstack: DualStackType, caddr: TransportAddress
): Future[bool] {.async.} =
var
expectStr = "ANYADDRESS MESSAGE"
event = newAsyncEvent()
res = 0
proc process1(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
try:
var bmsg = transp.getMessage()
var smsg = string.fromBytes(bmsg)
if smsg == expectStr:
inc(res)
event.fire()
except CatchableError as exc:
raiseAssert exc.msg
proc process2(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
discard
let
sdgram = newDatagramTransport(process1, local = saddr,
dualstack = sstack)
localcaddr =
if caddr.family == AddressFamily.IPv4:
AnyAddress
else:
AnyAddress6
cdgram = newDatagramTransport(process2, local = localcaddr,
dualstack = cstack)
var address = caddr
address.port = sdgram.localAddress().port
try:
await cdgram.sendTo(address, addr expectStr[0], len(expectStr))
except CatchableError:
discard
try:
await event.wait().wait(500.milliseconds)
except CatchableError:
discard
await allFutures(sdgram.closeWait(), cdgram.closeWait())
res == 1
test "close(transport) test":
check waitFor(testTransportClose()) == true
test m1:
@ -557,5 +649,83 @@ suite "Datagram Transport test suite":
check waitFor(testBroadcast()) == 1
test "0.0.0.0/::0 (INADDR_ANY) test":
check waitFor(testAnyAddress()) == 6
asyncTest "[IP] getDomain(socket) [SOCK_DGRAM] test":
if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6):
block:
let res = createAsyncSocket2(Domain.AF_INET, SockType.SOCK_DGRAM,
Protocol.IPPROTO_UDP)
check res.isOk()
let fres = getDomain(res.get())
check fres.isOk()
discard unregisterAndCloseFd(res.get())
check fres.get() == AddressFamily.IPv4
block:
let res = createAsyncSocket2(Domain.AF_INET6, SockType.SOCK_DGRAM,
Protocol.IPPROTO_UDP)
check res.isOk()
let fres = getDomain(res.get())
check fres.isOk()
discard unregisterAndCloseFd(res.get())
check fres.get() == AddressFamily.IPv6
when not(defined(windows)):
block:
let res = createAsyncSocket2(Domain.AF_UNIX, SockType.SOCK_DGRAM,
Protocol.IPPROTO_IP)
check res.isOk()
let fres = getDomain(res.get())
check fres.isOk()
discard unregisterAndCloseFd(res.get())
check fres.get() == AddressFamily.Unix
else:
skip()
asyncTest "[IP] DualStack [UDP] server [DualStackType.Auto] test":
if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6):
let serverAddress = initTAddress("[::]:0")
check:
(await performDualstackTest(
DualStackType.Auto, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0"))) == true
check:
(await performDualstackTest(
DualStackType.Auto, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == true
check:
(await performDualstackTest(
DualStackType.Auto, serverAddress,
DualStackType.Auto, initTAddress("[::1]:0"))) == true
else:
skip()
asyncTest "[IP] DualStack [UDP] server [DualStackType.Enabled] test":
if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6):
let serverAddress = initTAddress("[::]:0")
check:
(await performDualstackTest(
DualStackType.Enabled, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0"))) == true
(await performDualstackTest(
DualStackType.Enabled, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == true
(await performDualstackTest(
DualStackType.Enabled, serverAddress,
DualStackType.Auto, initTAddress("[::1]:0"))) == true
else:
skip()
asyncTest "[IP] DualStack [UDP] server [DualStackType.Disabled] test":
if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6):
let serverAddress = initTAddress("[::]:0")
check:
(await performDualstackTest(
DualStackType.Disabled, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0"))) == false
(await performDualstackTest(
DualStackType.Disabled, serverAddress,
DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == false
(await performDualstackTest(
DualStackType.Disabled, serverAddress,
DualStackType.Auto, initTAddress("[::1]:0"))) == true
else:
skip()
test "Transports leak test":
checkLeaks()

View File

@ -6,10 +6,15 @@
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import unittest2
import stew/results
import ../chronos, ../chronos/unittest2/asynctests
{.used.}
type
TestFooConnection* = ref object
id*: int
suite "Future[T] behavior test suite":
proc testFuture1(): Future[int] {.async.} =
await sleepAsync(0.milliseconds)
@ -49,7 +54,6 @@ suite "Future[T] behavior test suite":
fut.addCallback proc(udata: pointer) =
testResult &= "5"
discard waitFor(fut)
poll()
check:
fut.finished
@ -75,7 +79,6 @@ suite "Future[T] behavior test suite":
fut.addCallback cb5
fut.removeCallback cb3
discard waitFor(fut)
poll()
check:
fut.finished
testResult == "1245"
@ -960,7 +963,7 @@ suite "Future[T] behavior test suite":
let discarded {.used.} = await fut1
check res
asyncTest "cancel() async procedure test":
asyncTest "tryCancel() async procedure test":
var completed = 0
proc client1() {.async.} =
@ -980,7 +983,7 @@ suite "Future[T] behavior test suite":
inc(completed)
var fut = client4()
fut.cancel()
discard fut.tryCancel()
# Future must not be cancelled immediately, because it has many nested
# futures.
@ -1031,7 +1034,7 @@ suite "Future[T] behavior test suite":
var fut1 = client2()
var fut2 = client2()
fut1.cancel()
discard fut1.tryCancel()
await fut1
await cancelAndWait(fut2)
check:
@ -1054,17 +1057,17 @@ suite "Future[T] behavior test suite":
if not(retFuture.finished()):
retFuture.complete()
proc cancel(udata: pointer) {.gcsafe.} =
proc cancellation(udata: pointer) {.gcsafe.} =
inc(cancelled)
if not(retFuture.finished()):
removeTimer(moment, completion, cast[pointer](retFuture))
retFuture.cancelCallback = cancel
retFuture.cancelCallback = cancellation
discard setTimer(moment, completion, cast[pointer](retFuture))
return retFuture
var fut = client1(100.milliseconds)
fut.cancel()
discard fut.tryCancel()
await sleepAsync(500.milliseconds)
check:
fut.cancelled()
@ -1112,8 +1115,8 @@ suite "Future[T] behavior test suite":
neverFlag3 = true
res.addCallback(continuation)
res.cancelCallback = cancellation
result = res
neverFlag1 = true
res
proc withTimeoutProc() {.async.} =
try:
@ -1149,12 +1152,12 @@ suite "Future[T] behavior test suite":
someFut = newFuture[void]()
var raceFut3 = raceProc()
someFut.cancel()
discard someFut.tryCancel()
await cancelAndWait(raceFut3)
check:
raceFut1.state == FutureState.Cancelled
raceFut2.state == FutureState.Cancelled
raceFut1.state == FutureState.Completed
raceFut2.state == FutureState.Failed
raceFut3.state == FutureState.Cancelled
asyncTest "asyncSpawn() test":
@ -1218,11 +1221,11 @@ suite "Future[T] behavior test suite":
test "location test":
# WARNING: This test is very sensitive to line numbers and module name.
proc macroFuture() {.async.} = # LINE POSITION 1
let someVar {.used.} = 5 # LINE POSITION 2
proc macroFuture() {.async.} =
let someVar {.used.} = 5 # LINE POSITION 1
let someOtherVar {.used.} = 4
if true:
let otherVar {.used.} = 3
let otherVar {.used.} = 3 # LINE POSITION 2
template templateFuture(): untyped =
newFuture[void]("template")
@ -1237,12 +1240,14 @@ suite "Future[T] behavior test suite":
fut2.complete() # LINE POSITION 4
fut3.complete() # LINE POSITION 6
{.push warning[Deprecated]: off.} # testing backwards compatibility interface
let loc10 = fut1.location[0]
let loc11 = fut1.location[1]
let loc20 = fut2.location[0]
let loc21 = fut2.location[1]
let loc30 = fut3.location[0]
let loc31 = fut3.location[1]
{.pop.}
proc chk(loc: ptr SrcLoc, file: string, line: int,
procedure: string): bool =
@ -1253,12 +1258,12 @@ suite "Future[T] behavior test suite":
(loc.procedure == procedure)
check:
chk(loc10, "testfut.nim", 1221, "macroFuture")
chk(loc11, "testfut.nim", 1222, "")
chk(loc20, "testfut.nim", 1234, "template")
chk(loc21, "testfut.nim", 1237, "")
chk(loc30, "testfut.nim", 1231, "procedure")
chk(loc31, "testfut.nim", 1238, "")
chk(loc10, "testfut.nim", 1225, "macroFuture")
chk(loc11, "testfut.nim", 1228, "")
chk(loc20, "testfut.nim", 1237, "template")
chk(loc21, "testfut.nim", 1240, "")
chk(loc30, "testfut.nim", 1234, "procedure")
chk(loc31, "testfut.nim", 1241, "")
asyncTest "withTimeout(fut) should wait cancellation test":
proc futureNeverEnds(): Future[void] =
@ -1309,12 +1314,17 @@ suite "Future[T] behavior test suite":
test "race(zero) test":
var tseq = newSeq[FutureBase]()
var fut1 = race(tseq)
var fut2 = race()
var fut3 = race([])
check:
# https://github.com/nim-lang/Nim/issues/22964
not compiles(block:
var fut2 = race())
not compiles(block:
var fut3 = race([]))
check:
fut1.failed()
fut2.failed()
fut3.failed()
# fut2.failed()
# fut3.failed()
asyncTest "race(varargs) test":
proc vlient1() {.async.} =
@ -1533,3 +1543,468 @@ suite "Future[T] behavior test suite":
check:
v1_u == 0'u
v2_u + 1'u == 0'u
asyncTest "wait() cancellation undefined behavior test #1":
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await fooFut
return TestFooConnection()
proc testFoo(fooFut: Future[void]) {.async.} =
let connection =
try:
let res = await testInnerFoo(fooFut).wait(10.seconds)
Result[TestFooConnection, int].ok(res)
except CancelledError:
Result[TestFooConnection, int].err(0)
except CatchableError:
Result[TestFooConnection, int].err(1)
check connection.isOk()
var future = newFuture[void]("last.child.future")
var someFut = testFoo(future)
future.complete()
discard someFut.tryCancel()
await someFut
asyncTest "wait() cancellation undefined behavior test #2":
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await fooFut
return TestFooConnection()
proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await testInnerFoo(fooFut)
proc testFoo(fooFut: Future[void]) {.async.} =
let connection =
try:
let res = await testMiddleFoo(fooFut).wait(10.seconds)
Result[TestFooConnection, int].ok(res)
except CancelledError:
Result[TestFooConnection, int].err(0)
except CatchableError:
Result[TestFooConnection, int].err(1)
check connection.isOk()
var future = newFuture[void]("last.child.future")
var someFut = testFoo(future)
future.complete()
discard someFut.tryCancel()
await someFut
asyncTest "withTimeout() cancellation undefined behavior test #1":
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await fooFut
return TestFooConnection()
proc testFoo(fooFut: Future[void]) {.async.} =
let connection =
try:
let
checkFut = testInnerFoo(fooFut)
res = await withTimeout(checkFut, 10.seconds)
if res:
Result[TestFooConnection, int].ok(checkFut.value)
else:
Result[TestFooConnection, int].err(0)
except CancelledError:
Result[TestFooConnection, int].err(1)
except CatchableError:
Result[TestFooConnection, int].err(2)
check connection.isOk()
var future = newFuture[void]("last.child.future")
var someFut = testFoo(future)
future.complete()
discard someFut.tryCancel()
await someFut
asyncTest "withTimeout() cancellation undefined behavior test #2":
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await fooFut
return TestFooConnection()
proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {.
async.} =
await testInnerFoo(fooFut)
proc testFoo(fooFut: Future[void]) {.async.} =
let connection =
try:
let
checkFut = testMiddleFoo(fooFut)
res = await withTimeout(checkFut, 10.seconds)
if res:
Result[TestFooConnection, int].ok(checkFut.value)
else:
Result[TestFooConnection, int].err(0)
except CancelledError:
Result[TestFooConnection, int].err(1)
except CatchableError:
Result[TestFooConnection, int].err(2)
check connection.isOk()
var future = newFuture[void]("last.child.future")
var someFut = testFoo(future)
future.complete()
discard someFut.tryCancel()
await someFut
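Condensed, the pattern these `wait()`/`withTimeout()` tests exercise: converting the outcome into a `Result` instead of letting exceptions escape. A sketch with hypothetical names (`fetchValue`, `guardedFetch`); it assumes `wait()` raises `AsyncTimeoutError` on expiry:

import chronos
import stew/results

proc fetchValue(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  return 42

proc guardedFetch(): Future[Result[int, string]] {.async.} =
  let res =
    try:
      let v = await fetchValue().wait(1.seconds)
      Result[int, string].ok(v)
    except CancelledError:
      Result[int, string].err("cancelled")
    except AsyncTimeoutError:
      Result[int, string].err("timed out")
    except CatchableError as exc:
      Result[int, string].err(exc.msg)
  return res

echo waitFor(guardedFetch()).isOk()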
asyncTest "Cancellation behavior test":
proc testInnerFoo(fooFut: Future[void]) {.async.} =
await fooFut
proc testMiddleFoo(fooFut: Future[void]) {.async.} =
await testInnerFoo(fooFut)
proc testOuterFoo(fooFut: Future[void]) {.async.} =
await testMiddleFoo(fooFut)
block:
# Cancellation of pending Future
let future = newFuture[void]("last.child.pending.future")
await cancelAndWait(future)
check:
future.cancelled() == true
block:
# Cancellation of completed Future
let future = newFuture[void]("last.child.completed.future")
future.complete()
await cancelAndWait(future)
check:
future.cancelled() == false
future.completed() == true
block:
# Cancellation of failed Future
let future = newFuture[void]("last.child.failed.future")
future.fail(newException(ValueError, "ABCD"))
await cancelAndWait(future)
check:
future.cancelled() == false
future.failed() == true
block:
# Cancellation of already cancelled Future
let future = newFuture[void]("last.child.cancelled.future")
future.cancelAndSchedule()
await cancelAndWait(future)
check:
future.cancelled() == true
block:
# Cancellation of Pending->Pending->Pending->Pending sequence
let future = newFuture[void]("last.child.pending.future")
let testFut = testOuterFoo(future)
await cancelAndWait(testFut)
check:
testFut.cancelled() == true
block:
# Cancellation of Pending->Pending->Pending->Completed sequence
let future = newFuture[void]("last.child.completed.future")
let testFut = testOuterFoo(future)
future.complete()
await cancelAndWait(testFut)
check:
testFut.cancelled() == false
testFut.completed() == true
block:
# Cancellation of Pending->Pending->Pending->Failed sequence
let future = newFuture[void]("last.child.failed.future")
let testFut = testOuterFoo(future)
future.fail(newException(ValueError, "ABCD"))
await cancelAndWait(testFut)
check:
testFut.cancelled() == false
testFut.failed() == true
block:
# Cancellation of Pending->Pending->Pending->Cancelled sequence
let future = newFuture[void]("last.child.cancelled.future")
let testFut = testOuterFoo(future)
future.cancelAndSchedule()
await cancelAndWait(testFut)
check:
testFut.cancelled() == true
block:
# Cancellation of a pending Future when automatic scheduling is disabled
let future = newFuture[void]("last.child.pending.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
discard
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
let cancelFut = cancelAndWait(future)
await sleepAsync(100.milliseconds)
check:
cancelFut.finished() == false
future.cancelled() == false
# Now we manually change the Future's state so that `cancelAndWait`
# can finish.
future.complete()
await cancelFut
check:
cancelFut.finished() == true
future.cancelled() == false
future.finished() == true
block:
# Cancellation of a pending Future which completes itself on cancellation,
# when automatic scheduling is disabled
let future = newFuture[void]("last.child.completed.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.complete()
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
await cancelAndWait(future)
check:
future.cancelled() == false
future.completed() == true
block:
# Cancellation of a pending Future which fails itself on cancellation,
# when automatic scheduling is disabled
let future = newFuture[void]("last.child.failed.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.fail(newException(ValueError, "ABCD"))
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
await cancelAndWait(future)
check:
future.cancelled() == false
future.failed() == true
block:
# Cancellation of a pending Future which cancels itself on cancellation,
# when automatic scheduling is disabled
let future = newFuture[void]("last.child.cancelled.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.cancelAndSchedule()
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
await cancelAndWait(future)
check:
future.cancelled() == true
block:
# Cancellation of a Pending->Pending->Pending->Pending sequence, when
# automatic scheduling is disabled and the Future does nothing in its
# cancellation callback
let future = newFuture[void]("last.child.pending.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
discard
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
let testFut = testOuterFoo(future)
let cancelFut = cancelAndWait(testFut)
await sleepAsync(100.milliseconds)
check:
cancelFut.finished() == false
testFut.cancelled() == false
future.cancelled() == false
# Now we manually change the Future's state so that `cancelAndWait`
# can finish.
future.complete()
await cancelFut
check:
cancelFut.finished() == true
future.cancelled() == false
future.finished() == true
testFut.cancelled() == false
testFut.finished() == true
block:
# Cancellation of a Pending->Pending->Pending->Pending sequence, when
# automatic scheduling is disabled and the Future completes itself in its
# cancellation callback
let future = newFuture[void]("last.child.pending.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.complete()
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
let testFut = testOuterFoo(future)
await cancelAndWait(testFut)
await sleepAsync(100.milliseconds)
check:
testFut.cancelled() == false
testFut.finished() == true
future.cancelled() == false
future.finished() == true
block:
# Cancellation of a Pending->Pending->Pending->Pending sequence, when
# automatic scheduling is disabled and the Future fails itself in its
# cancellation callback
let future = newFuture[void]("last.child.pending.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.fail(newException(ValueError, "ABCD"))
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
let testFut = testOuterFoo(future)
await cancelAndWait(testFut)
await sleepAsync(100.milliseconds)
check:
testFut.cancelled() == false
testFut.failed() == true
future.cancelled() == false
future.failed() == true
block:
# Cancellation of a Pending->Pending->Pending->Pending sequence, when
# automatic scheduling is disabled and the Future cancels itself in its
# cancellation callback
let future = newFuture[void]("last.child.pending.future",
{FutureFlag.OwnCancelSchedule})
proc cancellation(udata: pointer) {.gcsafe.} =
future.cancelAndSchedule()
future.cancelCallback = cancellation
# Note: the future will not be finished automatically here; it stays
# pending until our code finishes it.
let testFut = testOuterFoo(future)
await cancelAndWait(testFut)
await sleepAsync(100.milliseconds)
check:
testFut.cancelled() == true
future.cancelled() == true
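A compact sketch of the `OwnCancelSchedule` behaviour exercised in the blocks above: chronos does not schedule the cancellation itself, so `cancelAndWait` only returns once the cancellation callback (or other code) finishes the future. Only the API already used by these tests is assumed:

import chronos

proc demo() {.async.} =
  let fut = newFuture[void]("own.cancel.schedule",
                            {FutureFlag.OwnCancelSchedule})
  proc cancellation(udata: pointer) {.gcsafe.} =
    # We own the scheduling: finish the future ourselves, here by
    # completing it rather than cancelling it.
    fut.complete()
  fut.cancelCallback = cancellation
  await cancelAndWait(fut)
  echo fut.cancelled()    # false - the callback completed it instead
  echo fut.completed()    # true

waitFor demo()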
test "Issue #334 test":
proc test(): bool =
var testres = ""
proc a() {.async.} =
try:
await sleepAsync(seconds(1))
except CatchableError as exc:
testres.add("A")
raise exc
proc b() {.async.} =
try:
await a()
except CatchableError as exc:
testres.add("B")
raise exc
proc c() {.async.} =
try:
echo $(await b().withTimeout(seconds(2)))
except CatchableError as exc:
testres.add("C")
raise exc
let x = c()
x.cancelSoon()
try:
waitFor x
except CatchableError:
testres.add("D")
testres.add("E")
waitFor sleepAsync(milliseconds(100))
testres == "ABCDE"
check test() == true
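For contrast with `cancelAndWait`, a small sketch of the fire-and-forget `cancelSoon()` used in the issue #334 test (assumed semantics: it schedules cancellation and returns immediately):

import chronos

proc job() {.async.} =
  await sleepAsync(1.seconds)

proc demo() {.async.} =
  let fut = job()
  fut.cancelSoon()         # request cancellation, do not wait for it
  try:
    await fut              # observe the final state explicitly if needed
  except CancelledError:
    discard
  echo fut.cancelled()

waitFor demo()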
asyncTest "cancelAndWait() should be able to cancel test":
proc test1() {.async.} =
await noCancel sleepAsync(100.milliseconds)
await noCancel sleepAsync(100.milliseconds)
await sleepAsync(100.milliseconds)
proc test2() {.async.} =
await noCancel sleepAsync(100.milliseconds)
await sleepAsync(100.milliseconds)
await noCancel sleepAsync(100.milliseconds)
proc test3() {.async.} =
await sleepAsync(100.milliseconds)
await noCancel sleepAsync(100.milliseconds)
await noCancel sleepAsync(100.milliseconds)
proc test4() {.async.} =
while true:
await noCancel sleepAsync(50.milliseconds)
await sleepAsync(0.milliseconds)
proc test5() {.async.} =
while true:
await sleepAsync(0.milliseconds)
await noCancel sleepAsync(50.milliseconds)
block:
let future1 = test1()
await cancelAndWait(future1)
let future2 = test1()
await sleepAsync(10.milliseconds)
await cancelAndWait(future2)
check:
future1.cancelled() == true
future2.cancelled() == true
block:
let future1 = test2()
await cancelAndWait(future1)
let future2 = test2()
await sleepAsync(10.milliseconds)
await cancelAndWait(future2)
check:
future1.cancelled() == true
future2.cancelled() == true
block:
let future1 = test3()
await cancelAndWait(future1)
let future2 = test3()
await sleepAsync(10.milliseconds)
await cancelAndWait(future2)
check:
future1.cancelled() == true
future2.cancelled() == true
block:
let future1 = test4()
await cancelAndWait(future1)
let future2 = test4()
await sleepAsync(333.milliseconds)
await cancelAndWait(future2)
check:
future1.cancelled() == true
future2.cancelled() == true
block:
let future1 = test5()
await cancelAndWait(future1)
let future2 = test5()
await sleepAsync(333.milliseconds)
await cancelAndWait(future2)
check:
future1.cancelled() == true
future2.cancelled() == true
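The `noCancel` sections above shield individual awaits from cancellation; a minimal sketch of the resulting cleanup idiom (assumed semantics: the wrapped await always runs to completion, and the cancellation is re-delivered afterwards):

import chronos

proc doCleanup() {.async.} =
  await sleepAsync(50.milliseconds)

proc task() {.async.} =
  try:
    await sleepAsync(10.seconds)
  finally:
    # Even if `task` is being cancelled, this cleanup step is not
    # interrupted; the surrounding cancellation completes afterwards.
    await noCancel doCleanup()

proc demo() {.async.} =
  let fut = task()
  await sleepAsync(10.milliseconds)
  await cancelAndWait(fut)
  echo fut.cancelled()

waitFor demo()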
test "Sink with literals":
# https://github.com/nim-lang/Nim/issues/22175
let fut = newFuture[string]()
fut.complete("test")
check:
fut.value() == "test"

View File

@ -9,7 +9,7 @@ import std/[strutils, sha1]
import ".."/chronos/unittest2/asynctests
import ".."/chronos,
".."/chronos/apps/http/[httpserver, shttpserver, httpclient]
import stew/base10
import stew/[byteutils, base10]
{.used.}
@ -85,7 +85,8 @@ suite "HTTP client testing suite":
res
proc createServer(address: TransportAddress,
process: HttpProcessCallback, secure: bool): HttpServerRef =
process: HttpProcessCallback2,
secure: bool): HttpServerRef =
let
socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
serverFlags = {HttpServerFlags.Http11Pipeline}
@ -128,18 +129,24 @@ suite "HTTP client testing suite":
(MethodPatch, "/test/patch")
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test/get", "/test/post", "/test/head", "/test/put",
"/test/delete", "/test/trace", "/test/options", "/test/connect",
"/test/patch", "/test/error":
return await request.respond(Http200, request.uri.path)
try:
await request.respond(Http200, request.uri.path)
except HttpWriteError as exc:
defaultResponse(exc)
else:
return await request.respond(Http404, "Page not found")
try:
await request.respond(Http404, "Page not found")
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -157,7 +164,7 @@ suite "HTTP client testing suite":
var req = HttpClientRequestRef.new(session, ha, item[0])
let response = await fetch(req)
if response.status == 200:
let data = cast[string](response.data)
let data = string.fromBytes(response.data)
if data == item[1]:
inc(counter)
await req.closeWait()
@ -173,7 +180,7 @@ suite "HTTP client testing suite":
var req = HttpClientRequestRef.new(session, ha, item[0])
let response = await fetch(req)
if response.status == 200:
let data = cast[string](response.data)
let data = string.fromBytes(response.data)
if data == item[1]:
inc(counter)
await req.closeWait()
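The `cast[string]`/`cast[seq[byte]]` conversions above are replaced by stew/byteutils helpers; a two-line sketch of the pair:

import stew/byteutils

let payload = "hello".toBytes()                  # string -> seq[byte]
doAssert string.fromBytes(payload) == "hello"    # seq[byte] -> string, no cast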
@ -187,15 +194,15 @@ suite "HTTP client testing suite":
let ResponseTests = [
(MethodGet, "/test/short_size_response", 65600, 1024,
"SHORTSIZERESPONSE"),
(MethodGet, "/test/long_size_response", 262400, 1024,
(MethodGet, "/test/long_size_response", 131200, 1024,
"LONGSIZERESPONSE"),
(MethodGet, "/test/short_chunked_response", 65600, 1024,
"SHORTCHUNKRESPONSE"),
(MethodGet, "/test/long_chunked_response", 262400, 1024,
(MethodGet, "/test/long_chunked_response", 131200, 1024,
"LONGCHUNKRESPONSE")
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
@ -203,46 +210,58 @@ suite "HTTP client testing suite":
var response = request.getResponse()
var data = createBigMessage(ResponseTests[0][4], ResponseTests[0][2])
response.status = Http200
await response.sendBody(data)
return response
try:
await response.sendBody(data)
except HttpWriteError as exc:
return defaultResponse(exc)
response
of "/test/long_size_response":
var response = request.getResponse()
var data = createBigMessage(ResponseTests[1][4], ResponseTests[1][2])
response.status = Http200
await response.sendBody(data)
return response
try:
await response.sendBody(data)
except HttpWriteError as exc:
return defaultResponse(exc)
response
of "/test/short_chunked_response":
var response = request.getResponse()
var data = createBigMessage(ResponseTests[2][4], ResponseTests[2][2])
response.status = Http200
await response.prepare()
var offset = 0
while true:
if len(data) == offset:
break
let toWrite = min(1024, len(data) - offset)
await response.sendChunk(addr data[offset], toWrite)
offset = offset + toWrite
await response.finish()
return response
try:
await response.prepare()
var offset = 0
while true:
if len(data) == offset:
break
let toWrite = min(1024, len(data) - offset)
await response.sendChunk(addr data[offset], toWrite)
offset = offset + toWrite
await response.finish()
except HttpWriteError as exc:
return defaultResponse(exc)
response
of "/test/long_chunked_response":
var response = request.getResponse()
var data = createBigMessage(ResponseTests[3][4], ResponseTests[3][2])
response.status = Http200
await response.prepare()
var offset = 0
while true:
if len(data) == offset:
break
let toWrite = min(1024, len(data) - offset)
await response.sendChunk(addr data[offset], toWrite)
offset = offset + toWrite
await response.finish()
return response
try:
await response.prepare()
var offset = 0
while true:
if len(data) == offset:
break
let toWrite = min(1024, len(data) - offset)
await response.sendChunk(addr data[offset], toWrite)
offset = offset + toWrite
await response.finish()
except HttpWriteError as exc:
return defaultResponse(exc)
response
else:
return await request.respond(Http404, "Page not found")
defaultResponse()
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -311,21 +330,26 @@ suite "HTTP client testing suite":
(MethodPost, "/test/big_request", 262400)
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test/big_request":
if request.hasBody():
let body = await request.getBody()
let digest = $secureHash(cast[string](body))
return await request.respond(Http200, digest)
else:
return await request.respond(Http400, "Missing content body")
try:
if request.hasBody():
let body = await request.getBody()
let digest = $secureHash(string.fromBytes(body))
await request.respond(Http200, digest)
else:
await request.respond(Http400, "Missing content body")
except HttpProtocolError as exc:
defaultResponse(exc)
except HttpTransportError as exc:
defaultResponse(exc)
else:
return await request.respond(Http404, "Page not found")
defaultResponse()
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -348,7 +372,7 @@ suite "HTTP client testing suite":
session, ha, item[0], headers = headers
)
var expectDigest = $secureHash(cast[string](data))
var expectDigest = $secureHash(string.fromBytes(data))
# Send the big request in 1024-byte chunks
var writer = await open(request)
var offset = 0
@ -364,7 +388,7 @@ suite "HTTP client testing suite":
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == expectDigest:
if string.fromBytes(res) == expectDigest:
inc(counter)
await response.closeWait()
await request.closeWait()
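The client side above streams the request body through a writer; a condensed sketch of that flow (`postStreaming` is a hypothetical helper; header handling is elided):

import chronos
import chronos/apps/http/httpclient
import stew/byteutils

proc postStreaming(session: HttpSessionRef, ha: HttpAddress,
                   payload: seq[byte]): Future[string] {.async.} =
  var request = HttpClientRequestRef.new(session, ha, MethodPost)
  let writer = await open(request)       # send headers, open the body stream
  var offset = 0
  while offset < len(payload):
    let toWrite = min(1024, len(payload) - offset)
    await writer.write(payload[offset ..< offset + toWrite])
    offset += toWrite
  await writer.finish()                  # finalize the request body
  let response = await request.finish()  # receive the response headers
  let body = await response.getBodyBytes()
  await response.closeWait()
  await request.closeWait()
  return string.fromBytes(body)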
@ -381,21 +405,27 @@ suite "HTTP client testing suite":
(MethodPost, "/test/big_chunk_request", 262400)
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test/big_chunk_request":
if request.hasBody():
let body = await request.getBody()
let digest = $secureHash(cast[string](body))
return await request.respond(Http200, digest)
else:
return await request.respond(Http400, "Missing content body")
try:
if request.hasBody():
let
body = await request.getBody()
digest = $secureHash(string.fromBytes(body))
await request.respond(Http200, digest)
else:
await request.respond(Http400, "Missing content body")
except HttpProtocolError as exc:
defaultResponse(exc)
except HttpTransportError as exc:
defaultResponse(exc)
else:
return await request.respond(Http404, "Page not found")
defaultResponse()
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -418,7 +448,7 @@ suite "HTTP client testing suite":
session, ha, item[0], headers = headers
)
var expectDigest = $secureHash(cast[string](data))
var expectDigest = $secureHash(string.fromBytes(data))
# Send the big request in 1024-byte chunks
var writer = await open(request)
var offset = 0
@ -434,7 +464,7 @@ suite "HTTP client testing suite":
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == expectDigest:
if string.fromBytes(res) == expectDigest:
inc(counter)
await response.closeWait()
await request.closeWait()
@ -455,23 +485,28 @@ suite "HTTP client testing suite":
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test/post/urlencoded_size", "/test/post/urlencoded_chunked":
if request.hasBody():
var postTable = await request.post()
let body = postTable.getString("field1") & ":" &
postTable.getString("field2") & ":" &
postTable.getString("field3")
return await request.respond(Http200, body)
else:
return await request.respond(Http400, "Missing content body")
try:
if request.hasBody():
var postTable = await request.post()
let body = postTable.getString("field1") & ":" &
postTable.getString("field2") & ":" &
postTable.getString("field3")
await request.respond(Http200, body)
else:
await request.respond(Http400, "Missing content body")
except HttpTransportError as exc:
defaultResponse(exc)
except HttpProtocolError as exc:
defaultResponse(exc)
else:
return await request.respond(Http404, "Page not found")
defaultResponse()
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -491,12 +526,12 @@ suite "HTTP client testing suite":
]
var request = HttpClientRequestRef.new(
session, ha, MethodPost, headers = headers,
body = cast[seq[byte]](PostRequests[0][1]))
body = PostRequests[0][1].toBytes())
var response = await send(request)
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == PostRequests[0][2]:
if string.fromBytes(res) == PostRequests[0][2]:
inc(counter)
await response.closeWait()
await request.closeWait()
@ -532,7 +567,7 @@ suite "HTTP client testing suite":
var response = await request.finish()
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == PostRequests[1][2]:
if string.fromBytes(res) == PostRequests[1][2]:
inc(counter)
await response.closeWait()
await request.closeWait()
@ -554,23 +589,28 @@ suite "HTTP client testing suite":
]
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test/post/multipart_size", "/test/post/multipart_chunked":
if request.hasBody():
var postTable = await request.post()
let body = postTable.getString("field1") & ":" &
postTable.getString("field2") & ":" &
postTable.getString("field3")
return await request.respond(Http200, body)
else:
return await request.respond(Http400, "Missing content body")
try:
if request.hasBody():
var postTable = await request.post()
let body = postTable.getString("field1") & ":" &
postTable.getString("field2") & ":" &
postTable.getString("field3")
await request.respond(Http200, body)
else:
await request.respond(Http400, "Missing content body")
except HttpProtocolError as exc:
defaultResponse(exc)
except HttpTransportError as exc:
defaultResponse(exc)
else:
return await request.respond(Http404, "Page not found")
defaultResponse()
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -601,7 +641,7 @@ suite "HTTP client testing suite":
var response = await send(request)
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == PostRequests[0][3]:
if string.fromBytes(res) == PostRequests[0][3]:
inc(counter)
await response.closeWait()
await request.closeWait()
@ -634,7 +674,7 @@ suite "HTTP client testing suite":
let response = await request.finish()
if response.status == 200:
var res = await response.getBodyBytes()
if cast[string](res) == PostRequests[1][3]:
if string.fromBytes(res) == PostRequests[1][3]:
inc(counter)
await response.closeWait()
await request.closeWait()
@ -649,26 +689,29 @@ suite "HTTP client testing suite":
var lastAddress: Uri
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/":
return await request.redirect(Http302, "/redirect/1")
of "/redirect/1":
return await request.redirect(Http302, "/next/redirect/2")
of "/next/redirect/2":
return await request.redirect(Http302, "redirect/3")
of "/next/redirect/redirect/3":
return await request.redirect(Http302, "next/redirect/4")
of "/next/redirect/redirect/next/redirect/4":
return await request.redirect(Http302, lastAddress)
of "/final/5":
return await request.respond(Http200, "ok-5")
else:
return await request.respond(Http404, "Page not found")
try:
case request.uri.path
of "/":
await request.redirect(Http302, "/redirect/1")
of "/redirect/1":
await request.redirect(Http302, "/next/redirect/2")
of "/next/redirect/2":
await request.redirect(Http302, "redirect/3")
of "/next/redirect/redirect/3":
await request.redirect(Http302, "next/redirect/4")
of "/next/redirect/redirect/next/redirect/4":
await request.redirect(Http302, lastAddress)
of "/final/5":
await request.respond(Http200, "ok-5")
else:
await request.respond(Http404, "Page not found")
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -704,6 +747,107 @@ suite "HTTP client testing suite":
await server.closeWait()
return "redirect-" & $res
proc testSendCancelLeaksTest(secure: bool): Future[bool] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
let address = server.instance.localAddress()
let ha =
if secure:
getAddress(address, HttpClientScheme.Secure, "/")
else:
getAddress(address, HttpClientScheme.NonSecure, "/")
var counter = 0
while true:
let
session = createSession(secure)
request = HttpClientRequestRef.new(session, ha, MethodGet)
requestFut = request.send()
if counter > 0:
await stepsAsync(counter)
let exitLoop =
if not(requestFut.finished()):
await cancelAndWait(requestFut)
doAssert(cancelled(requestFut) or completed(requestFut),
"Future should be Cancelled or Completed at this point")
if requestFut.completed():
let response = await requestFut
await response.closeWait()
inc(counter)
false
else:
let response = await requestFut
await response.closeWait()
true
await request.closeWait()
await session.closeWait()
if exitLoop:
break
await server.stop()
await server.closeWait()
return true
proc testOpenCancelLeaksTest(secure: bool): Future[bool] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
let address = server.instance.localAddress()
let ha =
if secure:
getAddress(address, HttpClientScheme.Secure, "/")
else:
getAddress(address, HttpClientScheme.NonSecure, "/")
var counter = 0
while true:
let
session = createSession(secure)
request = HttpClientRequestRef.new(session, ha, MethodPost)
bodyFut = request.open()
if counter > 0:
await stepsAsync(counter)
let exitLoop =
if not(bodyFut.finished()):
await cancelAndWait(bodyFut)
doAssert(cancelled(bodyFut) or completed(bodyFut),
"Future should be Cancelled or Completed at this point")
if bodyFut.completed():
let bodyWriter = await bodyFut
await bodyWriter.closeWait()
inc(counter)
false
else:
let bodyWriter = await bodyFut
await bodyWriter.closeWait()
true
await request.closeWait()
await session.closeWait()
if exitLoop:
break
await server.stop()
await server.closeWait()
return true
# proc testBasicAuthorization(): Future[bool] {.async.} =
# let session = HttpSessionRef.new({HttpClientFlag.NoVerifyHost},
# maxRedirections = 10)
@ -766,20 +910,24 @@ suite "HTTP client testing suite":
return @[(data1.status, data1.data.bytesToString(), count),
(data2.status, data2.data.bytesToString(), count)]
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/keep":
let headers = HttpTable.init([("connection", "keep-alive")])
return await request.respond(Http200, "ok", headers = headers)
of "/drop":
let headers = HttpTable.init([("connection", "close")])
return await request.respond(Http200, "ok", headers = headers)
else:
return await request.respond(Http404, "Page not found")
try:
case request.uri.path
of "/keep":
let headers = HttpTable.init([("connection", "keep-alive")])
await request.respond(Http200, "ok", headers = headers)
of "/drop":
let headers = HttpTable.init([("connection", "close")])
await request.respond(Http200, "ok", headers = headers)
else:
await request.respond(Http404, "Page not found")
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, false)
server.start()
@ -901,16 +1049,20 @@ suite "HTTP client testing suite":
await request.closeWait()
return (data.status, data.data.bytesToString(), 0)
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test":
return await request.respond(Http200, "ok")
else:
return await request.respond(Http404, "Page not found")
try:
case request.uri.path
of "/test":
await request.respond(Http200, "ok")
else:
await request.respond(Http404, "Page not found")
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, false)
server.start()
@ -960,19 +1112,23 @@ suite "HTTP client testing suite":
await request.closeWait()
return (data.status, data.data.bytesToString(), 0)
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
case request.uri.path
of "/test":
return await request.respond(Http200, "ok")
of "/keep-test":
let headers = HttpTable.init([("Connection", "keep-alive")])
return await request.respond(Http200, "not-alive", headers)
else:
return await request.respond(Http404, "Page not found")
try:
case request.uri.path
of "/test":
await request.respond(Http200, "ok")
of "/keep-test":
let headers = HttpTable.init([("Connection", "keep-alive")])
await request.respond(Http200, "not-alive", headers)
else:
await request.respond(Http404, "Page not found")
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, false)
server.start()
@ -1075,58 +1231,62 @@ suite "HTTP client testing suite":
return false
true
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
if request.uri.path.startsWith("/test/single/"):
let index =
block:
var res = -1
for index, value in SingleGoodTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(SingleGoodTests[index][1])
return response
elif request.uri.path.startsWith("/test/multiple/"):
let index =
block:
var res = -1
for index, value in MultipleGoodTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(MultipleGoodTests[index][1])
return response
elif request.uri.path.startsWith("/test/overflow/"):
let index =
block:
var res = -1
for index, value in OverflowTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(OverflowTests[index][1])
return response
else:
return await request.respond(Http404, "Page not found")
try:
if request.uri.path.startsWith("/test/single/"):
let index =
block:
var res = -1
for index, value in SingleGoodTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(SingleGoodTests[index][1])
response
elif request.uri.path.startsWith("/test/multiple/"):
let index =
block:
var res = -1
for index, value in MultipleGoodTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(MultipleGoodTests[index][1])
response
elif request.uri.path.startsWith("/test/overflow/"):
let index =
block:
var res = -1
for index, value in OverflowTests.pairs():
if value[0] == request.uri.path:
res = index
break
res
if index < 0:
return await request.respond(Http404, "Page not found")
var response = request.getResponse()
response.status = Http200
await response.sendBody(OverflowTests[index][1])
response
else:
defaultResponse()
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
var server = createServer(initTAddress("127.0.0.1:0"), process, secure)
server.start()
@ -1243,6 +1403,18 @@ suite "HTTP client testing suite":
test "HTTP(S) client maximum redirections test":
check waitFor(testRequestRedirectTest(true, 4)) == "redirect-true"
test "HTTP send() cancellation leaks test":
check waitFor(testSendCancelLeaksTest(false)) == true
test "HTTP(S) send() cancellation leaks test":
check waitFor(testSendCancelLeaksTest(true)) == true
test "HTTP open() cancellation leaks test":
check waitFor(testOpenCancelLeaksTest(false)) == true
test "HTTP(S) open() cancellation leaks test":
check waitFor(testOpenCancelLeaksTest(true)) == true
test "HTTPS basic authorization test":
skip()
# This test is disabled because the remote service is pretty flaky and fails pretty
@ -1262,5 +1434,88 @@ suite "HTTP client testing suite":
test "HTTP client server-sent events test":
check waitFor(testServerSentEvents(false)) == true
test "HTTP getHttpAddress() test":
block:
# HTTP client supports only `http` and `https` schemes in URL.
let res = getHttpAddress("ftp://ftp.scene.org")
check:
res.isErr()
res.error == HttpAddressErrorType.InvalidUrlScheme
res.error.isCriticalError()
block:
# HTTP URL default ports and custom ports test
let
res1 = getHttpAddress("http://www.google.com")
res2 = getHttpAddress("https://www.google.com")
res3 = getHttpAddress("http://www.google.com:35000")
res4 = getHttpAddress("https://www.google.com:25000")
check:
res1.isOk()
res2.isOk()
res3.isOk()
res4.isOk()
res1.get().port == 80
res2.get().port == 443
res3.get().port == 35000
res4.get().port == 25000
block:
# HTTP URL invalid port values test
let
res1 = getHttpAddress("http://www.google.com:-80")
res2 = getHttpAddress("http://www.google.com:0")
res3 = getHttpAddress("http://www.google.com:65536")
res4 = getHttpAddress("http://www.google.com:65537")
res5 = getHttpAddress("https://www.google.com:-443")
res6 = getHttpAddress("https://www.google.com:0")
res7 = getHttpAddress("https://www.google.com:65536")
res8 = getHttpAddress("https://www.google.com:65537")
check:
res1.isErr() and res1.error == HttpAddressErrorType.InvalidPortNumber
res1.error.isCriticalError()
res2.isOk()
res2.get().port == 0
res3.isErr() and res3.error == HttpAddressErrorType.InvalidPortNumber
res3.error.isCriticalError()
res4.isErr() and res4.error == HttpAddressErrorType.InvalidPortNumber
res4.error.isCriticalError()
res5.isErr() and res5.error == HttpAddressErrorType.InvalidPortNumber
res5.error.isCriticalError()
res6.isOk()
res6.get().port == 0
res7.isErr() and res7.error == HttpAddressErrorType.InvalidPortNumber
res7.error.isCriticalError()
res8.isErr() and res8.error == HttpAddressErrorType.InvalidPortNumber
res8.error.isCriticalError()
block:
# HTTP URL missing hostname
let
res1 = getHttpAddress("http://")
res2 = getHttpAddress("https://")
check:
res1.isErr() and res1.error == HttpAddressErrorType.MissingHostname
res1.error.isCriticalError()
res2.isErr() and res2.error == HttpAddressErrorType.MissingHostname
res2.error.isCriticalError()
block:
# No resolution flags and incorrect URL
let
flags = {HttpClientFlag.NoInet4Resolution,
HttpClientFlag.NoInet6Resolution}
res1 = getHttpAddress("http://256.256.256.256", flags)
res2 = getHttpAddress(
"http://[FFFFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]", flags)
check:
res1.isErr() and res1.error == HttpAddressErrorType.InvalidIpHostname
res1.error.isCriticalError()
res2.isErr() and res2.error == HttpAddressErrorType.InvalidIpHostname
res2.error.isCriticalError()
block:
# Resolution of non-existent hostname
let res = getHttpAddress("http://eYr6bdBo.com")
check:
res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed
res.error.isRecoverableError()
not(res.error.isCriticalError())
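A short sketch of consuming the new `getHttpAddress` classification checked above (`classify` is a hypothetical helper; only the predicates and fields from the test are assumed):

import chronos
import chronos/apps/http/httpclient

proc classify(url: string): string =
  let res = getHttpAddress(url)
  if res.isOk():
    "resolved, port " & $res.get().port
  elif res.error.isRecoverableError():
    # e.g. NameLookupFailed - worth retrying later
    "transient failure: " & $res.error
  else:
    # bad scheme, bad port, missing hostname, ... - fix the URL instead
    "critical failure: " & $res.error

echo classify("https://www.google.com:25000")
echo classify("ftp://ftp.scene.org")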
test "Leaks test":
checkLeaks()

View File

@ -7,9 +7,8 @@
# MIT license (LICENSE-MIT)
import std/[strutils, algorithm]
import ".."/chronos/unittest2/asynctests,
".."/chronos, ".."/chronos/apps/http/httpserver,
".."/chronos/apps/http/httpcommon,
".."/chronos/apps/http/httpdebug
".."/chronos,
".."/chronos/apps/http/[httpserver, httpcommon, httpdebug]
import stew/base10
{.used.}
@ -65,7 +64,7 @@ suite "HTTP server testing suite":
proc testTooBigBodyChunked(operation: TooBigTest): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
try:
@ -78,13 +77,15 @@ suite "HTTP server testing suite":
let ptable {.used.} = await request.post()
of PostMultipartTest:
let ptable {.used.} = await request.post()
except HttpCriticalError as exc:
defaultResponse()
except HttpTransportError as exc:
defaultResponse(exc)
except HttpProtocolError as exc:
if exc.code == Http413:
serverRes = true
# Reraising exception, because processor should properly handle it.
raise exc
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -129,14 +130,17 @@ suite "HTTP server testing suite":
proc testTimeout(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
return await request.respond(Http200, "TEST_OK", HttpTable.init())
try:
await request.respond(Http200, "TEST_OK", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
if r.error.kind == HttpServerError.TimeoutError:
serverRes = true
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"),
@ -159,14 +163,17 @@ suite "HTTP server testing suite":
proc testEmpty(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
return await request.respond(Http200, "TEST_OK", HttpTable.init())
try:
await request.respond(Http200, "TEST_OK", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
if r.error.kind == HttpServerError.CriticalError:
if r.error.kind == HttpServerError.ProtocolError:
serverRes = true
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"),
@ -189,14 +196,17 @@ suite "HTTP server testing suite":
proc testTooBig(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
return await request.respond(Http200, "TEST_OK", HttpTable.init())
try:
await request.respond(Http200, "TEST_OK", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
if r.error.error == HttpServerError.CriticalError:
if r.error.error == HttpServerError.ProtocolError:
serverRes = true
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -220,13 +230,11 @@ suite "HTTP server testing suite":
proc testTooBigBody(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
if r.isOk():
discard
else:
if r.error.error == HttpServerError.CriticalError:
async: (raises: [CancelledError]).} =
if r.isErr():
if r.error.error == HttpServerError.ProtocolError:
serverRes = true
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
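For reference, a sketch of inspecting the error side of a `RequestFence`, using only the kinds these tests check (`TimeoutError`, `ProtocolError`):

import chronos
import chronos/apps/http/httpserver

proc process(r: RequestFence): Future[HttpResponseRef] {.
     async: (raises: [CancelledError]).} =
  if r.isErr():
    # Broken requests no longer raise inside the handler; they arrive as
    # an error value whose kind can be inspected.
    if r.error.kind == HttpServerError.TimeoutError:
      discard   # the client was too slow delivering its request
    elif r.error.kind == HttpServerError.ProtocolError:
      discard   # malformed, oversized or otherwise invalid request
    return defaultResponse()
  defaultResponse()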
@ -267,7 +275,7 @@ suite "HTTP server testing suite":
proc testQuery(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
var kres = newSeq[string]()
@ -275,11 +283,14 @@ suite "HTTP server testing suite":
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -297,10 +308,9 @@ suite "HTTP server testing suite":
"GET /?a=%D0%9F&%D0%A4=%D0%91&b=%D0%A6&c=%D0%AE HTTP/1.0\r\n\r\n")
await server.stop()
await server.closeWait()
let r = serverRes and
(data1.find("TEST_OK:a:1:a:2:b:3:c:4") >= 0) and
(data2.find("TEST_OK:a:П:b:Ц:c:Ю:Ф:Б") >= 0)
return r
serverRes and
(data1.find("TEST_OK:a:1:a:2:b:3:c:4") >= 0) and
(data2.find("TEST_OK:a:П:b:Ц:c:Ю:Ф:Б") >= 0)
check waitFor(testQuery()) == true
@ -308,7 +318,7 @@ suite "HTTP server testing suite":
proc testHeaders(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
var kres = newSeq[string]()
@ -316,11 +326,14 @@ suite "HTTP server testing suite":
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -352,21 +365,30 @@ suite "HTTP server testing suite":
proc testPostUrl(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
var kres = newSeq[string]()
let request = r.get()
if request.meth in PostMethods:
let post = await request.post()
let post =
try:
await request.post()
except HttpProtocolError as exc:
return defaultResponse(exc)
except HttpTransportError as exc:
return defaultResponse(exc)
for k, v in post.stringItems():
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
serverRes = true
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -396,21 +418,30 @@ suite "HTTP server testing suite":
proc testPostUrl2(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
var kres = newSeq[string]()
let request = r.get()
if request.meth in PostMethods:
let post = await request.post()
let post =
try:
await request.post()
except HttpProtocolError as exc:
return defaultResponse(exc)
except HttpTransportError as exc:
return defaultResponse(exc)
for k, v in post.stringItems():
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
serverRes = true
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -441,21 +472,30 @@ suite "HTTP server testing suite":
proc testPostMultipart(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
var kres = newSeq[string]()
let request = r.get()
if request.meth in PostMethods:
let post = await request.post()
let post =
try:
await request.post()
except HttpProtocolError as exc:
return defaultResponse(exc)
except HttpTransportError as exc:
return defaultResponse(exc)
for k, v in post.stringItems():
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
serverRes = true
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -497,21 +537,31 @@ suite "HTTP server testing suite":
proc testPostMultipart2(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
var kres = newSeq[string]()
let request = r.get()
if request.meth in PostMethods:
let post = await request.post()
let post =
try:
await request.post()
except HttpProtocolError as exc:
return defaultResponse(exc)
except HttpTransportError as exc:
return defaultResponse(exc)
for k, v in post.stringItems():
kres.add(k & ":" & v)
sort(kres)
serverRes = true
return await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
try:
await request.respond(Http200, "TEST_OK:" & kres.join(":"),
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -566,16 +616,20 @@ suite "HTTP server testing suite":
var eventContinue = newAsyncEvent()
var count = 0
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
inc(count)
if count == ClientsCount:
eventWait.fire()
await eventContinue.wait()
return await request.respond(Http404, "", HttpTable.init())
try:
await request.respond(Http404, "", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -1230,23 +1284,26 @@ suite "HTTP server testing suite":
proc testPostMultipart2(): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
let response = request.getResponse()
await response.prepareSSE()
await response.send("event: event1\r\ndata: data1\r\n\r\n")
await response.send("event: event2\r\ndata: data2\r\n\r\n")
await response.sendEvent("event3", "data3")
await response.sendEvent("event4", "data4")
await response.send("data: data5\r\n\r\n")
await response.sendEvent("", "data6")
await response.finish()
serverRes = true
return response
try:
await response.prepareSSE()
await response.send("event: event1\r\ndata: data1\r\n\r\n")
await response.send("event: event2\r\ndata: data2\r\n\r\n")
await response.sendEvent("event3", "data3")
await response.sendEvent("event4", "data4")
await response.send("data: data5\r\n\r\n")
await response.sendEvent("", "data6")
await response.finish()
serverRes = true
response
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
@ -1305,12 +1362,16 @@ suite "HTTP server testing suite":
{}, false, "close")
]
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
return await request.respond(Http200, "TEST_OK", HttpTable.init())
try:
await request.respond(Http200, "TEST_OK", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
for test in TestMessages:
let
@ -1327,44 +1388,47 @@ suite "HTTP server testing suite":
server.start()
var transp: StreamTransport
try:
transp = await connect(address)
block:
let response = await transp.httpClient2(test[0], 7)
check:
response.data == "TEST_OK"
response.headers.getString("connection") == test[3]
# We sleep here because the server and the client run in the same
# process: receiving the response does not mean the connection has
# already been closed - that takes a few more event-loop steps, so we
# give them a chance to run.
await sleepAsync(50.milliseconds)
let connectionStillAvailable =
try:
let response {.used.} = await transp.httpClient2(test[0], 7)
true
except CatchableError:
false
check connectionStillAvailable == test[2]
transp = await connect(address)
block:
let response = await transp.httpClient2(test[0], 7)
check:
response.data == "TEST_OK"
response.headers.getString("connection") == test[3]
# We sleep here because the server and the client run in the same
# process: receiving the response does not mean the connection has
# already been closed - that takes a few more event-loop steps, so we
# give them a chance to run.
await sleepAsync(50.milliseconds)
let connectionStillAvailable =
try:
let response {.used.} = await transp.httpClient2(test[0], 7)
true
except CatchableError:
false
finally:
if not(isNil(transp)):
await transp.closeWait()
await server.stop()
await server.closeWait()
check connectionStillAvailable == test[2]
if not(isNil(transp)):
await transp.closeWait()
await server.stop()
await server.closeWait()
asyncTest "HTTP debug tests":
const
TestsCount = 10
TestRequest = "GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
TestRequest = "GET /httpdebug HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
proc process(r: RequestFence): Future[HttpResponseRef] {.
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
return await request.respond(Http200, "TEST_OK", HttpTable.init())
try:
await request.respond(Http200, "TEST_OK", HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
return defaultResponse()
defaultResponse()
proc client(address: TransportAddress,
data: string): Future[StreamTransport] {.async.} =
@ -1401,31 +1465,30 @@ suite "HTTP server testing suite":
info.flags == {HttpServerFlags.Http11Pipeline}
info.socketFlags == socketFlags
try:
var clientFutures: seq[Future[StreamTransport]]
for i in 0 ..< TestsCount:
clientFutures.add(client(address, TestRequest))
await allFutures(clientFutures)
var clientFutures: seq[Future[StreamTransport]]
for i in 0 ..< TestsCount:
clientFutures.add(client(address, TestRequest))
await allFutures(clientFutures)
let connections = server.getConnections()
check len(connections) == TestsCount
let currentTime = Moment.now()
for index, connection in connections.pairs():
let transp = clientFutures[index].read()
check:
connection.remoteAddress.get() == transp.localAddress()
connection.localAddress.get() == transp.remoteAddress()
connection.connectionType == ConnectionType.NonSecure
connection.connectionState == ConnectionState.Alive
(currentTime - connection.createMoment.get()) != ZeroDuration
(currentTime - connection.acceptMoment) != ZeroDuration
var pending: seq[Future[void]]
for transpFut in clientFutures:
pending.add(closeWait(transpFut.read()))
await allFutures(pending)
finally:
await server.stop()
await server.closeWait()
let connections = server.getConnections()
check len(connections) == TestsCount
let currentTime = Moment.now()
for index, connection in connections.pairs():
let transp = clientFutures[index].read()
check:
connection.remoteAddress.get() == transp.localAddress()
connection.localAddress.get() == transp.remoteAddress()
connection.connectionType == ConnectionType.NonSecure
connection.connectionState == ConnectionState.Alive
connection.query.get("") == "/httpdebug"
(currentTime - connection.createMoment.get()) != ZeroDuration
(currentTime - connection.acceptMoment) != ZeroDuration
var pending: seq[Future[void]]
for transpFut in clientFutures:
pending.add(closeWait(transpFut.read()))
await allFutures(pending)
await server.stop()
await server.closeWait()
test "Leaks test":
checkLeaks()

View File

@ -94,6 +94,11 @@ proc testAwaitne(): Future[bool] {.async.} =
return true
template returner =
# can't use `return 5`
result = 5
return
suite "Macro transformations test suite":
test "`await` command test":
check waitFor(testAwait()) == true
@ -136,6 +141,151 @@ suite "Macro transformations test suite":
check:
waitFor(gen(int)) == default(int)
test "Nested return":
proc nr: Future[int] {.async.} =
return
if 1 == 1:
return 42
else:
33
check waitFor(nr()) == 42
# There are a few unreachable statements to ensure that we don't regress in
# generated code
{.push warning[UnreachableCode]: off.}
suite "Macro transformations - completions":
test "Run closure to completion on return": # issue #415
var x = 0
proc test415 {.async.} =
try:
return
finally:
await sleepAsync(1.milliseconds)
x = 5
waitFor(test415())
check: x == 5
test "Run closure to completion on defer":
var x = 0
proc testDefer {.async.} =
defer:
await sleepAsync(1.milliseconds)
x = 5
return
waitFor(testDefer())
check: x == 5
test "Run closure to completion with exceptions":
var x = 0
proc testExceptionHandling {.async.} =
try:
return
finally:
try:
await sleepAsync(1.milliseconds)
raise newException(ValueError, "")
except ValueError:
await sleepAsync(1.milliseconds)
await sleepAsync(1.milliseconds)
x = 5
waitFor(testExceptionHandling())
check: x == 5
test "Correct return value when updating result after return":
proc testWeirdCase: int =
try: return 33
finally: result = 55
proc testWeirdCaseAsync: Future[int] {.async.} =
try:
await sleepAsync(1.milliseconds)
return 33
finally: result = 55
check:
testWeirdCase() == waitFor(testWeirdCaseAsync())
testWeirdCase() == 55
test "Correct return value with result assignment in defer":
proc testWeirdCase: int =
defer:
result = 55
result = 33
proc testWeirdCaseAsync: Future[int] {.async.} =
defer:
result = 55
await sleepAsync(1.milliseconds)
return 33
check:
testWeirdCase() == waitFor(testWeirdCaseAsync())
testWeirdCase() == 55
test "Generic & finally calling async":
proc testGeneric(T: type): Future[T] {.async.} =
try:
try:
await sleepAsync(1.milliseconds)
return
finally:
await sleepAsync(1.milliseconds)
await sleepAsync(1.milliseconds)
result = 11
finally:
await sleepAsync(1.milliseconds)
await sleepAsync(1.milliseconds)
result = 12
check waitFor(testGeneric(int)) == 12
proc testFinallyCallsAsync(T: type): Future[T] {.async.} =
try:
await sleepAsync(1.milliseconds)
return
finally:
result = await testGeneric(T)
check waitFor(testFinallyCallsAsync(int)) == 12
test "templates returning":
proc testReturner: Future[int] {.async.} =
returner
doAssert false
check waitFor(testReturner()) == 5
proc testReturner2: Future[int] {.async.} =
template returner2 =
return 6
returner2
doAssert false
check waitFor(testReturner2()) == 6
test "raising defects":
proc raiser {.async.} =
# sleeping to make sure our caller is the poll loop
await sleepAsync(0.milliseconds)
raise newException(Defect, "uh-oh")
let fut = raiser()
expect(Defect): waitFor(fut)
check not fut.completed()
fut.complete()
test "return result":
proc returnResult: Future[int] {.async.} =
var result: int
result = 12
return result
check waitFor(returnResult()) == 12
test "async in async":
proc asyncInAsync: Future[int] {.async.} =
proc a2: Future[int] {.async.} =
result = 12
result = await a2()
check waitFor(asyncInAsync()) == 12
{.pop.}
suite "Macro transformations - implicit returns":
test "Implicit return":
proc implicit(): Future[int] {.async.} =
42
@ -232,3 +382,176 @@ suite "Closure iterator's exception transformation issues":
waitFor(x())
suite "Exceptions tracking":
template checkNotCompiles(body: untyped) =
check (not compiles(body))
test "Can raise valid exception":
proc test1 {.async.} = raise newException(ValueError, "hey")
proc test2 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey")
proc test3 {.async: (raises: [IOError, ValueError]).} =
if 1 == 2:
raise newException(ValueError, "hey")
else:
raise newException(IOError, "hey")
proc test4 {.async: (raises: []), used.} = raise newException(Defect, "hey")
proc test5 {.async: (raises: []).} = discard
proc test6 {.async: (raises: []).} = await test5()
expect(ValueError): waitFor test1()
expect(ValueError): waitFor test2()
expect(IOError): waitFor test3()
waitFor test6()
test "Cannot raise invalid exception":
checkNotCompiles:
proc test3 {.async: (raises: [IOError]).} = raise newException(ValueError, "hey")
test "Explicit return in non-raising proc":
proc test(): Future[int] {.async: (raises: []).} = return 12
check:
waitFor(test()) == 12
test "Non-raising compatibility":
proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey")
let testVar: Future[void] = test1()
proc test2 {.async.} = raise newException(ValueError, "hey")
let testVar2: proc: Future[void] = test2
# Doesn't work unfortunately
#let testVar3: proc: Future[void] = test1
test "Cannot store invalid future types":
proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey")
proc test2 {.async: (raises: [IOError]).} = raise newException(IOError, "hey")
var a = test1()
checkNotCompiles:
a = test2()
test "Await raises the correct types":
proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey")
proc test2 {.async: (raises: [ValueError, CancelledError]).} = await test1()
checkNotCompiles:
proc test3 {.async: (raises: [CancelledError]).} = await test1()
test "Can create callbacks":
proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey")
let callback: proc() {.async: (raises: [ValueError]).} = test1
test "Can return values":
proc test1: Future[int] {.async: (raises: [ValueError]).} =
if 1 == 0: raise newException(ValueError, "hey")
return 12
proc test2: Future[int] {.async: (raises: [ValueError, IOError, CancelledError]).} =
return await test1()
checkNotCompiles:
proc test3: Future[int] {.async: (raises: [CancelledError]).} = await test1()
check waitFor(test2()) == 12
test "Manual tracking":
proc test1: Future[int] {.async: (raw: true, raises: [ValueError]).} =
result = newFuture[int]()
result.complete(12)
check waitFor(test1()) == 12
proc test2: Future[int] {.async: (raw: true, raises: [IOError, OSError]).} =
checkNotCompiles:
result.fail(newException(ValueError, "fail"))
result = newFuture[int]()
result.fail(newException(IOError, "fail"))
proc test3: Future[void] {.async: (raw: true, raises: []).} =
result = newFuture[void]()
checkNotCompiles:
result.fail(newException(ValueError, "fail"))
result.complete()
# Inheritance
proc test4: Future[void] {.async: (raw: true, raises: [CatchableError]).} =
result = newFuture[void]()
result.fail(newException(IOError, "fail"))
check:
waitFor(test1()) == 12
expect(IOError):
discard waitFor(test2())
waitFor(test3())
expect(IOError):
waitFor(test4())
test "or errors":
proc testit {.async: (raises: [ValueError]).} =
raise (ref ValueError)()
proc testit2 {.async: (raises: [IOError]).} =
raise (ref IOError)()
proc test {.async: (raises: [ValueError, IOError]).} =
await testit() or testit2()
proc noraises() {.raises: [].} =
expect(ValueError):
try:
let f = test()
waitFor(f)
except IOError:
doAssert false
noraises()
test "Wait errors":
proc testit {.async: (raises: [ValueError]).} =
raise newException(ValueError, "hey")
proc test {.async: (raises: [ValueError, AsyncTimeoutError, CancelledError]).} =
await wait(testit(), 1000.milliseconds)
proc noraises() {.raises: [].} =
try:
expect(ValueError): waitFor(test())
except CancelledError: doAssert false
except AsyncTimeoutError: doAssert false
noraises()
test "Nocancel errors":
proc testit {.async: (raises: [ValueError, CancelledError]).} =
await sleepAsync(5.milliseconds)
raise (ref ValueError)()
proc test {.async: (raises: [ValueError]).} =
await noCancel testit()
proc noraises() {.raises: [].} =
expect(ValueError):
let f = test()
waitFor(f.cancelAndWait())
waitFor(f)
noraises()
test "Defect on wrong exception type at runtime":
{.push warning[User]: off}
let f = InternalRaisesFuture[void, (ValueError,)]()
expect(Defect): f.fail((ref CatchableError)())
{.pop.}
check: not f.finished()
expect(Defect): f.fail((ref CatchableError)(), warn = false)
check: not f.finished()
test "handleException behavior":
proc raiseException() {.
async: (handleException: true, raises: [AsyncExceptionError]).} =
raise (ref Exception)(msg: "Raising Exception is UB and support for it may change in the future")
proc callCatchAll() {.async: (raises: []).} =
expect(AsyncExceptionError):
await raiseException()
waitFor(callCatchAll())
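The suite above exercises chronos' strict exception tracking via the `async: (raises: [...])` annotation. Below is a minimal standalone sketch of the same API, assuming chronos v4 semantics; the proc names (`fetchValue`, `forward`) are illustrative only and not part of the diff.
import chronos

proc fetchValue(): Future[int] {.async: (raises: [ValueError, CancelledError]).} =
  # `sleepAsync` may raise CancelledError, so it must appear in the raises list.
  await sleepAsync(1.milliseconds)
  if false:
    raise newException(ValueError, "never reached")
  return 42

proc forward(): Future[int] {.async: (raises: [ValueError, CancelledError]).} =
  # `await` propagates exactly the declared exception set of `fetchValue`;
  # omitting ValueError here would be rejected at compile time.
  return await fetchValue()

when isMainModule:
  echo waitFor(forward())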

View File

@ -2,6 +2,8 @@
IF /I "%1" == "STDIN" (
GOTO :STDINTEST
) ELSE IF /I "%1" == "TIMEOUT1" (
GOTO :TIMEOUTTEST1
) ELSE IF /I "%1" == "TIMEOUT2" (
GOTO :TIMEOUTTEST2
) ELSE IF /I "%1" == "TIMEOUT10" (
@ -19,6 +21,10 @@ SET /P "INPUTDATA="
ECHO STDIN DATA: %INPUTDATA%
EXIT 0
:TIMEOUTTEST1
ping -n 1 127.0.0.1 > NUL
EXIT 1
:TIMEOUTTEST2
ping -n 2 127.0.0.1 > NUL
EXIT 2
@ -28,7 +34,7 @@ ping -n 10 127.0.0.1 > NUL
EXIT 0
:BIGDATA
FOR /L %%G IN (1, 1, 400000) DO ECHO ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO
FOR /L %%G IN (1, 1, 100000) DO ECHO ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO
EXIT 0
:ENVTEST

View File

@ -8,6 +8,7 @@
import std/os
import stew/[base10, byteutils]
import ".."/chronos/unittest2/asynctests
import ".."/chronos/asyncproc
when defined(posix):
from ".."/chronos/osdefs import SIGKILL
@ -96,7 +97,11 @@ suite "Asynchronous process management test suite":
let
options = {AsyncProcessOption.EvalCommand}
command = "exit 1"
command =
when defined(windows):
"tests\\testproc.bat timeout1"
else:
"tests/testproc.sh timeout1"
process = await startProcess(command, options = options)
@ -209,9 +214,9 @@ suite "Asynchronous process management test suite":
"tests/testproc.sh bigdata"
let expect =
when defined(windows):
400_000 * (64 + 2)
100_000 * (64 + 2)
else:
400_000 * (64 + 1)
100_000 * (64 + 1)
let process = await startProcess(command, options = options,
stdoutHandle = AsyncProcess.Pipe,
stderrHandle = AsyncProcess.Pipe)
@ -407,6 +412,52 @@ suite "Asynchronous process management test suite":
finally:
await process.closeWait()
asyncTest "killAndWaitForExit() test":
let command =
when defined(windows):
("tests\\testproc.bat", "timeout10", 0)
else:
("tests/testproc.sh", "timeout10", 128 + int(SIGKILL))
let process = await startProcess(command[0], arguments = @[command[1]])
try:
let exitCode = await process.killAndWaitForExit(10.seconds)
check exitCode == command[2]
finally:
await process.closeWait()
asyncTest "terminateAndWaitForExit() test":
let command =
when defined(windows):
("tests\\testproc.bat", "timeout10", 0)
else:
("tests/testproc.sh", "timeout10", 128 + int(SIGTERM))
let process = await startProcess(command[0], arguments = @[command[1]])
try:
let exitCode = await process.terminateAndWaitForExit(10.seconds)
check exitCode == command[2]
finally:
await process.closeWait()
asyncTest "terminateAndWaitForExit() timeout test":
when defined(windows):
skip()
else:
let
command = ("tests/testproc.sh", "noterm", 128 + int(SIGKILL))
process = await startProcess(command[0], arguments = @[command[1]])
# We should wait here to allow `bash` to execute the `trap` command,
# otherwise our test script will be killed with SIGTERM. Increase this
# timeout if the test becomes flaky.
await sleepAsync(1.seconds)
try:
expect AsyncProcessTimeoutError:
let exitCode {.used.} =
await process.terminateAndWaitForExit(1.seconds)
let exitCode = await process.killAndWaitForExit(10.seconds)
check exitCode == command[2]
finally:
await process.closeWait()
test "File descriptors leaks test":
when defined(windows):
skip()

View File

@ -3,18 +3,26 @@
if [ "$1" == "stdin" ]; then
read -r inputdata
echo "STDIN DATA: $inputdata"
elif [ "$1" == "timeout1" ]; then
sleep 1
exit 1
elif [ "$1" == "timeout2" ]; then
sleep 2
exit 2
elif [ "$1" == "timeout10" ]; then
sleep 10
elif [ "$1" == "bigdata" ]; then
for i in {1..400000}
for i in {1..100000}
do
echo "ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO"
done
elif [ "$1" == "envtest" ]; then
echo "$CHRONOSASYNC"
elif [ "$1" == "noterm" ]; then
trap -- '' SIGTERM
while true; do
sleep 1
done
else
echo "arguments missing"
fi

View File

@ -49,7 +49,7 @@ suite "Token Bucket":
# Consume 10* the budget cap
let beforeStart = Moment.now()
waitFor(bucket.consume(1000).wait(5.seconds))
check Moment.now() - beforeStart in 900.milliseconds .. 1500.milliseconds
check Moment.now() - beforeStart in 900.milliseconds .. 2200.milliseconds
test "Sync manual replenish":
var bucket = TokenBucket.new(1000, 0.seconds)
@ -96,7 +96,7 @@ suite "Token Bucket":
futBlocker.finished == false
fut2.finished == false
futBlocker.cancel()
futBlocker.cancelSoon()
waitFor(fut2.wait(10.milliseconds))
test "Very long replenish":
@ -117,9 +117,14 @@ suite "Token Bucket":
check bucket.tryConsume(1, fakeNow) == true
test "Short replenish":
var bucket = TokenBucket.new(15000, 1.milliseconds)
let start = Moment.now()
check bucket.tryConsume(15000, start)
check bucket.tryConsume(1, start) == false
skip()
# TODO (cheatfate): This test was disabled because it continuously
# fails in GitHub Actions Windows x64 CI when using Nim 1.6.14.
# Unable to reproduce the failure locally.
check bucket.tryConsume(15000, start + 1.milliseconds) == true
# var bucket = TokenBucket.new(15000, 1.milliseconds)
# let start = Moment.now()
# check bucket.tryConsume(15000, start)
# check bucket.tryConsume(1, start) == false
# check bucket.tryConsume(15000, start + 1.milliseconds) == true

View File

@ -5,8 +5,8 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import unittest2
import ../chronos
import ../chronos/unittest2/asynctests
{.used.}
@ -23,30 +23,40 @@ suite "Server's test suite":
CustomData = ref object
test: string
teardown:
checkLeaks()
proc serveStreamClient(server: StreamServer,
transp: StreamTransport) {.async.} =
transp: StreamTransport) {.async: (raises: []).} =
discard
proc serveCustomStreamClient(server: StreamServer,
transp: StreamTransport) {.async.} =
var cserver = cast[CustomServer](server)
var ctransp = cast[CustomTransport](transp)
cserver.test1 = "CONNECTION"
cserver.test2 = ctransp.test
cserver.test3 = await transp.readLine()
var answer = "ANSWER\r\n"
discard await transp.write(answer)
transp.close()
await transp.join()
transp: StreamTransport) {.async: (raises: []).} =
try:
var cserver = cast[CustomServer](server)
var ctransp = cast[CustomTransport](transp)
cserver.test1 = "CONNECTION"
cserver.test2 = ctransp.test
cserver.test3 = await transp.readLine()
var answer = "ANSWER\r\n"
discard await transp.write(answer)
transp.close()
await transp.join()
except CatchableError as exc:
raiseAssert exc.msg
proc serveUdataStreamClient(server: StreamServer,
transp: StreamTransport) {.async.} =
var udata = getUserData[CustomData](server)
var line = await transp.readLine()
var msg = line & udata.test & "\r\n"
discard await transp.write(msg)
transp.close()
await transp.join()
transp: StreamTransport) {.async: (raises: []).} =
try:
var udata = getUserData[CustomData](server)
var line = await transp.readLine()
var msg = line & udata.test & "\r\n"
discard await transp.write(msg)
transp.close()
await transp.join()
except CatchableError as exc:
raiseAssert exc.msg
proc customServerTransport(server: StreamServer,
fd: AsyncFD): StreamTransport =
@ -54,37 +64,47 @@ suite "Server's test suite":
transp.test = "CUSTOM"
result = cast[StreamTransport](transp)
proc test1(): bool =
asyncTest "Stream Server start/stop test":
var ta = initTAddress("127.0.0.1:31354")
var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr})
server1.start()
server1.stop()
server1.close()
waitFor server1.join()
await server1.join()
var server2 = createStreamServer(ta, serveStreamClient, {ReuseAddr})
server2.start()
server2.stop()
server2.close()
waitFor server2.join()
result = true
await server2.join()
proc test5(): bool =
var ta = initTAddress("127.0.0.1:31354")
asyncTest "Stream Server stop without start test":
var ta = initTAddress("127.0.0.1:0")
var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr})
ta = server1.localAddress()
server1.stop()
server1.close()
waitFor server1.join()
await server1.join()
var server2 = createStreamServer(ta, serveStreamClient, {ReuseAddr})
server2.stop()
server2.close()
waitFor server2.join()
result = true
await server2.join()
asyncTest "Stream Server inherited object test":
var server = CustomServer()
server.test1 = "TEST"
var ta = initTAddress("127.0.0.1:0")
var pserver = createStreamServer(ta, serveCustomStreamClient, {ReuseAddr},
child = server,
init = customServerTransport)
check:
pserver == server
proc client1(server: CustomServer, ta: TransportAddress) {.async.} =
var transp = CustomTransport()
transp.test = "CLIENT"
server.start()
var ptransp = await connect(ta, child = transp)
var ptransp = await connect(server.localAddress(), child = transp)
var etransp = cast[CustomTransport](ptransp)
doAssert(etransp.test == "CLIENT")
var msg = "TEST\r\n"
@ -96,44 +116,48 @@ suite "Server's test suite":
server.close()
await server.join()
proc client2(server: StreamServer,
ta: TransportAddress): Future[bool] {.async.} =
check:
server.test1 == "CONNECTION"
server.test2 == "CUSTOM"
asyncTest "StreamServer[T] test":
var co = CustomData()
co.test = "CUSTOMDATA"
var ta = initTAddress("127.0.0.1:0")
var server = createStreamServer(ta, serveUdataStreamClient, {ReuseAddr},
udata = co)
server.start()
var transp = await connect(ta)
var transp = await connect(server.localAddress())
var msg = "TEST\r\n"
discard await transp.write(msg)
var line = await transp.readLine()
result = (line == "TESTCUSTOMDATA")
check:
line == "TESTCUSTOMDATA"
transp.close()
server.stop()
server.close()
await server.join()
proc test3(): bool =
var server = CustomServer()
server.test1 = "TEST"
var ta = initTAddress("127.0.0.1:31354")
var pserver = createStreamServer(ta, serveCustomStreamClient, {ReuseAddr},
child = cast[StreamServer](server),
init = customServerTransport)
doAssert(not isNil(pserver))
waitFor client1(server, ta)
result = (server.test1 == "CONNECTION") and (server.test2 == "CUSTOM")
asyncTest "Backlog and connect cancellation":
var ta = initTAddress("127.0.0.1:0")
var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr}, backlog = 1)
ta = server1.localAddress()
proc test4(): bool =
var co = CustomData()
co.test = "CUSTOMDATA"
var ta = initTAddress("127.0.0.1:31354")
var server = createStreamServer(ta, serveUdataStreamClient, {ReuseAddr},
udata = co)
result = waitFor client2(server, ta)
var clients: seq[Future[StreamTransport]]
for i in 0..<10:
clients.add(connect(server1.localAddress))
# Check for leaks in cancellation / connect when server is not accepting
for c in clients:
if not c.finished:
await c.cancelAndWait()
else:
# The backlog connection "should" end up here
try:
await c.read().closeWait()
except CatchableError:
discard
test "Stream Server start/stop test":
check test1() == true
test "Stream Server stop without start test":
check test5() == true
test "Stream Server inherited object test":
check test3() == true
test "StreamServer[T] test":
check test4() == true
server1.close()
await server1.join()

View File

@ -7,7 +7,8 @@
# MIT license (LICENSE-MIT)
import std/strutils
import ".."/chronos/unittest2/asynctests
import ".."/chronos, ".."/chronos/apps/http/shttpserver
import ".."/chronos,
".."/chronos/apps/http/shttpserver
import stew/base10
{.used.}
@ -107,15 +108,18 @@ suite "Secure HTTP server testing suite":
proc testHTTPS(address: TransportAddress): Future[bool] {.async.} =
var serverRes = false
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
serverRes = true
return await request.respond(Http200, "TEST_OK:" & $request.meth,
HttpTable.init())
try:
await request.respond(Http200, "TEST_OK:" & $request.meth,
HttpTable.init())
except HttpWriteError as exc:
serverRes = false
defaultResponse(exc)
else:
serverRes = false
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let serverFlags = {Secure}
@ -145,16 +149,18 @@ suite "Secure HTTP server testing suite":
var serverRes = false
var testFut = newFuture[void]()
proc process(r: RequestFence): Future[HttpResponseRef] {.
async.} =
async: (raises: [CancelledError]).} =
if r.isOk():
let request = r.get()
serverRes = false
return await request.respond(Http200, "TEST_OK:" & $request.meth,
HttpTable.init())
try:
await request.respond(Http200, "TEST_OK:" & $request.meth,
HttpTable.init())
except HttpWriteError as exc:
defaultResponse(exc)
else:
serverRes = true
testFut.complete()
return defaultResponse()
defaultResponse()
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
let serverFlags = {Secure}

View File

@ -11,75 +11,83 @@ import ../chronos
{.used.}
suite "callSoon() tests suite":
const CallSoonTests = 10
var soonTest1 = 0'u
var timeoutsTest1 = 0
var timeoutsTest2 = 0
var soonTest2 = 0
proc callback1(udata: pointer) {.gcsafe.} =
soonTest1 = soonTest1 xor cast[uint](udata)
proc test1(): uint =
callSoon(callback1, cast[pointer](0x12345678'u))
callSoon(callback1, cast[pointer](0x23456789'u))
callSoon(callback1, cast[pointer](0x3456789A'u))
callSoon(callback1, cast[pointer](0x456789AB'u))
callSoon(callback1, cast[pointer](0x56789ABC'u))
callSoon(callback1, cast[pointer](0x6789ABCD'u))
callSoon(callback1, cast[pointer](0x789ABCDE'u))
callSoon(callback1, cast[pointer](0x89ABCDEF'u))
callSoon(callback1, cast[pointer](0x9ABCDEF1'u))
callSoon(callback1, cast[pointer](0xABCDEF12'u))
callSoon(callback1, cast[pointer](0xBCDEF123'u))
callSoon(callback1, cast[pointer](0xCDEF1234'u))
callSoon(callback1, cast[pointer](0xDEF12345'u))
callSoon(callback1, cast[pointer](0xEF123456'u))
callSoon(callback1, cast[pointer](0xF1234567'u))
callSoon(callback1, cast[pointer](0x12345678'u))
## All callbacks must be processed with exactly one poll() call.
poll()
result = soonTest1
proc testProc() {.async.} =
for i in 1..CallSoonTests:
await sleepAsync(100.milliseconds)
timeoutsTest1 += 1
var callbackproc: proc(udata: pointer) {.gcsafe, raises: [].}
callbackproc = proc (udata: pointer) {.gcsafe, raises: [].} =
timeoutsTest2 += 1
{.gcsafe.}:
callSoon(callbackproc)
proc test2(timers, callbacks: var int) =
callSoon(callbackproc)
waitFor(testProc())
timers = timeoutsTest1
callbacks = timeoutsTest2
proc testCallback(udata: pointer) =
soonTest2 = 987654321
proc test3(): bool =
callSoon(testCallback)
poll()
result = soonTest2 == 987654321
test "User-defined callback argument test":
var values = [0x12345678'u, 0x23456789'u, 0x3456789A'u, 0x456789AB'u,
0x56789ABC'u, 0x6789ABCD'u, 0x789ABCDE'u, 0x89ABCDEF'u,
0x9ABCDEF1'u, 0xABCDEF12'u, 0xBCDEF123'u, 0xCDEF1234'u,
0xDEF12345'u, 0xEF123456'u, 0xF1234567'u, 0x12345678'u]
var expect = 0'u
for item in values:
expect = expect xor item
check test1() == expect
proc test(): bool =
var soonTest = 0'u
proc callback(udata: pointer) {.gcsafe.} =
soonTest = soonTest xor cast[uint](udata)
callSoon(callback, cast[pointer](0x12345678'u))
callSoon(callback, cast[pointer](0x23456789'u))
callSoon(callback, cast[pointer](0x3456789A'u))
callSoon(callback, cast[pointer](0x456789AB'u))
callSoon(callback, cast[pointer](0x56789ABC'u))
callSoon(callback, cast[pointer](0x6789ABCD'u))
callSoon(callback, cast[pointer](0x789ABCDE'u))
callSoon(callback, cast[pointer](0x89ABCDEF'u))
callSoon(callback, cast[pointer](0x9ABCDEF1'u))
callSoon(callback, cast[pointer](0xABCDEF12'u))
callSoon(callback, cast[pointer](0xBCDEF123'u))
callSoon(callback, cast[pointer](0xCDEF1234'u))
callSoon(callback, cast[pointer](0xDEF12345'u))
callSoon(callback, cast[pointer](0xEF123456'u))
callSoon(callback, cast[pointer](0xF1234567'u))
callSoon(callback, cast[pointer](0x12345678'u))
## All callbacks must be processed with exactly one poll() call.
poll()
var values = [0x12345678'u, 0x23456789'u, 0x3456789A'u, 0x456789AB'u,
0x56789ABC'u, 0x6789ABCD'u, 0x789ABCDE'u, 0x89ABCDEF'u,
0x9ABCDEF1'u, 0xABCDEF12'u, 0xBCDEF123'u, 0xCDEF1234'u,
0xDEF12345'u, 0xEF123456'u, 0xF1234567'u, 0x12345678'u]
var expect = 0'u
for item in values:
expect = expect xor item
soonTest == expect
check test() == true
test "`Asynchronous dead end` #7193 test":
var timers, callbacks: int
test2(timers, callbacks)
check:
timers == CallSoonTests
callbacks > CallSoonTests * 2
const CallSoonTests = 5
proc test() =
var
timeoutsTest1 = 0
timeoutsTest2 = 0
stopFlag = false
var callbackproc: proc(udata: pointer) {.gcsafe, raises: [].}
callbackproc = proc (udata: pointer) {.gcsafe, raises: [].} =
timeoutsTest2 += 1
if not(stopFlag):
callSoon(callbackproc)
proc testProc() {.async.} =
for i in 1 .. CallSoonTests:
await sleepAsync(10.milliseconds)
timeoutsTest1 += 1
callSoon(callbackproc)
waitFor(testProc())
stopFlag = true
poll()
check:
timeoutsTest1 == CallSoonTests
timeoutsTest2 > CallSoonTests * 2
test()
test "`callSoon() is not working prior getGlobalDispatcher()` #7192 test":
check test3() == true
proc test(): bool =
var soonTest = 0
proc testCallback(udata: pointer) =
soonTest = 987654321
callSoon(testCallback)
poll()
soonTest == 987654321
check test() == true
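A small standalone sketch (not part of the diff) of the scheduling guarantee these tests rely on: a callback registered with `callSoon` only runs once the dispatcher is polled. The names below are illustrative.
import chronos

var fired = 0

proc cb(udata: pointer) {.gcsafe, raises: [].} =
  inc fired

callSoon(cb)
doAssert fired == 0   # nothing runs until the dispatcher is polled
poll()                # one iteration of the event loop runs the callback
doAssert fired == 1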

File diff suppressed because it is too large

View File

@ -150,9 +150,9 @@ suite "Asynchronous sync primitives test suite":
var fut2 = task(lock, 2, n2)
var fut3 = task(lock, 3, n3)
if cancelIndex == 2:
fut2.cancel()
fut2.cancelSoon()
else:
fut3.cancel()
fut3.cancelSoon()
await allFutures(fut1, fut2, fut3)
result = stripe

View File

@ -91,26 +91,36 @@ suite "Asynchronous timers & steps test suite":
$nanoseconds(1_800_000_600) == "1s800ms600ns"
test "Asynchronous steps test":
var futn1 = stepsAsync(-1)
var fut0 = stepsAsync(0)
var fut1 = stepsAsync(1)
var fut2 = stepsAsync(2)
var fut3 = stepsAsync(3)
check:
futn1.completed() == true
fut0.completed() == true
fut1.completed() == false
fut2.completed() == false
fut3.completed() == false
poll()
# We need `fut` because `stepsAsync` no longer keeps `poll()` going on its own.
block:
var fut {.used.} = sleepAsync(50.milliseconds)
poll()
check:
fut1.completed() == true
fut2.completed() == false
fut3.completed() == false
poll()
block:
var fut {.used.} = sleepAsync(50.milliseconds)
poll()
check:
fut2.completed() == true
fut3.completed() == false
poll()
block:
var fut {.used.} = sleepAsync(50.milliseconds)
poll()
check:
fut3.completed() == true
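A compact sketch (illustrative only, mirroring the test above) of how `stepsAsync` interacts with `poll()`: the returned future completes only after the requested number of event-loop steps, and an auxiliary timer future is needed because a pending `stepsAsync` alone does not keep `poll()` going.
import chronos

let stepFut = stepsAsync(1)
doAssert not stepFut.completed()

block:
  # Auxiliary timer so that `poll()` has other pending work, as in the test.
  var keepAlive {.used.} = sleepAsync(50.milliseconds)
  poll()

doAssert stepFut.completed()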

View File

@ -56,7 +56,7 @@ suite "Asynchronous utilities test suite":
check:
getCount() == 1'u
pendingFuturesCount() == 1'u
fut3.cancel()
discard fut3.tryCancel()
poll()
check:
getCount() == 0'u
@ -75,11 +75,6 @@ suite "Asynchronous utilities test suite":
pendingFuturesCount() == 2'u
waitFor fut
check:
getCount() == 1'u
pendingFuturesCount() == 1'u
poll()
check:
getCount() == 0'u
pendingFuturesCount() == 0'u