Mirror of https://github.com/logos-storage/nim-chronos.git
Synced 2026-01-03 14:03:06 +00:00

Compare commits (29 commits):

b55e2816eb
0646c444fc
36d8ee5617
7c5cbf04a6
70cbe346e2
03f4328de6
9186950e03
c04576d829
dc3847e4d6
8f609b6c17
13d28a5b71
4ad38079de
7630f39471
c44406594f
1b9d9253e8
8a306763ce
1ff81c60ea
52b02b9977
72f560f049
bb96f02ae8
0f0ed1d654
d184a92227
7a3eaffa4f
bd7d84fbcb
e4cb48088c
0d050d5823
8e49df1400
2d85229dce
402914f4cf
.github/workflows/ci.yml (vendored, 165 lines changed)

@@ -6,163 +6,12 @@ on:
   pull_request:
   workflow_dispatch:
 
-concurrency: # Cancel stale PR builds (but not push builds)
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
-  cancel-in-progress: true
-
 jobs:
   build:
-    strategy:
-      fail-fast: false
-      matrix:
-        target:
-          - os: linux
-            cpu: amd64
-          - os: linux
-            cpu: i386
-          - os: macos
-            cpu: amd64
-          - os: windows
-            cpu: amd64
-          #- os: windows
-          #  cpu: i386
-        branch: [version-1-6, version-2-0, devel]
-        include:
-          - target:
-              os: linux
-            builder: ubuntu-20.04
-            shell: bash
-          - target:
-              os: macos
-            builder: macos-12
-            shell: bash
-          - target:
-              os: windows
-            builder: windows-2019
-            shell: msys2 {0}
-
-    defaults:
-      run:
-        shell: ${{ matrix.shell }}
-
-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
-    runs-on: ${{ matrix.builder }}
-    continue-on-error: ${{ matrix.branch == 'devel' }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Enable debug verbosity
-        if: runner.debug == '1'
-        run: |
-          echo "V=1" >> $GITHUB_ENV
-          echo "UNITTEST2_OUTPUT_LVL=VERBOSE" >> $GITHUB_ENV
-
-      - name: Install build dependencies (Linux i386)
-        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
-        run: |
-          sudo dpkg --add-architecture i386
-          sudo apt-fast update -qq
-          sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
-            --no-install-recommends -yq gcc-multilib g++-multilib \
-            libssl-dev:i386
-          mkdir -p external/bin
-          cat << EOF > external/bin/gcc
-          #!/bin/bash
-          exec $(which gcc) -m32 "\$@"
-          EOF
-          cat << EOF > external/bin/g++
-          #!/bin/bash
-          exec $(which g++) -m32 "\$@"
-          EOF
-          chmod 755 external/bin/gcc external/bin/g++
-          echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
-
-      - name: MSYS2 (Windows i386)
-        if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
-        uses: msys2/setup-msys2@v2
-        with:
-          path-type: inherit
-          msystem: MINGW32
-          install: >-
-            base-devel
-            git
-            mingw-w64-i686-toolchain
-
-      - name: MSYS2 (Windows amd64)
-        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
-        uses: msys2/setup-msys2@v2
-        with:
-          path-type: inherit
-          install: >-
-            base-devel
-            git
-            mingw-w64-x86_64-toolchain
-
-      - name: Restore Nim DLLs dependencies (Windows) from cache
-        if: runner.os == 'Windows'
-        id: windows-dlls-cache
-        uses: actions/cache@v3
-        with:
-          path: external/dlls-${{ matrix.target.cpu }}
-          key: 'dlls-${{ matrix.target.cpu }}'
-
-      - name: Install DLLs dependencies (Windows)
-        if: >
-          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
-          runner.os == 'Windows'
-        run: |
-          mkdir -p external
-          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
-          7z x -y external/windeps.zip -oexternal/dlls-${{ matrix.target.cpu }}
-
-      - name: Path to cached dependencies (Windows)
-        if: >
-          runner.os == 'Windows'
-        run: |
-          echo "${{ github.workspace }}/external/dlls-${{ matrix.target.cpu }}" >> $GITHUB_PATH
-
-      - name: Derive environment variables
-        run: |
-          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
-            PLATFORM=x64
-          else
-            PLATFORM=x86
-          fi
-          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
-
-          ncpu=
-          MAKE_CMD="make"
-          case '${{ runner.os }}' in
-          'Linux')
-            ncpu=$(nproc)
-            ;;
-          'macOS')
-            ncpu=$(sysctl -n hw.ncpu)
-            ;;
-          'Windows')
-            ncpu=$NUMBER_OF_PROCESSORS
-            MAKE_CMD="mingw32-make"
-            ;;
-          esac
-          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
-          echo "ncpu=$ncpu" >> $GITHUB_ENV
-          echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
-
-      - name: Build Nim and Nimble
-        run: |
-          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-          env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
-            QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
-            bash build_nim.sh nim csources dist/nimble NimBinaries
-          echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
-
-      - name: Run tests
-        run: |
-          nim --version
-          nimble --version
-          nimble install -y --depsOnly
-          nimble install -y libbacktrace
-          nimble test
-          nimble test_libbacktrace
-          nimble examples
+    uses: status-im/nimbus-common-workflow/.github/workflows/common.yml@main
+    with:
+      test-command: |
+        nimble install -y libbacktrace
+        nimble test
+        nimble test_libbacktrace
+        nimble examples
.github/workflows/doc.yml (vendored, 4 lines changed)

@@ -15,7 +15,7 @@ jobs:
     continue-on-error: true
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          submodules: true
      - uses: actions-rs/install@v0.1
@@ -41,7 +41,7 @@ jobs:
 
      - uses: jiro4989/setup-nim-action@v1
        with:
-          nim-version: '1.6.16'
+          nim-version: '1.6.20'
 
      - name: Generate doc
        run: |
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose
 
 packageName = "chronos"
-version = "4.0.2"
+version = "4.0.4"
 author = "Status Research & Development GmbH"
 description = "Networking framework with async/await support"
 license = "MIT or Apache License 2.0"
@@ -10,7 +10,7 @@ skipDirs = @["tests"]
 requires "nim >= 1.6.16",
          "results",
          "stew",
-         "bearssl",
+         "bearssl >= 0.2.5",
          "httputils",
          "unittest2"
 
@@ -54,11 +54,19 @@ task examples, "Build examples":
     build "--threads:on", file
 
 task test, "Run all tests":
+  for args in testArguments:
+    # First run tests with `refc` memory manager.
+    run args & " --mm:refc", "tests/testall"
+    if (NimMajor, NimMinor) > (1, 6):
+      run args & " --mm:orc", "tests/testall"
+
+task test_v3_compat, "Run all tests in v3 compatibility mode":
   for args in testArguments:
     if (NimMajor, NimMinor) > (1, 6):
       # First run tests with `refc` memory manager.
-      run args & " --mm:refc", "tests/testall"
-    run args, "tests/testall"
+      run args & " --mm:refc -d:chronosHandleException", "tests/testall"
+    run args & " -d:chronosHandleException", "tests/testall"
 
 task test_libbacktrace, "test with libbacktrace":
   if platform != "x86":
@@ -67,10 +75,10 @@ task test_libbacktrace, "test with libbacktrace":
   ]
 
   for args in allArgs:
+    # First run tests with `refc` memory manager.
+    run args & " --mm:refc", "tests/testall"
     if (NimMajor, NimMinor) > (1, 6):
-      # First run tests with `refc` memory manager.
-      run args & " --mm:refc", "tests/testall"
-    run args, "tests/testall"
+      run args & " --mm:orc", "tests/testall"
 
 task docs, "Generate API documentation":
   exec "mdbook build docs"
@@ -159,6 +159,7 @@ type
     redirectCount: int
     timestamp*: Moment
     duration*: Duration
+    headersBuffer: seq[byte]
 
   HttpClientRequestRef* = ref HttpClientRequest
 
@@ -567,7 +568,8 @@ proc new(
     tls =
       try:
         newTLSClientAsyncStream(treader, twriter, ha.hostname,
-                                flags = session.flags.getTLSFlags())
+                                flags = session.flags.getTLSFlags(),
+                                bufferSize = session.connectionBufferSize)
       except TLSStreamInitError as exc:
         return err(exc.msg)
 
@@ -858,6 +860,7 @@ proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} =
     await noCancel(allFutures(pending))
     request.session = nil
     request.error = nil
+    request.headersBuffer.reset()
     request.state = HttpReqRespState.Closed
     untrackCounter(HttpClientRequestTrackerName)
 
@@ -991,14 +994,14 @@ proc prepareResponse(
 
 proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {.
     async: (raises: [CancelledError, HttpError]).} =
-  var buffer: array[HttpMaxHeadersSize, byte]
   let timestamp = Moment.now()
   req.connection.setTimestamp(timestamp)
   let
     bytesRead =
       try:
-        await req.connection.reader.readUntil(addr buffer[0],
-                                              len(buffer), HeadersMark).wait(
-                                                req.session.headersTimeout)
+        await req.connection.reader.readUntil(addr req.headersBuffer[0],
+                                              len(req.headersBuffer),
+                                              HeadersMark).wait(
+                                                req.session.headersTimeout)
       except AsyncTimeoutError:
         raiseHttpReadError("Reading response headers timed out")
@@ -1006,23 +1009,25 @@ proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {.
         raiseHttpReadError(
           "Could not read response headers, reason: " & $exc.msg)
 
-  let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1))
-  if response.isErr():
-    raiseHttpProtocolError(response.error())
-  let res = response.get()
-  res.setTimestamp(timestamp)
-  return res
+  let response =
+    prepareResponse(req,
+      req.headersBuffer.toOpenArray(0, bytesRead - 1)).valueOr:
+        raiseHttpProtocolError(error)
+  response.setTimestamp(timestamp)
+  response
 
 proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
           ha: HttpAddress, meth: HttpMethod = MethodGet,
           version: HttpVersion = HttpVersion11,
           flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[byte] = []): HttpClientRequestRef =
   let res = HttpClientRequestRef(
     state: HttpReqRespState.Ready, session: session, meth: meth,
     version: version, flags: flags, headers: HttpTable.init(headers),
-    address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body
+    address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body,
+    headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize))
   )
   trackCounter(HttpClientRequestTrackerName)
   res
@@ -1031,13 +1036,15 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          url: string, meth: HttpMethod = MethodGet,
          version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[byte] = []): HttpResult[HttpClientRequestRef] =
   let address = ? session.getAddress(parseUri(url))
   let res = HttpClientRequestRef(
     state: HttpReqRespState.Ready, session: session, meth: meth,
     version: version, flags: flags, headers: HttpTable.init(headers),
-    address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body
+    address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body,
+    headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize))
   )
   trackCounter(HttpClientRequestTrackerName)
   ok(res)
@@ -1045,48 +1052,58 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
 proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          url: string, version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = []
         ): HttpResult[HttpClientRequestRef] =
-  HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers)
+  HttpClientRequestRef.new(session, url, MethodGet, version, flags,
+                           maxResponseHeadersSize, headers)
 
 proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          ha: HttpAddress, version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = []
         ): HttpClientRequestRef =
-  HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers)
+  HttpClientRequestRef.new(session, ha, MethodGet, version, flags,
+                           maxResponseHeadersSize, headers)
 
 proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          url: string, version: HttpVersion = HttpVersion11,
         flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[byte] = []
         ): HttpResult[HttpClientRequestRef] =
-  HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers,
-                           body)
+  HttpClientRequestRef.new(session, url, MethodPost, version, flags,
+                           maxResponseHeadersSize, headers, body)
 
 proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          url: string, version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[char] = []): HttpResult[HttpClientRequestRef] =
-  HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers,
+  HttpClientRequestRef.new(session, url, MethodPost, version, flags,
+                           maxResponseHeadersSize, headers,
                            body.toOpenArrayByte(0, len(body) - 1))
 
 proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          ha: HttpAddress, version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[byte] = []): HttpClientRequestRef =
-  HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers,
-                           body)
+  HttpClientRequestRef.new(session, ha, MethodPost, version, flags,
+                           maxResponseHeadersSize, headers, body)
 
 proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef,
          ha: HttpAddress, version: HttpVersion = HttpVersion11,
          flags: set[HttpClientRequestFlag] = {},
+          maxResponseHeadersSize: int = HttpMaxHeadersSize,
          headers: openArray[HttpHeaderTuple] = [],
          body: openArray[char] = []): HttpClientRequestRef =
-  HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers,
+  HttpClientRequestRef.new(session, ha, MethodPost, version, flags,
+                           maxResponseHeadersSize, headers,
                            body.toOpenArrayByte(0, len(body) - 1))
 
 proc prepareRequest(request: HttpClientRequestRef): string =
@@ -1327,13 +1344,18 @@ proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader {.
   let reader =
     case response.bodyFlag
     of HttpClientBodyFlag.Sized:
-      let bstream = newBoundedStreamReader(response.connection.reader,
-                                           response.contentLength)
-      newHttpBodyReader(bstream)
+      newHttpBodyReader(
+        newBoundedStreamReader(
+          response.connection.reader, response.contentLength,
+          bufferSize = response.session.connectionBufferSize))
     of HttpClientBodyFlag.Chunked:
-      newHttpBodyReader(newChunkedStreamReader(response.connection.reader))
+      newHttpBodyReader(
+        newChunkedStreamReader(
+          response.connection.reader,
+          bufferSize = response.session.connectionBufferSize))
     of HttpClientBodyFlag.Custom:
-      newHttpBodyReader(newAsyncStreamReader(response.connection.reader))
+      newHttpBodyReader(
+        newAsyncStreamReader(response.connection.reader))
   response.connection.state = HttpClientConnectionState.ResponseBodyReceiving
   response.reader = reader
   response.reader
@@ -1448,8 +1470,10 @@ proc redirect*(request: HttpClientRequestRef,
       var res = request.headers
       res.set(HostHeader, ha.hostname)
       res
-  var res = HttpClientRequestRef.new(request.session, ha, request.meth,
-    request.version, request.flags, headers.toList(), request.buffer)
+  var res =
+    HttpClientRequestRef.new(request.session, ha, request.meth,
+      request.version, request.flags, headers = headers.toList(),
+      body = request.buffer)
   res.redirectCount = redirectCount
   ok(res)
 
@@ -1472,8 +1496,10 @@ proc redirect*(request: HttpClientRequestRef,
       var res = request.headers
       res.set(HostHeader, address.hostname)
       res
-  var res = HttpClientRequestRef.new(request.session, address, request.meth,
-    request.version, request.flags, headers.toList(), request.buffer)
+  var res =
+    HttpClientRequestRef.new(request.session, address, request.meth,
+      request.version, request.flags, headers = headers.toList(),
+      body = request.buffer)
   res.redirectCount = redirectCount
   ok(res)
 
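For illustration, a minimal sketch (not part of the diff) of the new `maxResponseHeadersSize` argument that sizes the per-request `headersBuffer`; the URL is a placeholder, a reachable HTTP endpoint is assumed and error handling is omitted:

import chronos, chronos/apps/http/httpclient

proc fetch() {.async.} =
  let session = HttpSessionRef.new()
  # Request a larger response-headers buffer than the default; the
  # effective size is max(maxResponseHeadersSize, HttpMaxHeadersSize).
  let request = HttpClientRequestRef.get(
    session, "http://127.0.0.1:8080/",       # placeholder URL
    maxResponseHeadersSize = 65536).get()
  let response = await request.send()
  echo response.status
  await response.closeWait()
  await request.closeWait()
  await session.closeWait()

waitFor fetch()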
@@ -252,47 +252,42 @@ proc new*(
     dualstack = DualStackType.Auto,
     middlewares: openArray[HttpServerMiddlewareRef] = []
    ): HttpResult[HttpServerRef] =
-  let serverUri =
-    if len(serverUri.hostname) > 0:
-      serverUri
-    else:
-      try:
-        parseUri("http://" & $address & "/")
-      except TransportAddressError as exc:
-        return err(exc.msg)
-
-  let serverInstance =
-    try:
-      createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
-                         backlog = backlogSize, dualstack = dualstack)
-    except TransportOsError as exc:
-      return err(exc.msg)
-
-  var res = HttpServerRef(
-    address: serverInstance.localAddress(),
-    instance: serverInstance,
-    processCallback: processCallback,
-    createConnCallback: createConnection,
-    baseUri: serverUri,
-    serverIdent: serverIdent,
-    flags: serverFlags,
-    socketFlags: socketFlags,
-    maxConnections: maxConnections,
-    bufferSize: bufferSize,
-    backlogSize: backlogSize,
-    headersTimeout: httpHeadersTimeout,
-    maxHeadersSize: maxHeadersSize,
-    maxRequestBodySize: maxRequestBodySize,
-    # semaphore:
-    #   if maxConnections > 0:
-    #     newAsyncSemaphore(maxConnections)
-    #   else:
-    #     nil
-    lifetime: newFuture[void]("http.server.lifetime"),
-    connections: initOrderedTable[string, HttpConnectionHolderRef](),
-    middlewares: prepareMiddlewares(processCallback, middlewares)
-  )
-
+  let
+    serverInstance =
+      try:
+        createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
+                           backlog = backlogSize, dualstack = dualstack)
+      except TransportOsError as exc:
+        return err(exc.msg)
+    serverUri =
+      if len(serverUri.hostname) > 0:
+        serverUri
+      else:
+        parseUri("http://" & $serverInstance.localAddress() & "/")
+    res = HttpServerRef(
+      address: serverInstance.localAddress(),
+      instance: serverInstance,
+      processCallback: processCallback,
+      createConnCallback: createConnection,
+      baseUri: serverUri,
+      serverIdent: serverIdent,
+      flags: serverFlags,
+      socketFlags: socketFlags,
+      maxConnections: maxConnections,
+      bufferSize: bufferSize,
+      backlogSize: backlogSize,
+      headersTimeout: httpHeadersTimeout,
+      maxHeadersSize: maxHeadersSize,
+      maxRequestBodySize: maxRequestBodySize,
+      # semaphore:
+      #   if maxConnections > 0:
+      #     newAsyncSemaphore(maxConnections)
+      #   else:
+      #     nil
+      lifetime: newFuture[void]("http.server.lifetime"),
+      connections: initOrderedTable[string, HttpConnectionHolderRef](),
+      middlewares: prepareMiddlewares(processCallback, middlewares)
+    )
   ok(res)
 
 proc new*(
@@ -1187,23 +1182,7 @@ proc closeWait*(server: HttpServerRef) {.async: (raises: []).} =
 proc join*(server: HttpServerRef): Future[void] {.
     async: (raw: true, raises: [CancelledError]).} =
   ## Wait until HTTP server will not be closed.
-  var retFuture = newFuture[void]("http.server.join")
-
-  proc continuation(udata: pointer) {.gcsafe.} =
-    if not(retFuture.finished()):
-      retFuture.complete()
-
-  proc cancellation(udata: pointer) {.gcsafe.} =
-    if not(retFuture.finished()):
-      server.lifetime.removeCallback(continuation, cast[pointer](retFuture))
-
-  if server.state == ServerClosed:
-    retFuture.complete()
-  else:
-    server.lifetime.addCallback(continuation, cast[pointer](retFuture))
-    retFuture.cancelCallback = cancellation
-
-  retFuture
+  server.lifetime.join()
 
 proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] =
   ## Create new MultiPartReader interface for specific request.
@@ -98,52 +98,47 @@ proc new*(htype: typedesc[SecureHttpServerRef],
           maxRequestBodySize: int = 1_048_576,
           dualstack = DualStackType.Auto
          ): HttpResult[SecureHttpServerRef] =
 
   doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!")
   doAssert(not(isNil(tlsCertificate)), "TLS certificate must not be nil!")
-
-  let serverUri =
-    if len(serverUri.hostname) > 0:
-      serverUri
-    else:
-      try:
-        parseUri("https://" & $address & "/")
-      except TransportAddressError as exc:
-        return err(exc.msg)
-
-  let serverInstance =
-    try:
-      createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
-                         backlog = backlogSize, dualstack = dualstack)
-    except TransportOsError as exc:
-      return err(exc.msg)
-
-  let res = SecureHttpServerRef(
-    address: address,
-    instance: serverInstance,
-    processCallback: processCallback,
-    createConnCallback: createSecConnection,
-    baseUri: serverUri,
-    serverIdent: serverIdent,
-    flags: serverFlags + {HttpServerFlags.Secure},
-    socketFlags: socketFlags,
-    maxConnections: maxConnections,
-    bufferSize: bufferSize,
-    backlogSize: backlogSize,
-    headersTimeout: httpHeadersTimeout,
-    maxHeadersSize: maxHeadersSize,
-    maxRequestBodySize: maxRequestBodySize,
-    # semaphore:
-    #   if maxConnections > 0:
-    #     newAsyncSemaphore(maxConnections)
-    #   else:
-    #     nil
-    lifetime: newFuture[void]("http.server.lifetime"),
-    connections: initOrderedTable[string, HttpConnectionHolderRef](),
-    tlsCertificate: tlsCertificate,
-    tlsPrivateKey: tlsPrivateKey,
-    secureFlags: secureFlags
-  )
+  let
+    serverInstance =
+      try:
+        createStreamServer(address, flags = socketFlags,
+                           bufferSize = bufferSize,
+                           backlog = backlogSize, dualstack = dualstack)
+      except TransportOsError as exc:
+        return err(exc.msg)
+    serverUri =
+      if len(serverUri.hostname) > 0:
+        serverUri
+      else:
+        parseUri("https://" & $serverInstance.localAddress() & "/")
+    res = SecureHttpServerRef(
+      address: serverInstance.localAddress(),
+      instance: serverInstance,
+      processCallback: processCallback,
+      createConnCallback: createSecConnection,
+      baseUri: serverUri,
+      serverIdent: serverIdent,
+      flags: serverFlags + {HttpServerFlags.Secure},
+      socketFlags: socketFlags,
+      maxConnections: maxConnections,
+      bufferSize: bufferSize,
+      backlogSize: backlogSize,
+      headersTimeout: httpHeadersTimeout,
+      maxHeadersSize: maxHeadersSize,
+      maxRequestBodySize: maxRequestBodySize,
+      # semaphore:
+      #   if maxConnections > 0:
+      #     newAsyncSemaphore(maxConnections)
+      #   else:
+      #     nil
+      lifetime: newFuture[void]("http.server.lifetime"),
+      connections: initOrderedTable[string, HttpConnectionHolderRef](),
+      tlsCertificate: tlsCertificate,
+      tlsPrivateKey: tlsPrivateKey,
+      secureFlags: secureFlags
+    )
   ok(res)
 
 proc new*(htype: typedesc[SecureHttpServerRef],
@@ -996,7 +996,7 @@ else:
         return
       if not(isNil(timer)):
         clearTimer(timer)
-      let exitCode = p.peekProcessExitCode().valueOr:
+      let exitCode = p.peekProcessExitCode(reap = true).valueOr:
         retFuture.fail(newException(AsyncProcessError, osErrorMsg(error)))
         return
       if exitCode == -1:
chronos/bipbuffer.nim (new file, 140 lines)

@@ -0,0 +1,140 @@
+#
+#                  Chronos
+#
+#  (c) Copyright 2018-Present Status Research & Development GmbH
+#
+#       Licensed under either of
+#  Apache License, version 2.0, (LICENSE-APACHEv2)
+#              MIT license (LICENSE-MIT)
+
+## This module implements Bip Buffer (bi-partite circular buffer) by Simone
+## Cooke.
+##
+## The Bip-Buffer is like a circular buffer, but slightly different. Instead of
+## keeping one head and tail pointer to the data in the buffer, it maintains two
+## revolving regions, allowing for fast data access without having to worry
+## about wrapping at the end of the buffer. Buffer allocations are always
+## maintained as contiguous blocks, allowing the buffer to be used in a highly
+## efficient manner with API calls, and also reducing the amount of copying
+## which needs to be performed to put data into the buffer. Finally, a two-phase
+## allocation system allows the user to pessimistically reserve an area of
+## buffer space, and then trim back the buffer to commit to only the space which
+## was used.
+##
+## https://www.codeproject.com/Articles/3479/The-Bip-Buffer-The-Circular-Buffer-with-a-Twist
+
+{.push raises: [].}
+
+type
+  BipPos = object
+    start: Natural
+    finish: Natural
+
+  BipBuffer* = object
+    a, b, r: BipPos
+    data: seq[byte]
+
+proc init*(t: typedesc[BipBuffer], size: int): BipBuffer =
+  ## Creates new Bip Buffer with size `size`.
+  BipBuffer(data: newSeq[byte](size))
+
+template len(pos: BipPos): Natural =
+  pos.finish - pos.start
+
+template reset(pos: var BipPos) =
+  pos = BipPos()
+
+func init(t: typedesc[BipPos], start, finish: Natural): BipPos =
+  BipPos(start: start, finish: finish)
+
+func calcReserve(bp: BipBuffer): tuple[space: Natural, start: Natural] =
+  if len(bp.b) > 0:
+    (Natural(bp.a.start - bp.b.finish), bp.b.finish)
+  else:
+    let spaceAfterA = Natural(len(bp.data) - bp.a.finish)
+    if spaceAfterA >= bp.a.start:
+      (spaceAfterA, bp.a.finish)
+    else:
+      (bp.a.start, Natural(0))
+
+func availSpace*(bp: BipBuffer): Natural =
+  ## Returns amount of space available for reserve in buffer `bp`.
+  let (res, _) = bp.calcReserve()
+  res
+
+func len*(bp: BipBuffer): Natural =
+  ## Returns amount of used space in buffer `bp`.
+  len(bp.b) + len(bp.a)
+
+proc reserve*(bp: var BipBuffer,
+              size: Natural = 0): tuple[data: ptr byte, size: Natural] =
+  ## Reserve `size` bytes in buffer.
+  ##
+  ## If `size == 0` (default) reserve all available space from buffer.
+  ##
+  ## If there is not enough space in buffer for resevation - error will be
+  ## returned.
+  ##
+  ## Returns current reserved range as pointer of type `pt` and size of
+  ## type `st`.
+  const ErrorMessage = "Not enough space available"
+  doAssert(size <= len(bp.data))
+  let (availableSpace, reserveStart) = bp.calcReserve()
+  if availableSpace == 0:
+    raiseAssert ErrorMessage
+  let reserveLength =
+    if size == 0:
+      availableSpace
+    else:
+      if size < availableSpace:
+        raiseAssert ErrorMessage
+      size
+  bp.r = BipPos.init(reserveStart, Natural(reserveStart + reserveLength))
+  (addr bp.data[bp.r.start], len(bp.r))
+
+proc commit*(bp: var BipBuffer, size: Natural) =
+  ## Updates structure's pointers when new data inserted into buffer.
+  doAssert(len(bp.r) >= size,
+           "Committed size could not be larger than the previously reserved one")
+  if size == 0:
+    bp.r.reset()
+    return
+
+  let toCommit = min(size, len(bp.r))
+  if len(bp.a) == 0 and len(bp.b) == 0:
+    bp.a.start = bp.r.start
+    bp.a.finish = bp.r.start + toCommit
+  elif bp.r.start == bp.a.finish:
+    bp.a.finish += toCommit
+  else:
+    bp.b.finish += toCommit
+  bp.r.reset()
+
+proc consume*(bp: var BipBuffer, size: Natural) =
+  ## The procedure removes/frees `size` bytes from the buffer ``bp``.
+  var currentSize = size
+  if currentSize >= len(bp.a):
+    currentSize -= len(bp.a)
+    bp.a = bp.b
+    bp.b.reset()
+    if currentSize >= len(bp.a):
+      currentSize -= len(bp.a)
+      bp.a.reset()
+    else:
+      bp.a.start += currentSize
+  else:
+    bp.a.start += currentSize
+
+iterator items*(bp: BipBuffer): byte =
+  ## Iterates over all the bytes in the buffer.
+  for index in bp.a.start ..< bp.a.finish:
+    yield bp.data[index]
+  for index in bp.b.start ..< bp.b.finish:
+    yield bp.data[index]
+
+iterator regions*(bp: var BipBuffer): tuple[data: ptr byte, size: Natural] =
+  ## Iterates over all the regions (`a` and `b`) in the buffer.
+  if len(bp.a) > 0:
+    yield (addr bp.data[bp.a.start], len(bp.a))
+  if len(bp.b) > 0:
+    yield (addr bp.data[bp.b.start], len(bp.b))
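For illustration, a sketch (not part of the diff) of the two-phase reserve/commit cycle the module documents, assuming the module is importable as `chronos/bipbuffer`:

import chronos/bipbuffer

var buf = BipBuffer.init(16)

# Phase 1: reserve writable space (default: everything that is available).
let (dst, avail) = buf.reserve()
let payload = [byte 1, 2, 3, 4]
doAssert avail >= len(payload)
copyMem(dst, unsafeAddr payload[0], len(payload))

# Phase 2: commit only the bytes that were actually written.
buf.commit(len(payload))
doAssert len(buf) == 4

# Read the committed bytes back, then free them.
var total = 0
for b in buf:
  total += int(b)
doAssert total == 10
buf.consume(len(buf))
doAssert len(buf) == 0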
@@ -76,22 +76,11 @@ template Finished*(T: type FutureState): FutureState {.
     deprecated: "Use FutureState.Completed instead".} =
   FutureState.Completed
 
-proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] =
-  let fut = Future[T]()
-  internalInitFutureBase(fut, loc, FutureState.Pending, {})
-  fut
-
 proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] =
   let fut = Future[T]()
   internalInitFutureBase(fut, loc, FutureState.Pending, flags)
   fut
 
-proc newInternalRaisesFutureImpl[T, E](
-    loc: ptr SrcLoc): InternalRaisesFuture[T, E] =
-  let fut = InternalRaisesFuture[T, E]()
-  internalInitFutureBase(fut, loc, FutureState.Pending, {})
-  fut
-
 proc newInternalRaisesFutureImpl[T, E](
     loc: ptr SrcLoc, flags: FutureFlags): InternalRaisesFuture[T, E] =
   let fut = InternalRaisesFuture[T, E]()
@@ -125,7 +114,7 @@ template newInternalRaisesFuture*[T, E](fromProc: static[string] = ""): auto =
   ##
   ## Specifying ``fromProc``, which is a string specifying the name of the proc
   ## that this future belongs to, is a good habit as it helps with debugging.
-  newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc))
+  newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc), {})
 
 template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] {.deprecated.} =
   ## Create a new future which can hold/preserve GC sequence until future will
@@ -1000,31 +989,87 @@ template cancel*(future: FutureBase) {.
   ## Cancel ``future``.
   cancelSoon(future, nil, nil, getSrcLocation())
 
-proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {.
-    async: (raw: true, raises: []).} =
+proc cancelAndWait(
+    loc: ptr SrcLoc,
+    futs: varargs[FutureBase]
+): Future[void] {.async: (raw: true, raises: []).} =
+  let
+    retFuture =
+      Future[void].Raising([]).init(
+        "chronos.cancelAndWait(varargs[FutureBase])",
+        {FutureFlag.OwnCancelSchedule})
+  var count = 0
+
+  proc continuation(udata: pointer) {.gcsafe.} =
+    dec(count)
+    if count == 0:
+      retFuture.complete()
+
+  retFuture.cancelCallback = nil
+
+  for futn in futs:
+    if not(futn.finished()):
+      inc(count)
+      cancelSoon(futn, continuation, cast[pointer](futn), loc)
+
+  if count == 0:
+    retFuture.complete()
+
+  retFuture
+
+proc cancelAndWait(
+    loc: ptr SrcLoc,
+    futs: openArray[SomeFuture]
+): Future[void] {.async: (raw: true, raises: []).} =
+  cancelAndWait(loc, futs.mapIt(FutureBase(it)))
+
+template cancelAndWait*(future: FutureBase): Future[void].Raising([]) =
   ## Perform cancellation ``future`` return Future which will be completed when
   ## ``future`` become finished (completed with value, failed or cancelled).
   ##
   ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees
   ## that ``future``will be finished (completed with value, failed or cancelled)
   ## as quickly as possible.
-  let retFuture = newFuture[void]("chronos.cancelAndWait(FutureBase)",
-                                  {FutureFlag.OwnCancelSchedule})
-
-  proc continuation(udata: pointer) {.gcsafe.} =
-    retFuture.complete()
-
-  if future.finished():
-    retFuture.complete()
-  else:
-    retFuture.cancelCallback = nil
-    cancelSoon(future, continuation, cast[pointer](retFuture), loc)
-
-  retFuture
-
-template cancelAndWait*(future: FutureBase): Future[void].Raising([]) =
-  ## Cancel ``future``.
-  cancelAndWait(future, getSrcLocation())
+  cancelAndWait(getSrcLocation(), future)
+
+template cancelAndWait*(future: SomeFuture): Future[void].Raising([]) =
+  ## Perform cancellation ``future`` return Future which will be completed when
+  ## ``future`` become finished (completed with value, failed or cancelled).
+  ##
+  ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees
+  ## that ``future``will be finished (completed with value, failed or cancelled)
+  ## as quickly as possible.
+  cancelAndWait(getSrcLocation(), FutureBase(future))
+
+template cancelAndWait*(futs: varargs[FutureBase]): Future[void].Raising([]) =
+  ## Perform cancellation of all the ``futs``. Returns Future which will be
+  ## completed when all the ``futs`` become finished (completed with value,
+  ## failed or cancelled).
+  ##
+  ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees
+  ## that all the ``futs``will be finished (completed with value, failed or
+  ## cancelled) as quickly as possible.
+  ##
+  ## NOTE: It is safe to pass finished futures in ``futs`` (completed with
+  ## value, failed or cancelled).
+  ##
+  ## NOTE: If ``futs`` is an empty array, procedure returns completed Future.
+  cancelAndWait(getSrcLocation(), futs)
+
+template cancelAndWait*(futs: openArray[SomeFuture]): Future[void].Raising([]) =
+  ## Perform cancellation of all the ``futs``. Returns Future which will be
+  ## completed when all the ``futs`` become finished (completed with value,
+  ## failed or cancelled).
+  ##
+  ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees
+  ## that all the ``futs``will be finished (completed with value, failed or
+  ## cancelled) as quickly as possible.
+  ##
+  ## NOTE: It is safe to pass finished futures in ``futs`` (completed with
+  ## value, failed or cancelled).
+  ##
+  ## NOTE: If ``futs`` is an empty array, procedure returns completed Future.
+  cancelAndWait(getSrcLocation(), futs)
 
 proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: asyncraiseOf(future) - CancelledError
   ## Prevent cancellation requests from propagating to ``future`` while
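For illustration, a sketch (not part of the diff) of cancelling a group of futures with the new varargs/openArray `cancelAndWait` overloads:

import chronos

proc demo() {.async.} =
  var futs: seq[Future[void]]
  for i in 0 ..< 3:
    futs.add sleepAsync(10.seconds)

  # Cancels every pending future in `futs` and completes once all of them
  # are finished; already-finished futures are accepted as well.
  await cancelAndWait(futs)

  for fut in futs:
    doAssert fut.cancelled()

waitFor demo()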
@@ -1042,19 +1087,24 @@ proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: as
   let retFuture = newFuture[F.T]("chronos.noCancel(T)",
                                  {FutureFlag.OwnCancelSchedule})
   template completeFuture() =
+    const canFail = when declared(InternalRaisesFutureRaises):
+        InternalRaisesFutureRaises isnot void
+      else:
+        true
+
     if future.completed():
       when F.T is void:
         retFuture.complete()
       else:
         retFuture.complete(future.value)
-    elif future.failed():
-      when F is Future:
-        retFuture.fail(future.error, warn = false)
-      when declared(InternalRaisesFutureRaises):
-        when InternalRaisesFutureRaises isnot void:
-          retFuture.fail(future.error, warn = false)
     else:
-      raiseAssert("Unexpected future state [" & $future.state & "]")
+      when canFail: # Avoid calling `failed` on non-failing raises futures
+        if future.failed():
+          retFuture.fail(future.error, warn = false)
+        else:
+          raiseAssert("Unexpected future state [" & $future.state & "]")
+      else:
+        raiseAssert("Unexpected future state [" & $future.state & "]")
 
   proc continuation(udata: pointer) {.gcsafe.} =
     completeFuture()
@@ -1466,18 +1516,25 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {.
     timer: TimerCallback
     timeouted = false
 
-  template completeFuture(fut: untyped): untyped =
+  template completeFuture(fut: untyped, timeout: bool): untyped =
     if fut.failed() or fut.completed():
       retFuture.complete(true)
     else:
-      retFuture.cancelAndSchedule()
+      if timeout:
+        retFuture.complete(false)
+      else:
+        retFuture.cancelAndSchedule()
 
   # TODO: raises annotation shouldn't be needed, but likely similar issue as
   # https://github.com/nim-lang/Nim/issues/17369
   proc continuation(udata: pointer) {.gcsafe, raises: [].} =
     if not(retFuture.finished()):
       if timeouted:
-        retFuture.complete(false)
+        # We should not unconditionally complete result future with `false`.
+        # Initiated by timeout handler cancellation could fail, in this case
+        # we could get `fut` in complete or in failed state, so we should
+        # complete result future with `true` instead of `false` here.
+        fut.completeFuture(timeouted)
         return
       if not(fut.finished()):
         # Timer exceeded first, we going to cancel `fut` and wait until it
@@ -1488,7 +1545,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {.
       # Future `fut` completed/failed/cancelled first.
       if not(isNil(timer)):
         clearTimer(timer)
-      fut.completeFuture()
+      fut.completeFuture(false)
     timer = nil
 
   # TODO: raises annotation shouldn't be needed, but likely similar issue as
@@ -1499,7 +1556,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {.
         clearTimer(timer)
       fut.cancelSoon()
     else:
-      fut.completeFuture()
+      fut.completeFuture(false)
     timer = nil
 
   if fut.finished():
@@ -1522,17 +1579,21 @@ proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {.
     inline, deprecated: "Use withTimeout(Future[T], Duration)".} =
   withTimeout(fut, timeout.milliseconds())
 
-proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
-  var
-    moment: Moment
-    timer: TimerCallback
-    timeouted = false
+proc waitUntilImpl[F: SomeFuture](fut: F, retFuture: auto,
+                                  deadline: auto): auto =
+  var timeouted = false
 
-  template completeFuture(fut: untyped): untyped =
+  template completeFuture(fut: untyped, timeout: bool): untyped =
     if fut.failed():
       retFuture.fail(fut.error(), warn = false)
     elif fut.cancelled():
-      retFuture.cancelAndSchedule()
+      if timeout:
+        # Its possible that `future` could be cancelled in some other place. In
+        # such case we can't detect if it was our cancellation due to timeout,
+        # or some other cancellation.
+        retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
+      else:
+        retFuture.cancelAndSchedule()
     else:
       when type(fut).T is void:
         retFuture.complete()
@@ -1542,7 +1603,64 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
   proc continuation(udata: pointer) {.raises: [].} =
     if not(retFuture.finished()):
       if timeouted:
+        # When timeout is exceeded and we cancelled future via cancelSoon(),
+        # its possible that future at this moment already has value
+        # and/or error.
+        fut.completeFuture(timeouted)
+        return
+      if not(fut.finished()):
+        timeouted = true
+        fut.cancelSoon()
+      else:
+        fut.completeFuture(false)
+
+  var cancellation: proc(udata: pointer) {.gcsafe, raises: [].}
+  cancellation = proc(udata: pointer) {.gcsafe, raises: [].} =
+    deadline.removeCallback(continuation)
+    if not(fut.finished()):
+      fut.cancelSoon()
+    else:
+      fut.completeFuture(false)
+
+  if fut.finished():
+    fut.completeFuture(false)
+  else:
+    if deadline.finished():
+      retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
+    else:
+      retFuture.cancelCallback = cancellation
+      fut.addCallback(continuation)
+      deadline.addCallback(continuation)
+  retFuture
+
+proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
+  var
+    moment: Moment
+    timer: TimerCallback
+    timeouted = false
+
+  template completeFuture(fut: untyped, timeout: bool): untyped =
+    if fut.failed():
+      retFuture.fail(fut.error(), warn = false)
+    elif fut.cancelled():
+      if timeout:
         retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
+      else:
+        retFuture.cancelAndSchedule()
+    else:
+      when type(fut).T is void:
+        retFuture.complete()
+      else:
+        retFuture.complete(fut.value)
+
+  proc continuation(udata: pointer) {.raises: [].} =
+    if not(retFuture.finished()):
+      if timeouted:
+        # We should not unconditionally fail `retFuture` with
+        # `AsyncTimeoutError`. Initiated by timeout handler cancellation
+        # could fail, in this case we could get `fut` in complete or in failed
+        # state, so we should return error/value instead of `AsyncTimeoutError`.
+        fut.completeFuture(timeouted)
         return
       if not(fut.finished()):
         # Timer exceeded first.
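For illustration, a sketch (not part of the diff) of the `withTimeout` contract these hunks preserve: `false` when the timeout fires and the future has to be cancelled, `true` when the wrapped future finishes first (or still manages to finish while being cancelled):

import chronos

proc demo() {.async.} =
  let slow = sleepAsync(10.seconds)
  # The timeout fires first: `slow` is cancelled and the call returns false.
  let finishedInTime = await slow.withTimeout(50.milliseconds)
  doAssert not finishedInTime

  let quick = sleepAsync(10.milliseconds)
  # The future finishes first: the call returns true.
  let ok = await quick.withTimeout(1.seconds)
  doAssert ok

waitFor demo()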
@@ -1552,7 +1670,7 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
       # Future `fut` completed/failed/cancelled first.
       if not(isNil(timer)):
         clearTimer(timer)
-      fut.completeFuture()
+      fut.completeFuture(false)
     timer = nil
 
   var cancellation: proc(udata: pointer) {.gcsafe, raises: [].}
@@ -1562,12 +1680,12 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto =
       clearTimer(timer)
       fut.cancelSoon()
     else:
-      fut.completeFuture()
+      fut.completeFuture(false)
 
     timer = nil
 
   if fut.finished():
-    fut.completeFuture()
+    fut.completeFuture(false)
   else:
     if timeout.isZero():
       retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
@@ -1592,7 +1710,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
   ## TODO: In case when ``fut`` got cancelled, what result Future[T]
   ## should return, because it can't be cancelled too.
   var
-    retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule})
+    retFuture = newFuture[T]("chronos.wait(duration)",
+                             {FutureFlag.OwnCancelSchedule})
   # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture`
   # manually at proper time.
 
@ -1607,6 +1726,61 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {.
|
|||||||
else:
|
else:
|
||||||
wait(fut, timeout.milliseconds())
|
wait(fut, timeout.milliseconds())
|
||||||
|
|
||||||
|
proc wait*[T](fut: Future[T], deadline: SomeFuture): Future[T] =
|
||||||
|
## Returns a future which will complete once future ``fut`` completes
|
||||||
|
## or if ``deadline`` future completes.
|
||||||
|
##
|
||||||
|
## If `deadline` future completes before future `fut` -
|
||||||
|
## `AsyncTimeoutError` exception will be raised.
|
||||||
|
##
|
||||||
|
## Note: `deadline` future will not be cancelled and/or failed.
|
||||||
|
##
|
||||||
|
## Note: While `waitUntil(future)` operation is pending, please avoid any
|
||||||
|
## attempts to cancel future `fut`. If it happens `waitUntil()` could
|
||||||
|
## introduce undefined behavior - it could raise`CancelledError` or
|
||||||
|
## `AsyncTimeoutError`.
|
||||||
|
##
|
||||||
|
## If you need to cancel `future` - cancel `waitUntil(future)` instead.
|
||||||
|
var
|
||||||
|
retFuture = newFuture[T]("chronos.wait(future)",
|
||||||
|
{FutureFlag.OwnCancelSchedule})
|
||||||
|
# We set `OwnCancelSchedule` flag, because we going to cancel `retFuture`
|
||||||
|
# manually at proper time.
|
||||||
|
waitUntilImpl(fut, retFuture, deadline)
|
||||||
|
|
||||||
|
proc join*(future: FutureBase): Future[void] {.
|
||||||
|
async: (raw: true, raises: [CancelledError]).} =
|
||||||
|
## Returns a future which will complete once future ``future`` completes.
|
||||||
|
##
|
||||||
|
## This primitive helps to carefully monitor ``future`` state, in case of
|
||||||
|
## cancellation ``join`` operation it will not going to cancel ``future``.
|
||||||
|
##
|
||||||
|
## If ``future`` is already completed - ``join`` will return completed
|
||||||
|
## future immediately.
|
||||||
|
let retFuture = newFuture[void]("chronos.join()")
|
||||||
|
|
||||||
|
proc continuation(udata: pointer) {.gcsafe.} =
|
||||||
|
retFuture.complete()
|
||||||
|
|
||||||
|
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||||
|
future.removeCallback(continuation, cast[pointer](retFuture))
|
||||||
|
|
||||||
|
if not(future.finished()):
|
||||||
|
future.addCallback(continuation, cast[pointer](retFuture))
|
||||||
|
retFuture.cancelCallback = cancellation
|
||||||
|
else:
|
||||||
|
retFuture.complete()
|
||||||
|
|
||||||
|
retFuture
|
||||||
|
|
||||||
|
proc join*(future: SomeFuture): Future[void] {.
|
||||||
|
async: (raw: true, raises: [CancelledError]).} =
|
||||||
|
## Returns a future which will complete once future ``future`` completes.
|
||||||
|
##
|
||||||
|
## This primitive helps to carefully monitor ``future`` state, in case of
|
||||||
|
## cancellation ``join`` operation it will not going to cancel ``future``.
|
||||||
|
join(FutureBase(future))
|
||||||
|
|
||||||
when defined(windows):
|
when defined(windows):
|
||||||
import ../osdefs
|
import ../osdefs
|
||||||
|
|
||||||
@@ -1736,8 +1910,21 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto =
     InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError)

   let
-    retFuture = newFuture[T]("chronos.wait()", {OwnCancelSchedule})
+    retFuture = newFuture[T]("chronos.wait(duration)", {OwnCancelSchedule})
   # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture`
   # manually at proper time.

   waitImpl(fut, retFuture, timeout)

+proc wait*(fut: InternalRaisesFuture, deadline: SomeFuture): auto =
+  type
+    T = type(fut).T
+    E = type(fut).E
+    InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError)
+
+  let
+    retFuture = newFuture[T]("chronos.wait(future)", {OwnCancelSchedule})
+  # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture`
+  # manually at proper time.
+
+  waitUntilImpl(fut, retFuture, deadline)
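The deadline-based `wait` and the new `join` accept any future as the deadline. A minimal usage sketch (not part of this diff; `worker` and the durations are invented for illustration):

import chronos

proc worker(): Future[int] {.async.} =
  await sleepAsync(50.milliseconds)
  return 42

proc demo() {.async.} =
  # Any future can serve as the deadline; here a sleep future is used.
  let deadline = sleepAsync(200.milliseconds)
  try:
    echo await worker().wait(deadline)   # raises AsyncTimeoutError if late
  except AsyncTimeoutError:
    echo "deadline expired first"
  # join() only observes the future; cancelling the join does not cancel it.
  await deadline.join()

waitFor demo()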
@@ -219,12 +219,14 @@ proc decodeParams(params: NimNode): AsyncParams =
   var
     raw = false
     raises: NimNode = nil
-    handleException = chronosHandleException
+    handleException = false
+    hasLocalAnnotations = false

   for param in params:
     param.expectKind(nnkExprColonExpr)

     if param[0].eqIdent("raises"):
+      hasLocalAnnotations = true
       param[1].expectKind(nnkBracket)
       if param[1].len == 0:
         raises = makeNoRaises()
@@ -236,10 +238,14 @@ proc decodeParams(params: NimNode): AsyncParams =
       # boolVal doesn't work in untyped macros it seems..
       raw = param[1].eqIdent("true")
     elif param[0].eqIdent("handleException"):
+      hasLocalAnnotations = true
       handleException = param[1].eqIdent("true")
     else:
       warning("Unrecognised async parameter: " & repr(param[0]), param)

+  if not hasLocalAnnotations:
+    handleException = chronosHandleException
+
   (raw, raises, handleException)

 proc isEmpty(n: NimNode): bool {.compileTime.} =
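With this change the global `chronosHandleException` default only applies to procedures without local annotations. A sketch of how the per-proc annotations are spelled (declarations only, for illustration):

import chronos

proc plain() {.async.} =
  # No local annotation: follows the global chronosHandleException setting.
  discard

proc guarded() {.async: (handleException: true).} =
  # Local annotation: wins over the global default.
  discard

proc strict() {.async: (raises: [CancelledError]).} =
  # A `raises` list also counts as a local annotation.
  await sleepAsync(1.milliseconds)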
@@ -220,7 +220,10 @@ proc selectInto2*[T](s: Selector[T], timeout: int,
   verifySelectParams(timeout, -1, int(high(cint)))

   let
-    maxEventsCount = min(len(s.pollfds), len(readyKeys))
+    maxEventsCount = culong(min(len(s.pollfds), len(readyKeys)))
+      # Without `culong` conversion, this code could fail with RangeError
+      # defect on explicit Tnfds(integer) conversion (probably related to
+      # combination of nim+clang (android toolchain)).
     eventsCount =
       if maxEventsCount > 0:
         let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount),
@@ -965,7 +965,7 @@ elif defined(macos) or defined(macosx):
       events*: cshort
       revents*: cshort

-    Tnfds* {.importc: "nfds_t", header: "<poll.h>".} = cuint
+    Tnfds* {.importc: "nfds_t", header: "<poll.h>".} = culong

   const
     POLLIN* = 0x0001
@@ -9,7 +9,7 @@

 {.push raises: [].}

-import ../[config, asyncloop, asyncsync]
+import ../[config, asyncloop, asyncsync, bipbuffer]
 import ../transports/[common, stream]
 export asyncloop, asyncsync, stream, common

@@ -34,10 +34,11 @@ type
   AsyncStreamWriteEOFError* = object of AsyncStreamWriteError

   AsyncBuffer* = object
-    offset*: int
-    buffer*: seq[byte]
+    backend*: BipBuffer
     events*: array[2, AsyncEvent]

+  AsyncBufferRef* = ref AsyncBuffer
+
   WriteType* = enum
     Pointer, Sequence, String

@@ -73,7 +74,7 @@ type
     tsource*: StreamTransport
     readerLoop*: StreamReaderLoop
     state*: AsyncStreamState
-    buffer*: AsyncBuffer
+    buffer*: AsyncBufferRef
     udata: pointer
     error*: ref AsyncStreamError
    bytesCount*: uint64
@@ -96,85 +97,51 @@ type

   AsyncStreamRW* = AsyncStreamReader | AsyncStreamWriter

-proc init*(t: typedesc[AsyncBuffer], size: int): AsyncBuffer =
-  AsyncBuffer(
-    buffer: newSeq[byte](size),
-    events: [newAsyncEvent(), newAsyncEvent()],
-    offset: 0
+proc new*(t: typedesc[AsyncBufferRef], size: int): AsyncBufferRef =
+  AsyncBufferRef(
+    backend: BipBuffer.init(size),
+    events: [newAsyncEvent(), newAsyncEvent()]
   )

-proc getBuffer*(sb: AsyncBuffer): pointer {.inline.} =
-  unsafeAddr sb.buffer[sb.offset]
-
-proc bufferLen*(sb: AsyncBuffer): int {.inline.} =
-  len(sb.buffer) - sb.offset
-
-proc getData*(sb: AsyncBuffer): pointer {.inline.} =
-  unsafeAddr sb.buffer[0]
-
-template dataLen*(sb: AsyncBuffer): int =
-  sb.offset
-
-proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} =
-  doAssert(index < sb.offset)
-  sb.buffer[index]
-
-proc update*(sb: var AsyncBuffer, size: int) {.inline.} =
-  sb.offset += size
-
-template wait*(sb: var AsyncBuffer): untyped =
+template wait*(sb: AsyncBufferRef): untyped =
   sb.events[0].clear()
   sb.events[1].fire()
   sb.events[0].wait()

-template transfer*(sb: var AsyncBuffer): untyped =
+template transfer*(sb: AsyncBufferRef): untyped =
   sb.events[1].clear()
   sb.events[0].fire()
   sb.events[1].wait()

-proc forget*(sb: var AsyncBuffer) {.inline.} =
+proc forget*(sb: AsyncBufferRef) {.inline.} =
   sb.events[1].clear()
   sb.events[0].fire()

-proc shift*(sb: var AsyncBuffer, size: int) {.inline.} =
-  if sb.offset > size:
-    moveMem(addr sb.buffer[0], addr sb.buffer[size], sb.offset - size)
-    sb.offset = sb.offset - size
-  else:
-    sb.offset = 0
-
-proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} =
-  copyMem(cast[pointer](cast[uint](dest) + cast[uint](offset)),
-          unsafeAddr sb.buffer[0], length)
-
-proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte,
+proc upload*(sb: AsyncBufferRef, pbytes: ptr byte,
              nbytes: int): Future[void] {.
     async: (raises: [CancelledError]).} =
   ## You can upload any amount of bytes to the buffer. If size of internal
   ## buffer is not enough to fit all the data at once, data will be uploaded
   ## via chunks of size up to internal buffer size.
-  var length = nbytes
-  var srcBuffer = cast[ptr UncheckedArray[byte]](pbytes)
-  var srcOffset = 0
+  var
+    length = nbytes
+    srcBuffer = pbytes.toUnchecked()
+    offset = 0

   while length > 0:
-    let size = min(length, sb[].bufferLen())
+    let size = min(length, sb.backend.availSpace())
     if size == 0:
-      # Internal buffer is full, we need to transfer data to consumer.
-      await sb[].transfer()
+      # Internal buffer is full, we need to notify consumer.
+      await sb.transfer()
     else:
+      let (data, _) = sb.backend.reserve()
       # Copy data from `pbytes` to internal buffer.
-      copyMem(addr sb[].buffer[sb.offset], addr srcBuffer[srcOffset], size)
-      sb[].offset = sb[].offset + size
-      srcOffset = srcOffset + size
+      copyMem(data, addr srcBuffer[offset], size)
+      sb.backend.commit(size)
+      offset = offset + size
       length = length - size
   # We notify consumers that new data is available.
-  sb[].forget()
+  sb.forget()

-template toDataOpenArray*(sb: AsyncBuffer): auto =
-  toOpenArray(sb.buffer, 0, sb.offset - 1)
-
-template toBufferOpenArray*(sb: AsyncBuffer): auto =
-  toOpenArray(sb.buffer, sb.offset, len(sb.buffer) - 1)
-
 template copyOut*(dest: pointer, item: WriteItem, length: int) =
   if item.kind == Pointer:
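Taken together, the new `AsyncBufferRef` is a small producer/consumer handshake around a `BipBuffer`. A rough internal-usage sketch built only from the calls visible in the hunks above (chronos-internal API, not a public surface; module paths are assumptions):

import chronos
import chronos/bipbuffer
import chronos/streams/asyncstream

let ab = AsyncBufferRef.new(4096)

proc produce(ab: AsyncBufferRef, data: seq[byte]) {.async.} =
  # upload() copies data in via reserve()/commit(); assumes data is non-empty.
  await ab.upload(unsafeAddr data[0], len(data))

proc drain(ab: AsyncBufferRef): Future[int] {.async.} =
  # Wait until the producer signals data, walk readable regions, then consume.
  while len(ab.backend) == 0:
    await ab.wait()
  var total = 0
  for (region, rsize) in ab.backend.regions():
    total += rsize        # a real reader would copyMem from `region` here
  ab.backend.consume(total)
  return total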
@@ -243,7 +210,7 @@ proc atEof*(rstream: AsyncStreamReader): bool =
     rstream.rsource.atEof()
   else:
     (rstream.state != AsyncStreamState.Running) and
-      (rstream.buffer.dataLen() == 0)
+      (len(rstream.buffer.backend) == 0)

 proc atEof*(wstream: AsyncStreamWriter): bool =
   ## Returns ``true`` is writing stream ``wstream`` closed or finished.
@@ -331,12 +298,12 @@ template checkStreamFinished*(t: untyped) =

 template readLoop(body: untyped): untyped =
   while true:
-    if rstream.buffer.dataLen() == 0:
+    if len(rstream.buffer.backend) == 0:
       if rstream.state == AsyncStreamState.Error:
         raise rstream.error

     let (consumed, done) = body
-    rstream.buffer.shift(consumed)
+    rstream.buffer.backend.consume(consumed)
     rstream.bytesCount = rstream.bytesCount + uint64(consumed)
     if done:
       break
@@ -350,7 +317,7 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
   ## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store
   ## it to ``pbytes``.
   ##
-  ## If EOF is received and ``nbytes`` is not yet readed, the procedure
+  ## If EOF is received and ``nbytes`` is not yet read, the procedure
   ## will raise ``AsyncStreamIncompleteError``.
   doAssert(not(isNil(pbytes)), "pbytes must not be nil")
   doAssert(nbytes >= 0, "nbytes must be non-negative integer")
@@ -373,17 +340,23 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer,
   if isNil(rstream.readerLoop):
     await readExactly(rstream.rsource, pbytes, nbytes)
   else:
-    var index = 0
-    var pbuffer = cast[ptr UncheckedArray[byte]](pbytes)
+    var
+      index = 0
+      pbuffer = pbytes.toUnchecked()
     readLoop():
-      if rstream.buffer.dataLen() == 0:
+      if len(rstream.buffer.backend) == 0:
        if rstream.atEof():
          raise newAsyncStreamIncompleteError()
-      let count = min(nbytes - index, rstream.buffer.dataLen())
-      if count > 0:
-        rstream.buffer.copyData(addr pbuffer[index], 0, count)
-        index += count
-      (consumed: count, done: index == nbytes)
+      var bytesRead = 0
+      for (region, rsize) in rstream.buffer.backend.regions():
+        let count = min(nbytes - index, rsize)
+        bytesRead += count
+        if count > 0:
+          copyMem(addr pbuffer[index], region, count)
+          index += count
+        if index == nbytes:
+          break
+      (consumed: bytesRead, done: index == nbytes)

 proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
                nbytes: int): Future[int] {.
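The caller-facing shape of the reader API is unchanged by the buffer rework. A small caller sketch (the `readHeader` helper is hypothetical):

proc readHeader(rstream: AsyncStreamReader): Future[array[4, byte]] {.async.} =
  var header: array[4, byte]
  # Fills the whole array or raises AsyncStreamIncompleteError on early EOF.
  await rstream.readExactly(addr header[0], len(header))
  return header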
@@ -407,15 +380,21 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer,
   if isNil(rstream.readerLoop):
     return await readOnce(rstream.rsource, pbytes, nbytes)
   else:
-    var count = 0
+    var
+      pbuffer = pbytes.toUnchecked()
+      index = 0
     readLoop():
-      if rstream.buffer.dataLen() == 0:
+      if len(rstream.buffer.backend) == 0:
         (0, rstream.atEof())
       else:
-        count = min(rstream.buffer.dataLen(), nbytes)
-        rstream.buffer.copyData(pbytes, 0, count)
-        (count, true)
-    return count
+        for (region, rsize) in rstream.buffer.backend.regions():
+          let size = min(rsize, nbytes - index)
+          copyMem(addr pbuffer[index], region, size)
+          index += size
+          if index >= nbytes:
+            break
+        (index, true)
+    index

 proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
                 sep: seq[byte]): Future[int] {.
@@ -456,28 +435,32 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int,
   if isNil(rstream.readerLoop):
     return await readUntil(rstream.rsource, pbytes, nbytes, sep)
   else:
-    var pbuffer = cast[ptr UncheckedArray[byte]](pbytes)
-    var state = 0
-    var k = 0
+    var
+      pbuffer = pbytes.toUnchecked()
+      state = 0
+      k = 0
     readLoop():
       if rstream.atEof():
         raise newAsyncStreamIncompleteError()

       var index = 0
-      while index < rstream.buffer.dataLen():
+      for ch in rstream.buffer.backend:
         if k >= nbytes:
           raise newAsyncStreamLimitError()
-        let ch = rstream.buffer[index]
         inc(index)
         pbuffer[k] = ch
         inc(k)

         if sep[state] == ch:
           inc(state)
           if state == len(sep):
             break
         else:
           state = 0

       (index, state == len(sep))
-    return k
+    k

 proc readLine*(rstream: AsyncStreamReader, limit = 0,
                sep = "\r\n"): Future[string] {.
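`readUntil` and `readLine` keep their separator/limit semantics on top of the new backend. A hypothetical caller:

proc readHttpRequestLine(rstream: AsyncStreamReader): Future[string] {.async.} =
  # readLine() stops at the separator (default "\r\n") or after `limit` bytes.
  return await rstream.readLine(limit = 8192)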
@@ -507,18 +490,19 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0,
     return await readLine(rstream.rsource, limit, sep)
   else:
     let lim = if limit <= 0: -1 else: limit
-    var state = 0
-    var res = ""
+    var
+      state = 0
+      res = ""

     readLoop():
       if rstream.atEof():
         (0, true)
       else:
         var index = 0
-        while index < rstream.buffer.dataLen():
-          let ch = char(rstream.buffer[index])
+        for ch in rstream.buffer.backend:
           inc(index)

-          if sep[state] == ch:
+          if sep[state] == char(ch):
             inc(state)
             if state == len(sep):
               break
@@ -529,11 +513,14 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0,
               res.add(sep[0 ..< missing])
             else:
               res.add(sep[0 ..< state])
-              res.add(ch)
+              state = 0
+
+            res.add(char(ch))
             if len(res) == lim:
               break

         (index, (state == len(sep)) or (lim == len(res)))
-    return res
+    res

 proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.
     async: (raises: [CancelledError, AsyncStreamError]).} =
@@ -555,15 +542,17 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.
   if isNil(rstream.readerLoop):
     return await read(rstream.rsource)
   else:
-    var res = newSeq[byte]()
+    var res: seq[byte]
     readLoop():
       if rstream.atEof():
         (0, true)
       else:
-        let count = rstream.buffer.dataLen()
-        res.add(rstream.buffer.buffer.toOpenArray(0, count - 1))
-        (count, false)
-    return res
+        var bytesRead = 0
+        for (region, rsize) in rstream.buffer.backend.regions():
+          bytesRead += rsize
+          res.add(region.toUnchecked().toOpenArray(0, rsize - 1))
+        (bytesRead, false)
+    res

 proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.
     async: (raises: [CancelledError, AsyncStreamError]).} =
@@ -592,10 +581,13 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.
       if rstream.atEof():
         (0, true)
       else:
-        let count = min(rstream.buffer.dataLen(), n - len(res))
-        res.add(rstream.buffer.buffer.toOpenArray(0, count - 1))
-        (count, len(res) == n)
-    return res
+        var bytesRead = 0
+        for (region, rsize) in rstream.buffer.backend.regions():
+          let count = min(rsize, n - len(res))
+          bytesRead += count
+          res.add(region.toUnchecked().toOpenArray(0, count - 1))
+        (bytesRead, len(res) == n)
+    res

 proc consume*(rstream: AsyncStreamReader): Future[int] {.
     async: (raises: [CancelledError, AsyncStreamError]).} =
@@ -622,9 +614,10 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {.
       if rstream.atEof():
         (0, true)
       else:
-        res += rstream.buffer.dataLen()
-        (rstream.buffer.dataLen(), false)
-    return res
+        let used = len(rstream.buffer.backend)
+        res += used
+        (used, false)
+    res

 proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.
     async: (raises: [CancelledError, AsyncStreamError]).} =
@@ -652,13 +645,12 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.
   else:
     var res = 0
     readLoop():
-      if rstream.atEof():
-        (0, true)
-      else:
-        let count = min(rstream.buffer.dataLen(), n - res)
-        res += count
-        (count, res == n)
-    return res
+      let
+        used = len(rstream.buffer.backend)
+        count = min(used, n - res)
+      res += count
+      (count, res == n)
+    res

 proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
     async: (raises: [CancelledError, AsyncStreamError]).} =
@@ -689,15 +681,18 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {.
     await readMessage(rstream.rsource, pred)
   else:
     readLoop():
-      let count = rstream.buffer.dataLen()
-      if count == 0:
+      if len(rstream.buffer.backend) == 0:
        if rstream.atEof():
          pred([])
        else:
          # Case, when transport's buffer is not yet filled with data.
          (0, false)
       else:
-        pred(rstream.buffer.buffer.toOpenArray(0, count - 1))
+        var res: tuple[consumed: int, done: bool]
+        for (region, rsize) in rstream.buffer.backend.regions():
+          res = pred(region.toUnchecked().toOpenArray(0, rsize - 1))
+          break
+        res

 proc write*(wstream: AsyncStreamWriter, pbytes: pointer,
             nbytes: int) {.
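The predicate contract is unchanged by the buffer rework: it receives a readable slice (an empty slice means EOF) and reports how many bytes it consumed and whether it is done. A hypothetical drain-style predicate, assuming the usual `ReadMessagePredicate` shape:

proc drainAll(rstream: AsyncStreamReader) {.async.} =
  proc predicate(data: openArray[byte]): tuple[consumed: int, done: bool] {.
       gcsafe, raises: [].} =
    if len(data) == 0:
      # Empty slice signals EOF before the message was complete.
      (0, true)
    else:
      # Consume everything offered and ask for more.
      (len(data), false)
  await rstream.readMessage(predicate)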
@@ -841,24 +836,7 @@ proc join*(rw: AsyncStreamRW): Future[void] {.
     async: (raw: true, raises: [CancelledError]).} =
   ## Get Future[void] which will be completed when stream become finished or
   ## closed.
-  when rw is AsyncStreamReader:
-    var retFuture = newFuture[void]("async.stream.reader.join")
-  else:
-    var retFuture = newFuture[void]("async.stream.writer.join")
-
-  proc continuation(udata: pointer) {.gcsafe, raises:[].} =
-    retFuture.complete()
-
-  proc cancellation(udata: pointer) {.gcsafe, raises:[].} =
-    rw.future.removeCallback(continuation, cast[pointer](retFuture))
-
-  if not(rw.future.finished()):
-    rw.future.addCallback(continuation, cast[pointer](retFuture))
-    retFuture.cancelCallback = cancellation
-  else:
-    retFuture.complete()
-
-  return retFuture
+  rw.future.join()

 proc close*(rw: AsyncStreamRW) =
   ## Close and frees resources of stream ``rw``.
@@ -951,7 +929,8 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop,
   child.readerLoop = loop
   child.rsource = rsource
   child.tsource = rsource.tsource
-  child.buffer = AsyncBuffer.init(bufferSize)
+  let size = max(AsyncStreamDefaultBufferSize, bufferSize)
+  child.buffer = AsyncBufferRef.new(size)
   trackCounter(AsyncStreamReaderTrackerName)
   child.startReader()

@@ -963,7 +942,8 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop,
   child.readerLoop = loop
   child.rsource = rsource
   child.tsource = rsource.tsource
-  child.buffer = AsyncBuffer.init(bufferSize)
+  let size = max(AsyncStreamDefaultBufferSize, bufferSize)
+  child.buffer = AsyncBufferRef.new(size)
   if not isNil(udata):
     GC_ref(udata)
     child.udata = cast[pointer](udata)
@@ -1102,6 +1082,22 @@ proc newAsyncStreamReader*(tsource: StreamTransport): AsyncStreamReader =
   res.init(tsource)
   res

+proc newAsyncStreamReader*[T](rsource: AsyncStreamReader,
+                              udata: ref T): AsyncStreamReader =
+  ## Create copy of AsyncStreamReader object ``rsource``.
+  ##
+  ## ``udata`` - user object which will be associated with new AsyncStreamReader
+  ## object.
+  var res = AsyncStreamReader()
+  res.init(rsource, udata)
+  res
+
+proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader =
+  ## Create copy of AsyncStreamReader object ``rsource``.
+  var res = AsyncStreamReader()
+  res.init(rsource)
+  res
+
 proc newAsyncStreamWriter*[T](wsource: AsyncStreamWriter,
                               loop: StreamWriterLoop,
                               queueSize = AsyncStreamDefaultQueueSize,
@@ -1167,22 +1163,6 @@ proc newAsyncStreamWriter*(wsource: AsyncStreamWriter): AsyncStreamWriter =
   res.init(wsource)
   res

-proc newAsyncStreamReader*[T](rsource: AsyncStreamWriter,
-                              udata: ref T): AsyncStreamWriter =
-  ## Create copy of AsyncStreamReader object ``rsource``.
-  ##
-  ## ``udata`` - user object which will be associated with new AsyncStreamReader
-  ## object.
-  var res = AsyncStreamReader()
-  res.init(rsource, udata)
-  res
-
-proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader =
-  ## Create copy of AsyncStreamReader object ``rsource``.
-  var res = AsyncStreamReader()
-  res.init(rsource)
-  res
-
 proc getUserData*[T](rw: AsyncStreamRW): T {.inline.} =
   ## Obtain user data associated with AsyncStreamReader or AsyncStreamWriter
   ## object ``rw``.
@@ -18,7 +18,7 @@
 {.push raises: [].}

 import results
-import ../[asyncloop, timer, config]
+import ../[asyncloop, timer, bipbuffer, config]
 import asyncstream, ../transports/[stream, common]
 export asyncloop, asyncstream, stream, timer, common

@@ -103,7 +103,7 @@ func endsWith(s, suffix: openArray[byte]): bool =
 proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
   var rstream = BoundedStreamReader(stream)
   rstream.state = AsyncStreamState.Running
-  var buffer = newSeq[byte](rstream.buffer.bufferLen())
+  var buffer = newSeq[byte](rstream.buffer.backend.availSpace())
   while true:
     let toRead =
       if rstream.boundSize.isNone():
@@ -127,7 +127,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
           # There should be one step between transferring last bytes to the
           # consumer and declaring stream EOF. Otherwise could not be
           # consumed.
-          await upload(addr rstream.buffer, addr buffer[0], length)
+          await upload(rstream.buffer, addr buffer[0], length)
           if rstream.state == AsyncStreamState.Running:
             rstream.state = AsyncStreamState.Finished
         else:
@@ -135,7 +135,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
           # There should be one step between transferring last bytes to the
           # consumer and declaring stream EOF. Otherwise could not be
           # consumed.
-          await upload(addr rstream.buffer, addr buffer[0], res)
+          await upload(rstream.buffer, addr buffer[0], res)

           if (res < toRead) and rstream.rsource.atEof():
             case rstream.cmpop
@@ -151,7 +151,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
           # There should be one step between transferring last bytes to the
           # consumer and declaring stream EOF. Otherwise could not be
           # consumed.
-          await upload(addr rstream.buffer, addr buffer[0], res)
+          await upload(rstream.buffer, addr buffer[0], res)

           if (res < toRead) and rstream.rsource.atEof():
             case rstream.cmpop
@@ -11,7 +11,7 @@

 {.push raises: [].}

-import ../[asyncloop, timer, config]
+import ../[asyncloop, timer, bipbuffer, config]
 import asyncstream, ../transports/[stream, common]
 import results
 export asyncloop, asyncstream, stream, timer, common, results
@@ -118,11 +118,11 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
       var chunksize = cres.get()
       if chunksize > 0'u64:
         while chunksize > 0'u64:
-          let toRead = int(min(chunksize,
-                               uint64(rstream.buffer.bufferLen())))
-          await rstream.rsource.readExactly(rstream.buffer.getBuffer(),
-                                            toRead)
-          rstream.buffer.update(toRead)
+          let
+            (data, rsize) = rstream.buffer.backend.reserve()
+            toRead = int(min(chunksize, uint64(rsize)))
+          await rstream.rsource.readExactly(data, toRead)
+          rstream.buffer.backend.commit(toRead)
           await rstream.buffer.transfer()
           chunksize = chunksize - uint64(toRead)

@@ -242,7 +242,7 @@ proc tlsReadApp(engine: ptr SslEngineContext,
     try:
       var length = 0'u
       var buf = sslEngineRecvappBuf(engine[], length)
-      await upload(addr reader.buffer, buf, int(length))
+      await upload(reader.buffer, buf, int(length))
       sslEngineRecvappAck(engine[], length)
       TLSResult.Success
     except CancelledError:
@@ -510,8 +510,10 @@ proc newTLSClientAsyncStream*(

   if TLSFlags.NoVerifyHost in flags:
     sslClientInitFull(res.ccontext, addr res.x509, nil, 0)
-    x509NoanchorInit(res.xwc, addr res.x509.vtable)
-    sslEngineSetX509(res.ccontext.eng, addr res.xwc.vtable)
+    x509NoanchorInit(res.xwc,
+                     X509ClassPointerConst(addr res.x509.vtable))
+    sslEngineSetX509(res.ccontext.eng,
+                     X509ClassPointerConst(addr res.xwc.vtable))
   else:
     when trustAnchors is TrustAnchorStore:
       res.trustAnchors = trustAnchors
@@ -611,7 +613,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader,
                     uint16(maxVersion))

   if not isNil(cache):
-    sslServerSetCache(res.scontext, addr cache.context.vtable)
+    sslServerSetCache(
+      res.scontext, SslSessionCacheClassPointerConst(addr cache.context.vtable))

   if TLSFlags.EnforceServerPref in flags:
     sslEngineAddFlags(res.scontext.eng, OPT_ENFORCE_SERVER_PREFERENCES)
@@ -370,53 +370,42 @@ template add(a: var string, b: Base10Buf[uint64]) =
   for index in 0 ..< b.len:
     a.add(char(b.data[index]))

-func `$`*(a: Duration): string {.inline.} =
-  ## Returns string representation of Duration ``a`` as nanoseconds value.
-  var res = ""
-  var v = a.value
+func toString*(a: timer.Duration, parts = int.high): string =
+  ## Returns a pretty string representation of Duration ``a`` - the
+  ## number of parts returned can be limited thus truncating the output to
+  ## an approximation that grows more precise as the duration becomes smaller
+  var
+    res = newStringOfCap(32)
+    v = a.nanoseconds()
+    parts = parts
+
+  template f(n: string, T: Duration) =
+    if parts <= 0:
+      return res
+
+    if v >= T.nanoseconds():
+      res.add(Base10.toBytes(uint64(v div T.nanoseconds())))
+      res.add(n)
+      v = v mod T.nanoseconds()
+      dec parts
+      if v == 0:
+        return res
+
+  f("w", Week)
+  f("d", Day)
+  f("h", Hour)
+  f("m", Minute)
+  f("s", Second)
+  f("ms", Millisecond)
+  f("us", Microsecond)
+  f("ns", Nanosecond)
+
-  if v >= Week.value:
-    res.add(Base10.toBytes(uint64(v div Week.value)))
-    res.add('w')
-    v = v mod Week.value
-  if v == 0: return res
-  if v >= Day.value:
-    res.add(Base10.toBytes(uint64(v div Day.value)))
-    res.add('d')
-    v = v mod Day.value
-  if v == 0: return res
-  if v >= Hour.value:
-    res.add(Base10.toBytes(uint64(v div Hour.value)))
-    res.add('h')
-    v = v mod Hour.value
-  if v == 0: return res
-  if v >= Minute.value:
-    res.add(Base10.toBytes(uint64(v div Minute.value)))
-    res.add('m')
-    v = v mod Minute.value
-  if v == 0: return res
-  if v >= Second.value:
-    res.add(Base10.toBytes(uint64(v div Second.value)))
-    res.add('s')
-    v = v mod Second.value
-  if v == 0: return res
-  if v >= Millisecond.value:
-    res.add(Base10.toBytes(uint64(v div Millisecond.value)))
-    res.add('m')
-    res.add('s')
-    v = v mod Millisecond.value
-  if v == 0: return res
-  if v >= Microsecond.value:
-    res.add(Base10.toBytes(uint64(v div Microsecond.value)))
-    res.add('u')
-    res.add('s')
-    v = v mod Microsecond.value
-  if v == 0: return res
-  res.add(Base10.toBytes(uint64(v div Nanosecond.value)))
-  res.add('n')
-  res.add('s')
   res

+func `$`*(a: Duration): string {.inline.} =
+  ## Returns string representation of Duration ``a``.
+  a.toString()
+
 func `$`*(a: Moment): string {.inline.} =
   ## Returns string representation of Moment ``a`` as nanoseconds value.
   var res = ""
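The new `toString` formatter is what `$` now delegates to; a quick illustration of the `parts` truncation (outputs shown are indicative):

import chronos/timer

let d = 1.hours + 15.minutes + 42.seconds
echo d.toString()     # full precision, e.g. "1h15m42s"
echo d.toString(2)    # truncated to two parts, e.g. "1h15m"
echo $d               # `$` now delegates to toString()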
@@ -10,6 +10,7 @@
 {.push raises: [].}

 import std/[strutils]
+import results
 import stew/[base10, byteutils]
 import ".."/[config, asyncloop, osdefs, oserrno, handles]

@@ -18,7 +19,7 @@ from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress,
 from std/nativesockets import toInt, `$`

 export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType,
-       Protocol, Port, toInt, `$`
+       Protocol, Port, toInt, `$`, results

 const
   DefaultStreamBufferSize* = chronosTransportDefaultBufferSize
@@ -29,7 +30,7 @@ type
   ServerFlags* = enum
     ## Server's flags
     ReuseAddr, ReusePort, TcpNoDelay, NoAutoRead, GCUserData, FirstPipe,
-    NoPipeFlash, Broadcast
+    NoPipeFlash, Broadcast, V4Mapped

   DualStackType* {.pure.} = enum
     Auto, Enabled, Disabled, Default
@@ -200,6 +201,15 @@ proc `$`*(address: TransportAddress): string =
   of AddressFamily.None:
     "None"

+proc toIpAddress*(address: TransportAddress): IpAddress =
+  case address.family
+  of AddressFamily.IPv4:
+    IpAddress(family: IpAddressFamily.IPv4, address_v4: address.address_v4)
+  of AddressFamily.IPv6:
+    IpAddress(family: IpAddressFamily.IPv6, address_v6: address.address_v6)
+  else:
+    raiseAssert "IpAddress do not support address family " & $address.family
+
 proc toHex*(address: TransportAddress): string =
   ## Returns hexadecimal representation of ``address``.
   case address.family
@@ -783,3 +793,25 @@ proc setDualstack*(socket: AsyncFD,
     else:
       ? getDomain(socket)
   setDualstack(socket, family, flag)
+
+proc getAutoAddress*(port: Port): TransportAddress =
+  var res =
+    if isAvailable(AddressFamily.IPv6):
+      AnyAddress6
+    else:
+      AnyAddress
+  res.port = port
+  res
+
+proc getAutoAddresses*(
+       localPort: Port,
+       remotePort: Port
+     ): tuple[local: TransportAddress, remote: TransportAddress] =
+  var (local, remote) =
+    if isAvailable(AddressFamily.IPv6):
+      (AnyAddress6, AnyAddress6)
+    else:
+      (AnyAddress, AnyAddress)
+  local.port = localPort
+  remote.port = remotePort
+  (local, remote)
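`getAutoAddress` picks the wildcard address based on what the host supports, and `toIpAddress` bridges to the `std/net` type. A short caller sketch (port number is arbitrary):

import chronos

# "::" when IPv6 is available on the host, otherwise "0.0.0.0".
let listenAddr = getAutoAddress(Port(9000))
echo listenAddr, " as std/net address: ", listenAddr.toIpAddress()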
@@ -10,11 +10,14 @@
 {.push raises: [].}

 import std/deques
+import results
 when not(defined(windows)): import ".."/selectors2
-import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles]
-import "."/common
+import ".."/[asyncloop, osdefs, oserrno, osutils, handles]
+import "."/[common, ipnet]
 import stew/ptrops

+export results
+
 type
   VectorKind = enum
     WithoutAddress, WithAddress
@@ -60,29 +63,78 @@ type
 const
   DgramTransportTrackerName* = "datagram.transport"

+proc getRemoteAddress(transp: DatagramTransport,
+                      address: Sockaddr_storage, length: SockLen,
+                     ): TransportAddress =
+  var raddr: TransportAddress
+  fromSAddr(unsafeAddr address, length, raddr)
+  if ServerFlags.V4Mapped in transp.flags:
+    if raddr.isV4Mapped(): raddr.toIPv4() else: raddr
+  else:
+    raddr
+
+proc getRemoteAddress(transp: DatagramTransport): TransportAddress =
+  transp.getRemoteAddress(transp.raddr, transp.ralen)
+
+proc setRemoteAddress(transp: DatagramTransport,
+                      address: TransportAddress): TransportAddress =
+  let
+    fixedAddress =
+      when defined(windows):
+        windowsAnyAddressFix(address)
+      else:
+        address
+    remoteAddress =
+      if ServerFlags.V4Mapped in transp.flags:
+        if address.family == AddressFamily.IPv4:
+          fixedAddress.toIPv6()
+        else:
+          fixedAddress
+      else:
+        fixedAddress
+  toSAddr(remoteAddress, transp.waddr, transp.walen)
+  remoteAddress
+
+proc remoteAddress2*(
+       transp: DatagramTransport
+     ): Result[TransportAddress, OSErrorCode] =
+  ## Returns ``transp`` remote socket address.
+  if transp.remote.family == AddressFamily.None:
+    var
+      saddr: Sockaddr_storage
+      slen = SockLen(sizeof(saddr))
+    if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
+                   addr slen) != 0:
+      return err(osLastError())
+    transp.remote = transp.getRemoteAddress(saddr, slen)
+  ok(transp.remote)
+
+proc localAddress2*(
+       transp: DatagramTransport
+     ): Result[TransportAddress, OSErrorCode] =
+  ## Returns ``transp`` local socket address.
+  if transp.local.family == AddressFamily.None:
+    var
+      saddr: Sockaddr_storage
+      slen = SockLen(sizeof(saddr))
+    if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
+                   addr slen) != 0:
+      return err(osLastError())
+    fromSAddr(addr saddr, slen, transp.local)
+  ok(transp.local)
+
+func toException(v: OSErrorCode): ref TransportOsError =
+  getTransportOsError(v)
+
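The Result-returning accessors let callers avoid the raising `remoteAddress`/`localAddress` wrappers. A sketch of a datagram callback using them (callback body and names are illustrative):

proc onDatagram(transp: DatagramTransport,
                raddr: TransportAddress) {.async.} =
  let local = transp.localAddress2()
  if local.isOk():
    echo "datagram from ", raddr, " on ", local.get()
  else:
    echo "getsockname() failed: ", local.error()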
 proc remoteAddress*(transp: DatagramTransport): TransportAddress {.
     raises: [TransportOsError].} =
   ## Returns ``transp`` remote socket address.
-  if transp.remote.family == AddressFamily.None:
-    var saddr: Sockaddr_storage
-    var slen = SockLen(sizeof(saddr))
-    if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
-                   addr slen) != 0:
-      raiseTransportOsError(osLastError())
-    fromSAddr(addr saddr, slen, transp.remote)
-  transp.remote
+  remoteAddress2(transp).tryGet()

 proc localAddress*(transp: DatagramTransport): TransportAddress {.
     raises: [TransportOsError].} =
-  ## Returns ``transp`` local socket address.
-  if transp.local.family == AddressFamily.None:
-    var saddr: Sockaddr_storage
-    var slen = SockLen(sizeof(saddr))
-    if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
-                   addr slen) != 0:
-      raiseTransportOsError(osLastError())
-    fromSAddr(addr saddr, slen, transp.local)
-  transp.local
+  ## Returns ``transp`` remote socket address.
+  localAddress2(transp).tryGet()

 template setReadError(t, e: untyped) =
   (t).state.incl(ReadError)
@@ -124,8 +176,8 @@ when defined(windows):
         transp.setWriterWSABuffer(vector)
         let ret =
           if vector.kind == WithAddress:
-            var fixedAddress = windowsAnyAddressFix(vector.address)
-            toSAddr(fixedAddress, transp.waddr, transp.walen)
+            # We only need `Sockaddr_storage` data here, so result discarded.
+            discard transp.setRemoteAddress(vector.address)
             wsaSendTo(fd, addr transp.wwsabuf, DWORD(1), addr bytesCount,
                       DWORD(0), cast[ptr SockAddr](addr transp.waddr),
                       cint(transp.walen),
@@ -159,22 +211,24 @@ when defined(windows):
   proc readDatagramLoop(udata: pointer) =
     var
       bytesCount: uint32
-      raddr: TransportAddress
-    var ovl = cast[PtrCustomOverlapped](udata)
-    var transp = cast[DatagramTransport](ovl.data.udata)
+      ovl = cast[PtrCustomOverlapped](udata)
+    let transp = cast[DatagramTransport](ovl.data.udata)

     while true:
       if ReadPending in transp.state:
         ## Continuation
         transp.state.excl(ReadPending)
-        let err = transp.rovl.data.errCode
+        let
+          err = transp.rovl.data.errCode
+          remoteAddress = transp.getRemoteAddress()
         case err
         of OSErrorCode(-1):
           let bytesCount = transp.rovl.data.bytesCount
           if bytesCount == 0:
             transp.state.incl({ReadEof, ReadPaused})
-          fromSAddr(addr transp.raddr, transp.ralen, raddr)
           transp.buflen = int(bytesCount)
-          asyncSpawn transp.function(transp, raddr)
+          asyncSpawn transp.function(transp, remoteAddress)
         of ERROR_OPERATION_ABORTED:
           # CancelIO() interrupt or closeSocket() call.
           transp.state.incl(ReadPaused)
@@ -189,7 +243,7 @@ when defined(windows):
           transp.setReadError(err)
           transp.state.incl(ReadPaused)
           transp.buflen = 0
-          asyncSpawn transp.function(transp, raddr)
+          asyncSpawn transp.function(transp, remoteAddress)
       else:
         ## Initiation
         if transp.state * {ReadEof, ReadClosed, ReadError} == {}:
@@ -220,7 +274,7 @@ when defined(windows):
             transp.state.incl(ReadPaused)
             transp.setReadError(err)
             transp.buflen = 0
-            asyncSpawn transp.function(transp, raddr)
+            asyncSpawn transp.function(transp, transp.getRemoteAddress())
         else:
           # Transport closure happens in callback, and we not started new
           # WSARecvFrom session.
@@ -341,18 +395,25 @@ when defined(windows):
        closeSocket(localSock)
        raiseTransportOsError(err)

+    res.flags =
+      block:
+        # Add `V4Mapped` flag when `::` address is used and dualstack is
+        # set to enabled or auto.
+        var res = flags
+        if (local.family == AddressFamily.IPv6) and local.isAnyLocal():
+          if dualstack in {DualStackType.Enabled, DualStackType.Auto}:
+            res.incl(ServerFlags.V4Mapped)
+        res
+
     if remote.port != Port(0):
-      var fixedAddress = windowsAnyAddressFix(remote)
-      var saddr: Sockaddr_storage
-      var slen: SockLen
-      toSAddr(fixedAddress, saddr, slen)
-      if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr),
-                 slen) != 0:
+      let remoteAddress = res.setRemoteAddress(remote)
+      if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr),
+                 res.walen) != 0:
        let err = osLastError()
        if sock == asyncInvalidSocket:
          closeSocket(localSock)
        raiseTransportOsError(err)
-      res.remote = fixedAddress
+      res.remote = remoteAddress

     res.fd = localSock
     res.function = cbproc
@@ -362,12 +423,12 @@ when defined(windows):
     res.state = {ReadPaused, WritePaused}
     res.future = Future[void].Raising([]).init(
       "datagram.transport", {FutureFlag.OwnCancelSchedule})
-    res.rovl.data = CompletionData(cb: readDatagramLoop,
-                                   udata: cast[pointer](res))
-    res.wovl.data = CompletionData(cb: writeDatagramLoop,
-                                   udata: cast[pointer](res))
-    res.rwsabuf = WSABUF(buf: cast[cstring](baseAddr res.buffer),
-                         len: ULONG(len(res.buffer)))
+    res.rovl.data = CompletionData(
+      cb: readDatagramLoop, udata: cast[pointer](res))
+    res.wovl.data = CompletionData(
+      cb: writeDatagramLoop, udata: cast[pointer](res))
+    res.rwsabuf = WSABUF(
+      buf: cast[cstring](baseAddr res.buffer), len: ULONG(len(res.buffer)))
     GC_ref(res)
     # Start tracking transport
     trackCounter(DgramTransportTrackerName)
@@ -380,10 +441,10 @@ else:
   # Linux/BSD/MacOS part

   proc readDatagramLoop(udata: pointer) {.raises: [].}=
-    var raddr: TransportAddress
     doAssert(not isNil(udata))
-    let transp = cast[DatagramTransport](udata)
-    let fd = SocketHandle(transp.fd)
+    let
+      transp = cast[DatagramTransport](udata)
+      fd = SocketHandle(transp.fd)
     if int(fd) == 0:
       ## This situation can be happen, when there events present
       ## after transport was closed.
@@ -398,9 +459,8 @@ else:
                                cast[ptr SockAddr](addr transp.raddr),
                                addr transp.ralen)
        if res >= 0:
-          fromSAddr(addr transp.raddr, transp.ralen, raddr)
          transp.buflen = res
-          asyncSpawn transp.function(transp, raddr)
+          asyncSpawn transp.function(transp, transp.getRemoteAddress())
        else:
          let err = osLastError()
          case err
@@ -409,14 +469,15 @@ else:
          else:
            transp.buflen = 0
            transp.setReadError(err)
-            asyncSpawn transp.function(transp, raddr)
+            asyncSpawn transp.function(transp, transp.getRemoteAddress())
          break

   proc writeDatagramLoop(udata: pointer) =
     var res: int
     doAssert(not isNil(udata))
-    var transp = cast[DatagramTransport](udata)
-    let fd = SocketHandle(transp.fd)
+    let
+      transp = cast[DatagramTransport](udata)
+      fd = SocketHandle(transp.fd)
     if int(fd) == 0:
       ## This situation can be happen, when there events present
       ## after transport was closed.
@@ -428,7 +489,8 @@ else:
       let vector = transp.queue.popFirst()
       while true:
         if vector.kind == WithAddress:
-          toSAddr(vector.address, transp.waddr, transp.walen)
+          # We only need `Sockaddr_storage` data here, so result discarded.
+          discard transp.setRemoteAddress(vector.address)
           res = osdefs.sendto(fd, vector.buf, vector.buflen, MSG_NOSIGNAL,
                               cast[ptr SockAddr](addr transp.waddr),
                               transp.walen)
@@ -551,21 +613,28 @@ else:
      closeSocket(localSock)
      raiseTransportOsError(err)

+    res.flags =
+      block:
+        # Add `V4Mapped` flag when `::` address is used and dualstack is
+        # set to enabled or auto.
+        var res = flags
+        if (local.family == AddressFamily.IPv6) and local.isAnyLocal():
+          if dualstack != DualStackType.Disabled:
+            res.incl(ServerFlags.V4Mapped)
+        res
+
     if remote.port != Port(0):
-      var saddr: Sockaddr_storage
-      var slen: SockLen
-      toSAddr(remote, saddr, slen)
-      if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr),
-                 slen) != 0:
+      let remoteAddress = res.setRemoteAddress(remote)
+      if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr),
+                 res.walen) != 0:
        let err = osLastError()
        if sock == asyncInvalidSocket:
          closeSocket(localSock)
        raiseTransportOsError(err)
-      res.remote = remote
+      res.remote = remoteAddress

     res.fd = localSock
     res.function = cbproc
-    res.flags = flags
     res.buffer = newSeq[byte](bufferSize)
     res.queue = initDeque[GramVector]()
res.udata = udata
|
res.udata = udata
|
||||||
@ -605,6 +674,24 @@ proc close*(transp: DatagramTransport) =
|
|||||||
transp.state.incl({WriteClosed, ReadClosed})
|
transp.state.incl({WriteClosed, ReadClosed})
|
||||||
closeSocket(transp.fd, continuation)
|
closeSocket(transp.fd, continuation)
|
||||||
|
|
||||||
|
proc getTransportAddresses(
|
||||||
|
local, remote: Opt[IpAddress],
|
||||||
|
localPort, remotePort: Port
|
||||||
|
): tuple[local: TransportAddress, remote: TransportAddress] =
|
||||||
|
let
|
||||||
|
(localAuto, remoteAuto) = getAutoAddresses(localPort, remotePort)
|
||||||
|
lres =
|
||||||
|
if local.isSome():
|
||||||
|
initTAddress(local.get(), localPort)
|
||||||
|
else:
|
||||||
|
localAuto
|
||||||
|
rres =
|
||||||
|
if remote.isSome():
|
||||||
|
initTAddress(remote.get(), remotePort)
|
||||||
|
else:
|
||||||
|
remoteAuto
|
||||||
|
(lres, rres)
|
||||||
|
|
||||||
proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback,
|
proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback,
|
||||||
remote: TransportAddress,
|
remote: TransportAddress,
|
||||||
local: TransportAddress,
|
local: TransportAddress,
|
||||||
@@ -633,7 +720,7 @@ proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback,
  proc wrap(transp: DatagramTransport,
            remote: TransportAddress) {.async: (raises: []).} =
    try:
-      cbproc(transp, remote)
+      await cbproc(transp, remote)
    except CatchableError as exc:
      raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg

@@ -824,24 +911,96 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback,
                              cast[pointer](udata), child, bufSize, ttl,
                              dualstack)

+proc newDatagramTransport*(cbproc: DatagramCallback,
+                           localPort: Port,
+                           remotePort: Port,
+                           local: Opt[IpAddress] = Opt.none(IpAddress),
+                           remote: Opt[IpAddress] = Opt.none(IpAddress),
+                           flags: set[ServerFlags] = {},
+                           udata: pointer = nil,
+                           child: DatagramTransport = nil,
+                           bufSize: int = DefaultDatagramBufferSize,
+                           ttl: int = 0,
+                           dualstack = DualStackType.Auto
+                          ): DatagramTransport {.
+     raises: [TransportOsError].} =
+  ## Create new UDP datagram transport (IPv6) and bind it to ANY_ADDRESS.
+  ## Depending on OS settings, the procedure attempts to create the transport
+  ## using the IPv6 ANY_ADDRESS; if it's not available, it will try to bind the
+  ## transport to the IPv4 ANY_ADDRESS.
+  ##
+  ## ``cbproc`` - callback which will be called, when new datagram received.
+  ## ``localPort`` - local peer's port number.
+  ## ``remotePort`` - remote peer's port number.
+  ## ``local`` - optional local peer's IPv4/IPv6 address.
+  ## ``remote`` - optional remote peer's IPv4/IPv6 address.
+  ## ``sock`` - application-driven socket to use.
+  ## ``flags`` - flags that will be applied to socket.
+  ## ``udata`` - custom argument which will be passed to ``cbproc``.
+  ## ``bufSize`` - size of internal buffer.
+  ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has
+  ##   ``Broadcast`` option).
+  let
+    (localHost, remoteHost) =
+      getTransportAddresses(local, remote, localPort, remotePort)
+  newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket,
+                             flags, cast[pointer](udata), child, bufSize,
+                             ttl, dualstack)
+
+proc newDatagramTransport*(cbproc: DatagramCallback,
+                           localPort: Port,
+                           local: Opt[IpAddress] = Opt.none(IpAddress),
+                           flags: set[ServerFlags] = {},
+                           udata: pointer = nil,
+                           child: DatagramTransport = nil,
+                           bufSize: int = DefaultDatagramBufferSize,
+                           ttl: int = 0,
+                           dualstack = DualStackType.Auto
+                          ): DatagramTransport {.
+     raises: [TransportOsError].} =
+  newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress),
+                       flags, udata, child, bufSize, ttl, dualstack)
+
+proc newDatagramTransport*[T](cbproc: DatagramCallback,
+                              localPort: Port,
+                              remotePort: Port,
+                              local: Opt[IpAddress] = Opt.none(IpAddress),
+                              remote: Opt[IpAddress] = Opt.none(IpAddress),
+                              flags: set[ServerFlags] = {},
+                              udata: ref T,
+                              child: DatagramTransport = nil,
+                              bufSize: int = DefaultDatagramBufferSize,
+                              ttl: int = 0,
+                              dualstack = DualStackType.Auto
+                             ): DatagramTransport {.
+     raises: [TransportOsError].} =
+  let
+    (localHost, remoteHost) =
+      getTransportAddresses(local, remote, localPort, remotePort)
+    fflags = flags + {GCUserData}
+  GC_ref(udata)
+  newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket,
+                             fflags, cast[pointer](udata), child, bufSize, ttl,
+                             dualstack)
+
+proc newDatagramTransport*[T](cbproc: DatagramCallback,
+                              localPort: Port,
+                              local: Opt[IpAddress] = Opt.none(IpAddress),
+                              flags: set[ServerFlags] = {},
+                              udata: ref T,
+                              child: DatagramTransport = nil,
+                              bufSize: int = DefaultDatagramBufferSize,
+                              ttl: int = 0,
+                              dualstack = DualStackType.Auto
+                             ): DatagramTransport {.
+     raises: [TransportOsError].} =
+  newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress),
+                       flags, udata, child, bufSize, ttl, dualstack)
+
proc join*(transp: DatagramTransport): Future[void] {.
    async: (raw: true, raises: [CancelledError]).} =
  ## Wait until the transport ``transp`` is closed.
-  let retFuture = newFuture[void]("datagram.transport.join")
-
-  proc continuation(udata: pointer) {.gcsafe.} =
-    retFuture.complete()
-
-  proc cancel(udata: pointer) {.gcsafe.} =
-    transp.future.removeCallback(continuation, cast[pointer](retFuture))
-
-  if not(transp.future.finished()):
-    transp.future.addCallback(continuation, cast[pointer](retFuture))
-    retFuture.cancelCallback = cancel
-  else:
-    retFuture.complete()
-
-  return retFuture
+  transp.future.join()

proc closed*(transp: DatagramTransport): bool {.inline.} =
  ## Returns ``true`` if transport in closed state.
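To make the new port-based constructors above concrete, here is a minimal, hypothetical usage sketch; the callback body and the port number are illustrative only, while the constructor signature itself comes from the diff above:

```nim
import chronos

proc onDatagram(transp: DatagramTransport,
                raddr: TransportAddress): Future[void] {.async: (raises: []).} =
  # Illustrative no-op handler.
  discard

# Bind to the automatically selected ANY address on port 9000; `remotePort = 0`
# means the transport is not connected to a fixed remote peer.
let server = newDatagramTransport(onDatagram,
                                  localPort = Port(9000), remotePort = Port(0))
```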
@@ -52,7 +52,7 @@ proc init*(t: typedesc[IpMask], family: AddressFamily, prefix: int): IpMask =
      IpMask(family: AddressFamily.IPv4, mask4: 0'u32)
    elif prefix < 32:
      let mask = 0xFFFF_FFFF'u32 shl (32 - prefix)
-      IpMask(family: AddressFamily.IPv4, mask4: mask.toBE())
+      IpMask(family: AddressFamily.IPv4, mask4: mask)
    else:
      IpMask(family: AddressFamily.IPv4, mask4: 0xFFFF_FFFF'u32)
  of AddressFamily.IPv6:
@@ -65,13 +65,13 @@ proc init*(t: typedesc[IpMask], family: AddressFamily, prefix: int): IpMask =
      if prefix > 64:
        let mask = 0xFFFF_FFFF_FFFF_FFFF'u64 shl (128 - prefix)
        IpMask(family: AddressFamily.IPv6,
-               mask6: [0xFFFF_FFFF_FFFF_FFFF'u64, mask.toBE()])
+               mask6: [0xFFFF_FFFF_FFFF_FFFF'u64, mask])
      elif prefix == 64:
        IpMask(family: AddressFamily.IPv6,
               mask6: [0xFFFF_FFFF_FFFF_FFFF'u64, 0'u64])
      else:
        let mask = 0xFFFF_FFFF_FFFF_FFFF'u64 shl (64 - prefix)
-        IpMask(family: AddressFamily.IPv6, mask6: [mask.toBE(), 0'u64])
+        IpMask(family: AddressFamily.IPv6, mask6: [mask, 0'u64])
  else:
    IpMask(family: family)

@@ -80,11 +80,12 @@ proc init*(t: typedesc[IpMask], netmask: TransportAddress): IpMask =
  case netmask.family
  of AddressFamily.IPv4:
    IpMask(family: AddressFamily.IPv4,
-           mask4: uint32.fromBytes(netmask.address_v4))
+           mask4: uint32.fromBytesBE(netmask.address_v4))
  of AddressFamily.IPv6:
    IpMask(family: AddressFamily.IPv6,
-           mask6: [uint64.fromBytes(netmask.address_v6.toOpenArray(0, 7)),
-                   uint64.fromBytes(netmask.address_v6.toOpenArray(8, 15))])
+           mask6: [
+             uint64.fromBytesBE(netmask.address_v6.toOpenArray(0, 7)),
+             uint64.fromBytesBE(netmask.address_v6.toOpenArray(8, 15))])
  else:
    IpMask(family: netmask.family)

@@ -95,8 +96,7 @@ proc initIp*(t: typedesc[IpMask], netmask: string): IpMask =
  ## If ``netmask`` address string is invalid, result IpMask.family will be
  ## set to ``AddressFamily.None``.
  try:
-    var ip = parseIpAddress(netmask)
-    var tip = initTAddress(ip, Port(0))
+    let tip = initTAddress(parseIpAddress(netmask), Port(0))
    t.init(tip)
  except ValueError:
    IpMask(family: AddressFamily.None)
@@ -127,9 +127,9 @@ proc init*(t: typedesc[IpMask], netmask: string): IpMask =
        elif netmask[offset + i] in hexLowers:
          v = uint32(ord(netmask[offset + i]) - ord('a') + 10)
        else:
-          return
+          return IpMask(family: AddressFamily.None)
        r = (r shl 4) or v
-      res.mask4 = r.toBE()
+      res.mask4 = r
      res
    elif length == 32 or length == (2 + 32):
      ## IPv6 mask
@@ -147,10 +147,10 @@ proc init*(t: typedesc[IpMask], netmask: string): IpMask =
        elif netmask[offset + i] in hexLowers:
          v = uint64(ord(netmask[offset + i]) - ord('a') + 10)
        else:
-          return
+          return IpMask(family: AddressFamily.None)
        r = (r shl 4) or v
      offset += 16
-      res.mask6[i] = r.toBE()
+      res.mask6[i] = r
      res
    else:
      IpMask(family: AddressFamily.None)
@@ -167,8 +167,7 @@ proc toIPv6*(address: TransportAddress): TransportAddress =
    var address6: array[16, uint8]
    address6[10] = 0xFF'u8
    address6[11] = 0xFF'u8
-    let ip4 = uint32.fromBytes(address.address_v4)
-    address6[12 .. 15] = ip4.toBytes()
+    address6[12 .. 15] = toBytesBE(uint32.fromBytesBE(address.address_v4))
    TransportAddress(family: AddressFamily.IPv6, port: address.port,
                     address_v6: address6)
  of AddressFamily.IPv6:
@@ -183,9 +182,10 @@ proc isV4Mapped*(address: TransportAddress): bool =
  ## Procedure returns ``false`` if ``address`` family is IPv4.
  case address.family
  of AddressFamily.IPv6:
-    let data0 = uint64.fromBytes(address.address_v6.toOpenArray(0, 7))
-    let data1 = uint16.fromBytes(address.address_v6.toOpenArray(8, 9))
-    let data2 = uint16.fromBytes(address.address_v6.toOpenArray(10, 11))
+    let
+      data0 = uint64.fromBytesBE(address.address_v6.toOpenArray(0, 7))
+      data1 = uint16.fromBytesBE(address.address_v6.toOpenArray(8, 9))
+      data2 = uint16.fromBytesBE(address.address_v6.toOpenArray(10, 11))
    (data0 == 0x00'u64) and (data1 == 0x00'u16) and (data2 == 0xFFFF'u16)
  else:
    false
@@ -202,9 +202,9 @@ proc toIPv4*(address: TransportAddress): TransportAddress =
    address
  of AddressFamily.IPv6:
    if isV4Mapped(address):
-      let data = uint32.fromBytes(address.address_v6.toOpenArray(12, 15))
+      let data = uint32.fromBytesBE(address.address_v6.toOpenArray(12, 15))
      TransportAddress(family: AddressFamily.IPv4, port: address.port,
-                       address_v4: data.toBytes())
+                       address_v4: data.toBytesBE())
    else:
      TransportAddress(family: AddressFamily.None)
  else:
@@ -230,34 +230,34 @@ proc mask*(a: TransportAddress, m: IpMask): TransportAddress =
  ## In all other cases returned address will have ``AddressFamily.None``.
  if (a.family == AddressFamily.IPv4) and (m.family == AddressFamily.IPv6):
    if (m.mask6[0] == 0xFFFF_FFFF_FFFF_FFFF'u64) and
-       (m.mask6[1] and 0xFFFF_FFFF'u64) == 0xFFFF_FFFF'u64:
+       (m.mask6[1] and 0xFFFF_FFFF_0000_0000'u64) == 0xFFFF_FFFF_0000_0000'u64:
      let
-        mask = uint32((m.mask6[1] shr 32) and 0xFFFF_FFFF'u64)
-        data = uint32.fromBytes(a.address_v4)
+        mask = uint32(m.mask6[1] and 0xFFFF_FFFF'u64)
+        data = uint32.fromBytesBE(a.address_v4)
      TransportAddress(family: AddressFamily.IPv4, port: a.port,
-                       address_v4: (data and mask).toBytes())
+                       address_v4: (data and mask).toBytesBE())
    else:
      TransportAddress(family: AddressFamily.None)
  elif (a.family == AddressFamily.IPv6) and (m.family == AddressFamily.IPv4):
    var ip = a.toIPv4()
    if ip.family != AddressFamily.IPv4:
      return TransportAddress(family: AddressFamily.None)
-    let data = uint32.fromBytes(ip.address_v4)
-    ip.address_v4[0 .. 3] = (data and m.mask4).toBytes()
+    let data = uint32.fromBytesBE(ip.address_v4)
+    ip.address_v4[0 .. 3] = (data and m.mask4).toBytesBE()
    var res = ip.toIPv6()
    res.port = a.port
    res
  elif a.family == AddressFamily.IPv4 and m.family == AddressFamily.IPv4:
-    let data = uint32.fromBytes(a.address_v4)
+    let data = uint32.fromBytesBE(a.address_v4)
    TransportAddress(family: AddressFamily.IPv4, port: a.port,
-                     address_v4: (data and m.mask4).toBytes())
+                     address_v4: (data and m.mask4).toBytesBE())
  elif a.family == AddressFamily.IPv6 and m.family == AddressFamily.IPv6:
    var address6: array[16, uint8]
    let
-      data0 = uint64.fromBytes(a.address_v6.toOpenArray(0, 7))
-      data1 = uint64.fromBytes(a.address_v6.toOpenArray(8, 15))
-    address6[0 .. 7] = (data0 and m.mask6[0]).toBytes()
-    address6[8 .. 15] = (data1 and m.mask6[1]).toBytes()
+      data0 = uint64.fromBytesBE(a.address_v6.toOpenArray(0, 7))
+      data1 = uint64.fromBytesBE(a.address_v6.toOpenArray(8, 15))
+    address6[0 .. 7] = (data0 and m.mask6[0]).toBytesBE()
+    address6[8 .. 15] = (data1 and m.mask6[1]).toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, port: a.port,
                     address_v6: address6)
  else:
@@ -272,14 +272,14 @@ proc prefix*(mask: IpMask): int =
  of AddressFamily.IPv4:
    var
      res = 0
-      n = mask.mask4.fromBE()
+      n = mask.mask4
    while n != 0:
      if (n and 0x8000_0000'u32) == 0'u32: return -1
      n = n shl 1
      inc(res)
    res
  of AddressFamily.IPv6:
-    let mask6 = [mask.mask6[0].fromBE(), mask.mask6[1].fromBE()]
+    let mask6 = [mask.mask6[0], mask.mask6[1]]
    var res = 0
    if mask6[0] == 0xFFFF_FFFF_FFFF_FFFF'u64:
      res += 64
@@ -308,11 +308,11 @@ proc subnetMask*(mask: IpMask): TransportAddress =
  case mask.family
  of AddressFamily.IPv4:
    TransportAddress(family: AddressFamily.IPv4,
-                     address_v4: mask.mask4.toBytes())
+                     address_v4: mask.mask4.toBytesBE())
  of AddressFamily.IPv6:
    var address6: array[16, uint8]
-    address6[0 .. 7] = mask.mask6[0].toBytes()
-    address6[8 .. 15] = mask.mask6[1].toBytes()
+    address6[0 .. 7] = mask.mask6[0].toBytesBE()
+    address6[8 .. 15] = mask.mask6[1].toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, address_v6: address6)
  else:
    TransportAddress(family: mask.family)
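As a quick orientation for the endianness change running through this file, here is a small sketch using the public `IpMask` helpers touched above; the values in the comments are the expected results, not verified output:

```nim
import chronos

let mask = IpMask.init(AddressFamily.IPv4, 24)
echo mask.prefix()      # expected: 24
echo mask.subnetMask()  # expected: 255.255.255.0 (with port 0)
echo $mask              # expected: FFFFFF00 (hexadecimal representation)
```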
@@ -321,9 +321,10 @@ proc `$`*(mask: IpMask, include0x = false): string =
  ## Returns hexadecimal string representation of IP mask ``mask``.
  case mask.family
  of AddressFamily.IPv4:
-    var res = if include0x: "0x" else: ""
-    var n = 32
-    var m = mask.mask4.fromBE()
+    var
+      res = if include0x: "0x" else: ""
+      n = 32
+      m = mask.mask4
    while n > 0:
      n -= 4
      var c = int((m shr n) and 0x0F)
@@ -333,7 +334,7 @@ proc `$`*(mask: IpMask, include0x = false): string =
        res.add(chr(ord('A') + (c - 10)))
    res
  of AddressFamily.IPv6:
-    let mask6 = [mask.mask6[0].fromBE(), mask.mask6[1].fromBE()]
+    let mask6 = [mask.mask6[0], mask.mask6[1]]
    var res = if include0x: "0x" else: ""
    for i in 0 .. 1:
      var n = 64
@@ -353,12 +354,11 @@ proc ip*(mask: IpMask): string {.raises: [ValueError].} =
  ## Returns IP address text representation of IP mask ``mask``.
  case mask.family
  of AddressFamily.IPv4:
-    var address4: array[4, uint8]
-    copyMem(addr address4[0], unsafeAddr mask.mask4, sizeof(uint32))
-    $IpAddress(family: IpAddressFamily.IPv4, address_v4: address4)
+    $IpAddress(family: IpAddressFamily.IPv4, address_v4: mask.mask4.toBytesBE())
  of AddressFamily.Ipv6:
    var address6: array[16, uint8]
-    copyMem(addr address6[0], unsafeAddr mask.mask6[0], 16)
+    address6[0 .. 7] = mask.mask6[0].toBytesBE()
+    address6[8 .. 15] = mask.mask6[1].toBytesBE()
    $IpAddress(family: IpAddressFamily.IPv6, address_v6: address6)
  else:
    raise newException(ValueError, "Invalid mask family type")
@@ -387,11 +387,12 @@ proc init*(t: typedesc[IpNet], network: string): IpNet {.
    raises: [TransportAddressError].} =
  ## Initialize IP Network from string representation in format
  ## <address>/<prefix length> or <address>/<netmask address>.
-  var parts = network.rsplit("/", maxsplit = 1)
-  var host, mhost: TransportAddress
-  var ipaddr: IpAddress
-  var mask: IpMask
-  var prefix: int
+  var
+    parts = network.rsplit("/", maxsplit = 1)
+    host, mhost: TransportAddress
+    ipaddr: IpAddress
+    mask: IpMask
+    prefix: int
  try:
    ipaddr = parseIpAddress(parts[0])
    if ipaddr.family == IpAddressFamily.IPv4:
@@ -428,9 +429,9 @@ proc init*(t: typedesc[IpNet], network: string): IpNet {.
      raise newException(TransportAddressError,
                         "Incorrect network address!")
    if prefix == -1:
-      result = t.init(host, mask)
+      t.init(host, mask)
    else:
-      result = t.init(host, prefix)
+      t.init(host, prefix)
  except ValueError as exc:
    raise newException(TransportAddressError, exc.msg)

@@ -461,19 +462,19 @@ proc broadcast*(net: IpNet): TransportAddress =
  case net.host.family
  of AddressFamily.IPv4:
    let
-      host = uint32.fromBytes(net.host.address_v4)
+      host = uint32.fromBytesBE(net.host.address_v4)
      mask = net.mask.mask4
    TransportAddress(family: AddressFamily.IPv4,
-                     address_v4: (host or (not(mask))).toBytes())
+                     address_v4: (host or (not(mask))).toBytesBE())
  of AddressFamily.IPv6:
    var address6: array[16, uint8]
    let
-      host0 = uint64.fromBytes(net.host.address_v6.toOpenArray(0, 7))
-      host1 = uint64.fromBytes(net.host.address_v6.toOpenArray(8, 15))
+      host0 = uint64.fromBytesBE(net.host.address_v6.toOpenArray(0, 7))
+      host1 = uint64.fromBytesBE(net.host.address_v6.toOpenArray(8, 15))
      data0 = net.mask.mask6[0]
      data1 = net.mask.mask6[1]
-    address6[0 .. 7] = (host0 or (not(data0))).toBytes()
-    address6[8 .. 15] = (host1 or (not(data1))).toBytes()
+    address6[0 .. 7] = (host0 or (not(data0))).toBytesBE()
+    address6[8 .. 15] = (host1 or (not(data1))).toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, address_v6: address6)
  else:
    TransportAddress(family: AddressFamily.None)
@@ -496,19 +497,19 @@ proc `and`*(address1, address2: TransportAddress): TransportAddress =
  case address1.family
  of AddressFamily.IPv4:
    let
-      data1 = uint32.fromBytes(address1.address_v4)
-      data2 = uint32.fromBytes(address2.address_v4)
+      data1 = uint32.fromBytesBE(address1.address_v4)
+      data2 = uint32.fromBytesBE(address2.address_v4)
    TransportAddress(family: AddressFamily.IPv4,
-                     address_v4: (data1 and data2).toBytes())
+                     address_v4: (data1 and data2).toBytesBE())
  of AddressFamily.IPv6:
    var address6: array[16, uint8]
    let
-      data1 = uint64.fromBytes(address1.address_v6.toOpenArray(0, 7))
-      data2 = uint64.fromBytes(address1.address_v6.toOpenArray(8, 15))
-      data3 = uint64.fromBytes(address2.address_v6.toOpenArray(0, 7))
-      data4 = uint64.fromBytes(address2.address_v6.toOpenArray(8, 15))
-    address6[0 .. 7] = (data1 and data3).toBytes()
-    address6[8 .. 15] = (data2 and data4).toBytes()
+      data1 = uint64.fromBytesBE(address1.address_v6.toOpenArray(0, 7))
+      data2 = uint64.fromBytesBE(address1.address_v6.toOpenArray(8, 15))
+      data3 = uint64.fromBytesBE(address2.address_v6.toOpenArray(0, 7))
+      data4 = uint64.fromBytesBE(address2.address_v6.toOpenArray(8, 15))
+    address6[0 .. 7] = (data1 and data3).toBytesBE()
+    address6[8 .. 15] = (data2 and data4).toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, address_v6: address6)
  else:
    raiseAssert "Invalid address family type"
@@ -522,19 +523,19 @@ proc `or`*(address1, address2: TransportAddress): TransportAddress =
  case address1.family
  of AddressFamily.IPv4:
    let
-      data1 = uint32.fromBytes(address1.address_v4)
-      data2 = uint32.fromBytes(address2.address_v4)
+      data1 = uint32.fromBytesBE(address1.address_v4)
+      data2 = uint32.fromBytesBE(address2.address_v4)
    TransportAddress(family: AddressFamily.IPv4,
-                     address_v4: (data1 or data2).toBytes())
+                     address_v4: (data1 or data2).toBytesBE())
  of AddressFamily.IPv6:
    var address6: array[16, uint8]
    let
-      data1 = uint64.fromBytes(address1.address_v6.toOpenArray(0, 7))
-      data2 = uint64.fromBytes(address1.address_v6.toOpenArray(8, 15))
-      data3 = uint64.fromBytes(address2.address_v6.toOpenArray(0, 7))
-      data4 = uint64.fromBytes(address2.address_v6.toOpenArray(8, 15))
-    address6[0 .. 7] = (data1 or data3).toBytes()
-    address6[8 .. 15] = (data2 or data4).toBytes()
+      data1 = uint64.fromBytesBE(address1.address_v6.toOpenArray(0, 7))
+      data2 = uint64.fromBytesBE(address1.address_v6.toOpenArray(8, 15))
+      data3 = uint64.fromBytesBE(address2.address_v6.toOpenArray(0, 7))
+      data4 = uint64.fromBytesBE(address2.address_v6.toOpenArray(8, 15))
+    address6[0 .. 7] = (data1 or data3).toBytesBE()
+    address6[8 .. 15] = (data2 or data4).toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, address_v6: address6)
  else:
    raiseAssert "Invalid address family type"
@@ -543,15 +544,15 @@ proc `not`*(address: TransportAddress): TransportAddress =
  ## Bitwise ``not`` operation for ``address``.
  case address.family
  of AddressFamily.IPv4:
-    let data = not(uint32.fromBytes(address.address_v4))
-    TransportAddress(family: AddressFamily.IPv4, address_v4: data.toBytes())
+    let data = not(uint32.fromBytesBE(address.address_v4))
+    TransportAddress(family: AddressFamily.IPv4, address_v4: data.toBytesBE())
  of AddressFamily.IPv6:
    var address6: array[16, uint8]
    let
-      data1 = not(uint64.fromBytes(address.address_v6.toOpenArray(0, 7)))
-      data2 = not(uint64.fromBytes(address.address_v6.toOpenArray(8, 15)))
-    address6[0 .. 7] = data1.toBytes()
-    address6[8 .. 15] = data2.toBytes()
+      data1 = not(uint64.fromBytesBE(address.address_v6.toOpenArray(0, 7)))
+      data2 = not(uint64.fromBytesBE(address.address_v6.toOpenArray(8, 15)))
+    address6[0 .. 7] = data1.toBytesBE()
+    address6[8 .. 15] = data2.toBytesBE()
    TransportAddress(family: AddressFamily.IPv6, address_v6: address6)
  else:
    address
@@ -702,10 +703,10 @@ proc isZero*(address: TransportAddress): bool {.inline.} =
  ## not ``AddressFamily.None``.
  case address.family
  of AddressFamily.IPv4:
-    uint32.fromBytes(a4()) == 0'u32
+    uint32.fromBytesBE(a4()) == 0'u32
  of AddressFamily.IPv6:
-    (uint64.fromBytes(a6.toOpenArray(0, 7)) == 0'u64) and
-      (uint64.fromBytes(a6.toOpenArray(8, 15)) == 0'u64)
+    (uint64.fromBytesBE(a6.toOpenArray(0, 7)) == 0'u64) and
+      (uint64.fromBytesBE(a6.toOpenArray(8, 15)) == 0'u64)
  of AddressFamily.Unix:
    len($cast[cstring](unsafeAddr address.address_un[0])) == 0
  else:
@@ -804,7 +805,7 @@ proc isLoopback*(address: TransportAddress): bool =
  of AddressFamily.IPv4:
    a4[0] == 127'u8
  of AddressFamily.IPv6:
-    (uint64.fromBytes(a6.toOpenArray(0, 7)) == 0x00'u64) and
+    (uint64.fromBytesBE(a6.toOpenArray(0, 7)) == 0x00'u64) and
      (uint64.fromBytesBE(a6.toOpenArray(8, 15)) == 0x01'u64)
  else:
    false
@@ -817,10 +818,10 @@ proc isAnyLocal*(address: TransportAddress): bool =
  ## ``IPv6``: ::
  case address.family
  of AddressFamily.IPv4:
-    uint32.fromBytes(a4) == 0'u32
+    uint32.fromBytesBE(a4) == 0'u32
  of AddressFamily.IPv6:
-    (uint64.fromBytes(a6.toOpenArray(0, 7)) == 0x00'u64) and
-      (uint64.fromBytes(a6.toOpenArray(8, 15)) == 0x00'u64)
+    (uint64.fromBytesBE(a6.toOpenArray(0, 7)) == 0x00'u64) and
+      (uint64.fromBytesBE(a6.toOpenArray(8, 15)) == 0x00'u64)
  else:
    false

@@ -895,7 +896,7 @@ proc isBroadcast*(address: TransportAddress): bool =
  ## ``IPv4``: 255.255.255.255
  case address.family
  of AddressFamily.IPv4:
-    uint32.fromBytes(a4) == 0xFFFF_FFFF'u32
+    uint32.fromBytesBE(a4) == 0xFFFF_FFFF'u32
  of AddressFamily.IPv6:
    false
  else:
@@ -916,7 +917,7 @@ proc isBenchmarking*(address: TransportAddress): bool =
  of AddressFamily.IPv6:
    (uint16.fromBytesBE(a6.toOpenArray(0, 1)) == 0x2001'u16) and
      (uint16.fromBytesBE(a6.toOpenArray(2, 3)) == 0x02'u16) and
-      (uint16.fromBytes(a6.toOpenArray(4, 5)) == 0x00'u16)
+      (uint16.fromBytesBE(a6.toOpenArray(4, 5)) == 0x00'u16)
  else:
    false

@@ -980,9 +981,9 @@ proc isGlobal*(address: TransportAddress): bool =
    address.isLoopback() or
    (
      # IPv4-Mapped `::FFFF:0:0/96`
-      (uint64.fromBytes(a6.toOpenArray(0, 7)) == 0x00'u64) and
-        (uint16.fromBytes(a6.toOpenArray(8, 9)) == 0x00'u16) and
-        (uint16.fromBytes(a6.toOpenArray(10, 11)) == 0xFFFF'u16)
+      (uint64.fromBytesBE(a6.toOpenArray(0, 7)) == 0x00'u64) and
+        (uint16.fromBytesBE(a6.toOpenArray(8, 9)) == 0x00'u16) and
+        (uint16.fromBytesBE(a6.toOpenArray(10, 11)) == 0xFFFF'u16)
    ) or
    (
      # IPv4-IPv6 Translation `64:FF9B:1::/48`
@@ -993,8 +994,8 @@ proc isGlobal*(address: TransportAddress): bool =
    (
      # Discard-Only Address Block `100::/64`
      (uint16.fromBytesBE(a6.toOpenArray(0, 1)) == 0x100'u16) and
-        (uint32.fromBytes(a6.toOpenArray(2, 5)) == 0x00'u32) and
-        (uint16.fromBytes(a6.toOpenArray(6, 7)) == 0x00'u16)
+        (uint32.fromBytesBE(a6.toOpenArray(2, 5)) == 0x00'u32) and
+        (uint16.fromBytesBE(a6.toOpenArray(6, 7)) == 0x00'u16)
    ) or
    (
      # IETF Protocol Assignments `2001::/23`
@@ -1004,15 +1005,15 @@ proc isGlobal*(address: TransportAddress): bool =
    (
      # Port Control Protocol Anycast `2001:1::1`
      (uint32.fromBytesBE(a6.toOpenArray(0, 3)) == 0x20010001'u32) and
-        (uint32.fromBytes(a6.toOpenArray(4, 7)) == 0x00'u32) and
-        (uint32.fromBytes(a6.toOpenArray(8, 11)) == 0x00'u32) and
+        (uint32.fromBytesBE(a6.toOpenArray(4, 7)) == 0x00'u32) and
+        (uint32.fromBytesBE(a6.toOpenArray(8, 11)) == 0x00'u32) and
        (uint32.fromBytesBE(a6.toOpenArray(12, 15)) == 0x01'u32)
    ) or
    (
      # Traversal Using Relays around NAT Anycast `2001:1::2`
      (uint32.fromBytesBE(a6.toOpenArray(0, 3)) == 0x20010001'u32) and
-        (uint32.fromBytes(a6.toOpenArray(4, 7)) == 0x00'u32) and
-        (uint32.fromBytes(a6.toOpenArray(8, 11)) == 0x00'u32) and
+        (uint32.fromBytesBE(a6.toOpenArray(4, 7)) == 0x00'u32) and
+        (uint32.fromBytesBE(a6.toOpenArray(8, 11)) == 0x00'u32) and
        (uint32.fromBytesBE(a6.toOpenArray(12, 15)) == 0x02'u32)
    ) or
    (
@@ -1025,7 +1026,7 @@ proc isGlobal*(address: TransportAddress): bool =
    (uint16.fromBytesBE(a6.toOpenArray(0, 1)) == 0x2001'u16) and
      (uint16.fromBytesBE(a6.toOpenArray(2, 3)) == 0x04'u16) and
      (uint16.fromBytesBE(a6.toOpenArray(4, 5)) == 0x112'u16) and
-      (uint16.fromBytes(a6.toOpenArray(6, 7)) == 0x00'u16)
+      (uint16.fromBytesBE(a6.toOpenArray(6, 7)) == 0x00'u16)
    ) or
    (
      # ORCHIDv2 `2001:20::/28`
File diff suppressed because it is too large
@@ -4,6 +4,9 @@ Async/await is a programming model that relies on cooperative multitasking to
 coordinate the concurrent execution of procedures, using event notifications
 from the operating system or other threads to resume execution.

+Code execution happens in a loop that alternates between making progress on
+tasks and handling events.
+
 <!-- toc -->

 ## The dispatcher
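As an editorial illustration of that loop (not part of the documentation change), `waitFor` drives the dispatcher until the given future finishes:

```nim
import chronos

proc demo() {.async.} =
  echo "before sleep"
  await sleepAsync(100.milliseconds)  # suspends `demo`, lets the loop run
  echo "after sleep"

waitFor demo()  # alternates between handling events and resuming `demo`
```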
@@ -118,7 +121,8 @@ The `CancelledError` will now travel up the stack like any other exception.
 It can be caught for instance to free some resources and is then typically
 re-raised for the whole chain of operations to get cancelled.

-Alternatively, the cancellation request can be translated to a regular outcome of the operation - for example, a `read` operation might return an empty result.
+Alternatively, the cancellation request can be translated to a regular outcome
+of the operation - for example, a `read` operation might return an empty result.

 Cancelling an already-finished `Future` has no effect, as the following example
 of downloading two web pages concurrently shows:
@@ -127,8 +131,84 @@ of downloading two web pages concurrently shows:
 {{#include ../examples/twogets.nim}}
 ```

+### Ownership
+
+When calling a procedure that returns a `Future`, ownership of that `Future` is
+shared between the callee that created it and the caller that waits for it to be
+finished.
+
+The `Future` can be thought of as a single-item channel between a producer and a
+consumer. The producer creates the `Future` and is responsible for completing or
+failing it while the caller waits for completion and may `cancel` it.
+
+Although it is technically possible, callers must not `complete` or `fail`
+futures and callees or other intermediate observers must not `cancel` them, as
+this may lead to panics and shutdown (i.e. if the future is completed twice or a
+cancellation is not handled by the original caller).
+
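A small sketch of this producer/consumer split (an editorial illustration, not part of the documentation change; `produce` and `consume` are made-up names):

```nim
import chronos

proc produce(): Future[string] =
  let fut = newFuture[string]("produce")
  proc finish() {.async.} =
    await sleepAsync(10.milliseconds)
    fut.complete("done")          # only the producer completes (or fails) it
  asyncSpawn finish()
  fut

proc consume() {.async.} =
  let fut = produce()
  echo await fut                  # the caller only awaits (or cancels) it

waitFor consume()
```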
+### `noCancel`
+
+Certain operations must not be cancelled for semantic reasons. Common scenarios
+include `closeWait`, which releases a resource irrevocably, and composed
+operations whose individual steps should be performed together or not at all.
+
+In such cases, the `noCancel` modifier to `await` can be used to temporarily
+disable cancellation propagation, allowing the operation to complete even if
+the caller initiates a cancellation request:
+
+```nim
+proc deepSleep(dur: Duration) {.async.} =
+  # `noCancel` prevents any cancellation request by the caller of `deepSleep`
+  # from reaching `sleepAsync` - even if `deepSleep` is cancelled, its future
+  # will not complete until the sleep finishes.
+  await noCancel sleepAsync(dur)
+
+let future = deepSleep(10.minutes)
+
+# This will take ~10 minutes even if we try to cancel the call to `deepSleep`!
+await cancelAndWait(future)
+```
+
+### `join`
+
+The `join` modifier to `await` allows cancelling an `async` procedure without
+propagating the cancellation to the awaited operation. This is useful when
+`await`:ing a `Future` for monitoring purposes, i.e. when a procedure is not the
+owner of the future that's being `await`:ed.
+
+One situation where this happens is when implementing the "observer" pattern,
+where a helper monitors an operation it did not initiate:
+
+```nim
+var tick: Future[void]
+proc ticker() {.async.} =
+  while true:
+    tick = sleepAsync(1.second)
+    await tick
+    echo "tick!"
+
+proc tocker() {.async.} =
+  # This operation does not own or implement the operation behind `tick`,
+  # so it should not cancel it when `tocker` is cancelled
+  await join tick
+  echo "tock!"
+
+let
+  fut = ticker() # `ticker` is now looping and most likely waiting for `tick`
+  fut2 = tocker() # both `ticker` and `tocker` are waiting for `tick`
+
+# We don't want `tocker` to cancel a future that was created in `ticker`
+waitFor fut2.cancelAndWait()
+
+waitFor fut # keeps printing `tick!` every second.
+```
+
 ## Compile-time configuration

-`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant.
+`chronos` contains several compile-time
+[configuration options](./chronos/config.nim) enabling stricter compile-time
+checks and debugging helpers whose runtime cost may be significant.

-Strictness options generally will become default in future chronos releases and allow adapting existing code without changing the new version - see the [`config.nim`](./chronos/config.nim) module for more information.
+Strictness options generally will become default in future chronos releases and
+allow adapting existing code without changing the new version - see the
+[`config.nim`](./chronos/config.nim) module for more information.
@@ -110,7 +110,7 @@ sometimes lead to compile errors around forward declarations, methods and
 closures as Nim conservatively assumes that any `Exception` might be raised
 from those.

-Make sure to excplicitly annotate these with `{.raises.}`:
+Make sure to explicitly annotate these with `{.raises.}`:

 ```nim
 # Forward declarations need to explicitly include a raises list:
@@ -124,11 +124,12 @@ proc myfunction() =

 let closure: MyClosure = myfunction
 ```

+## Compatibility modes
+
-For compatibility, `async` functions can be instructed to handle `Exception` as
-well, specifying `handleException: true`. `Exception` that is not a `Defect` and
-not a `CatchableError` will then be caught and remapped to
-`AsyncExceptionError`:
+**Individual functions.** For compatibility, `async` functions can be instructed
+to handle `Exception` as well, specifying `handleException: true`. Any
+`Exception` that is not a `Defect` and not a `CatchableError` will then be
+caught and remapped to `AsyncExceptionError`:

 ```nim
 proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} =
@@ -136,14 +137,32 @@ proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionEr

 proc callRaiseException() {.async: (raises: []).} =
   try:
-    raiseException()
+    await raiseException()
   except AsyncExceptionError as exc:
     # The original Exception is available from the `parent` field
     echo exc.parent.msg
 ```

-This mode can be enabled globally with `-d:chronosHandleException` as a help
-when porting code to `chronos` but should generally be avoided as global
-configuration settings may interfere with libraries that use `chronos` leading
-to unexpected behavior.
+**Global flag.** This mode can be enabled globally with
+`-d:chronosHandleException` as a help when porting code to `chronos`. The
+behavior in this case will be that:
+
+1. old-style functions annotated with plain `async` will behave as if they had
+   been annotated with `async: (handleException: true)`.
+
+   This is functionally equivalent to
+   `async: (handleException: true, raises: [CatchableError])` and will, as
+   before, remap any `Exception` that is not `Defect` into
+   `AsyncExceptionError`, while also allowing any `CatchableError` (including
+   `AsyncExceptionError`) to get through without compilation errors.
+
+2. New-style functions with `async: (raises: [...])` annotations or their own
+   `handleException` annotations will not be affected.
+
+The rationale here is to allow one to incrementally introduce exception
+annotations and get compiler feedback while not requiring that every bit of
+legacy code is updated at once.
+
+This should be used sparingly and with care, however, as global configuration
+settings may interfere with libraries that use `chronos` leading to unexpected
+behavior.
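A hedged illustration of point 1 above, assuming the project is compiled with `-d:chronosHandleException` (the proc names are made up; only the flag and the remapping behavior come from the documentation above):

```nim
import chronos

# With `-d:chronosHandleException`, a plain `async` proc behaves as if it were
# annotated with `async: (handleException: true)`.
proc legacy() {.async.} =
  raise newException(Exception, "boom")   # neither Defect nor CatchableError

proc caller() {.async.} =
  try:
    await legacy()
  except AsyncExceptionError as exc:      # the remapped exception arrives here
    echo exc.parent.msg

waitFor caller()
```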
@ -32,6 +32,10 @@ suite "Datagram Transport test suite":
|
|||||||
m8 = "Bounded multiple clients with messages (" & $ClientsCount &
|
m8 = "Bounded multiple clients with messages (" & $ClientsCount &
|
||||||
" clients x " & $MessagesCount & " messages)"
|
" clients x " & $MessagesCount & " messages)"
|
||||||
|
|
||||||
|
type
|
||||||
|
DatagramSocketType {.pure.} = enum
|
||||||
|
Bound, Unbound
|
||||||
|
|
||||||
proc client1(transp: DatagramTransport,
|
proc client1(transp: DatagramTransport,
|
||||||
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
|
raddr: TransportAddress): Future[void] {.async: (raises: []).} =
|
||||||
try:
|
try:
|
||||||
@ -628,6 +632,243 @@ suite "Datagram Transport test suite":
|
|||||||
await allFutures(sdgram.closeWait(), cdgram.closeWait())
|
await allFutures(sdgram.closeWait(), cdgram.closeWait())
|
||||||
res == 1
|
res == 1
|
||||||
|
|
||||||
|
proc performAutoAddressTest(port: Port,
|
||||||
|
family: AddressFamily): Future[bool] {.async.} =
|
||||||
|
var
|
||||||
|
expectRequest1 = "AUTO REQUEST1"
|
||||||
|
expectRequest2 = "AUTO REQUEST2"
|
||||||
|
expectResponse = "AUTO RESPONSE"
|
||||||
|
mappedResponse = "MAPPED RESPONSE"
|
||||||
|
event = newAsyncEvent()
|
||||||
|
event2 = newAsyncEvent()
|
||||||
|
res = 0
|
||||||
|
|
||||||
|
proc process1(transp: DatagramTransport,
|
||||||
|
raddr: TransportAddress): Future[void] {.
|
||||||
|
async: (raises: []).} =
|
||||||
|
try:
|
||||||
|
var
|
||||||
|
bmsg = transp.getMessage()
|
||||||
|
smsg = string.fromBytes(bmsg)
|
||||||
|
if smsg == expectRequest1:
|
||||||
|
inc(res)
|
||||||
|
await noCancel transp.sendTo(
|
||||||
|
raddr, addr expectResponse[0], len(expectResponse))
|
||||||
|
elif smsg == expectRequest2:
|
||||||
|
inc(res)
|
||||||
|
await noCancel transp.sendTo(
|
||||||
|
raddr, addr mappedResponse[0], len(mappedResponse))
|
||||||
|
except TransportError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
except CancelledError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
|
||||||
|
proc process2(transp: DatagramTransport,
|
||||||
|
raddr: TransportAddress): Future[void] {.
|
||||||
|
async: (raises: []).} =
|
||||||
|
try:
|
||||||
|
var
|
||||||
|
bmsg = transp.getMessage()
|
||||||
|
smsg = string.fromBytes(bmsg)
|
||||||
|
if smsg == expectResponse:
|
||||||
|
inc(res)
|
||||||
|
event.fire()
|
||||||
|
except TransportError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
except CancelledError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
|
||||||
|
proc process3(transp: DatagramTransport,
|
||||||
|
raddr: TransportAddress): Future[void] {.
|
||||||
|
async: (raises: []).} =
|
||||||
|
try:
|
||||||
|
var
|
||||||
|
bmsg = transp.getMessage()
|
||||||
|
smsg = string.fromBytes(bmsg)
|
||||||
|
if smsg == mappedResponse:
|
||||||
|
inc(res)
|
||||||
|
event2.fire()
|
||||||
|
except TransportError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
except CancelledError as exc:
|
||||||
|
raiseAssert exc.msg
|
||||||
|
|
||||||
  let sdgram =
    block:
      var res: DatagramTransport
      var currentPort = port
      for i in 0 ..< 10:
        res =
          try:
            newDatagramTransport(process1, currentPort,
                                 flags = {ServerFlags.ReusePort})
          except TransportOsError:
            echo "Unable to create transport on port ", currentPort
            currentPort = Port(uint16(currentPort) + 1'u16)
            nil
        if not(isNil(res)):
          break
      doAssert(not(isNil(res)), "Unable to create transport, giving up")
      res

  var
    address =
      case family
      of AddressFamily.IPv4:
        initTAddress("127.0.0.1:0")
      of AddressFamily.IPv6:
        initTAddress("::1:0")
      of AddressFamily.Unix, AddressFamily.None:
        raiseAssert "Not allowed"

  let
    cdgram =
      case family
      of AddressFamily.IPv4:
        newDatagramTransport(process2, local = address)
      of AddressFamily.IPv6:
        newDatagramTransport6(process2, local = address)
      of AddressFamily.Unix, AddressFamily.None:
        raiseAssert "Not allowed"

  address.port = sdgram.localAddress().port

  try:
    await noCancel cdgram.sendTo(
      address, addr expectRequest1[0], len(expectRequest1))
  except TransportError:
    discard

  if family == AddressFamily.IPv6:
    var remote = initTAddress("127.0.0.1:0")
    remote.port = sdgram.localAddress().port
    let wtransp =
      newDatagramTransport(process3, local = initTAddress("0.0.0.0:0"))
    try:
      await noCancel wtransp.sendTo(
        remote, addr expectRequest2[0], len(expectRequest2))
    except TransportError as exc:
      raiseAssert "Got transport error, reason = " & $exc.msg

    try:
      await event2.wait().wait(1.seconds)
    except CatchableError:
      discard

    await wtransp.closeWait()

  try:
    await event.wait().wait(1.seconds)
  except CatchableError:
    discard

  await allFutures(sdgram.closeWait(), cdgram.closeWait())

  if family == AddressFamily.IPv4:
    res == 2
  else:
    res == 4

proc performAutoAddressTest2(
    address1: Opt[IpAddress],
    address2: Opt[IpAddress],
    port: Port,
    sendType: AddressFamily,
    boundType: DatagramSocketType
): Future[bool] {.async.} =
  let
    expectRequest = "TEST REQUEST"
    expectResponse = "TEST RESPONSE"
    event = newAsyncEvent()
  var res = 0

  proc process1(transp: DatagramTransport,
                raddr: TransportAddress): Future[void] {.
      async: (raises: []).} =
    if raddr.family != sendType:
      raiseAssert "Incorrect address family received [" & $raddr &
                  "], expected [" & $sendType & "]"
    try:
      let
        bmsg = transp.getMessage()
        smsg = string.fromBytes(bmsg)
      if smsg == expectRequest:
        inc(res)
        await noCancel transp.sendTo(
          raddr, unsafeAddr expectResponse[0], len(expectResponse))
    except TransportError as exc:
      raiseAssert exc.msg
    except CancelledError as exc:
      raiseAssert exc.msg

  proc process2(transp: DatagramTransport,
                raddr: TransportAddress): Future[void] {.
      async: (raises: []).} =
    if raddr.family != sendType:
      raiseAssert "Incorrect address family received [" & $raddr &
                  "], expected [" & $sendType & "]"
    try:
      let
        bmsg = transp.getMessage()
        smsg = string.fromBytes(bmsg)
      if smsg == expectResponse:
        inc(res)
        event.fire()
    except TransportError as exc:
      raiseAssert exc.msg
    except CancelledError as exc:
      raiseAssert exc.msg

  let
    serverFlags = {ServerFlags.ReuseAddr}
    server = newDatagramTransport(process1, flags = serverFlags,
                                  local = address1, localPort = port)
    serverAddr = server.localAddress()
    serverPort = serverAddr.port
    remoteAddress =
      case sendType
      of AddressFamily.IPv4:
        var res = initTAddress("127.0.0.1:0")
        res.port = serverPort
        res
      of AddressFamily.IPv6:
        var res = initTAddress("[::1]:0")
        res.port = serverPort
        res
      else:
        raiseAssert "Incorrect sending type"
    remoteIpAddress = Opt.some(remoteAddress.toIpAddress())
    client =
      case boundType
      of DatagramSocketType.Bound:
        newDatagramTransport(process2,
                             localPort = Port(0), remotePort = serverPort,
                             local = address2, remote = remoteIpAddress)
      of DatagramSocketType.Unbound:
        newDatagramTransport(process2,
                             localPort = Port(0), remotePort = Port(0),
                             local = address2)

  try:
    case boundType
    of DatagramSocketType.Bound:
      await noCancel client.send(
        unsafeAddr expectRequest[0], len(expectRequest))
    of DatagramSocketType.Unbound:
      await noCancel client.sendTo(remoteAddress,
        unsafeAddr expectRequest[0], len(expectRequest))
  except TransportError as exc:
    raiseAssert "Could not send datagram to remote peer, reason = " & $exc.msg

  try:
    await event.wait().wait(1.seconds)
  except CatchableError:
    discard

  await allFutures(server.closeWait(), client.closeWait())

  res == 2
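A minimal sketch (not part of the diff) of the two client styles the helper above distinguishes: a "bound" transport fixes its peer at construction time via `remote`/`remotePort` and can use plain `send()`, while an "unbound" one passes the destination to `sendTo()` on every call. The sketch shows the unbound case only; it reuses the chronos calls from the tests (`newDatagramTransport`, `sendTo`, `closeWait`), and the proc names are illustrative.

import chronos

proc demoUnboundClient() {.async.} =
  proc onDatagram(transp: DatagramTransport,
                  raddr: TransportAddress): Future[void] {.
      async: (raises: []).} =
    discard # a real handler would inspect transp.getMessage() here

  let server = newDatagramTransport(onDatagram,
                                    local = initTAddress("127.0.0.1:0"))
  var target = initTAddress("127.0.0.1:0")
  target.port = server.localAddress().port

  let client = newDatagramTransport(onDatagram,
                                    local = initTAddress("127.0.0.1:0"))
  var msg = "ping"
  # Unbound: the destination address is supplied per call.
  await client.sendTo(target, addr msg[0], len(msg))
  await allFutures(server.closeWait(), client.closeWait())

waitFor demoUnboundClient()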
test "close(transport) test":
|
test "close(transport) test":
|
||||||
check waitFor(testTransportClose()) == true
|
check waitFor(testTransportClose()) == true
|
||||||
test m1:
|
test m1:
|
||||||
@ -730,3 +971,104 @@ suite "Datagram Transport test suite":
|
|||||||
DualStackType.Auto, initTAddress("[::1]:0"))) == true
|
DualStackType.Auto, initTAddress("[::1]:0"))) == true
|
||||||
else:
|
else:
|
||||||
skip()
|
skip()
|
||||||
|
asyncTest "[IP] Auto-address constructor test (*:0)":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(0), AddressFamily.IPv6)) == true
|
||||||
|
# If IPv6 is available newAutoDatagramTransport should bind to `::` - this
|
||||||
|
# means that we should be able to connect to it via IPV4_MAPPED address,
|
||||||
|
# but only when IPv4 is also available.
|
||||||
|
if isAvailable(AddressFamily.IPv4):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true
|
||||||
|
else:
|
||||||
|
# If IPv6 is not available newAutoDatagramTransport should bind to
|
||||||
|
# `0.0.0.0` - this means we should be able to connect to it via IPv4
|
||||||
|
# address.
|
||||||
|
if isAvailable(AddressFamily.IPv4):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true
|
||||||
|
asyncTest "[IP] Auto-address constructor test (*:30231)":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(30231), AddressFamily.IPv6)) == true
|
||||||
|
# If IPv6 is available newAutoDatagramTransport should bind to `::` - this
|
||||||
|
# means that we should be able to connect to it via IPV4_MAPPED address,
|
||||||
|
# but only when IPv4 is also available.
|
||||||
|
if isAvailable(AddressFamily.IPv4):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) ==
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
# If IPv6 is not available newAutoDatagramTransport should bind to
|
||||||
|
# `0.0.0.0` - this means we should be able to connect to it via IPv4
|
||||||
|
# address.
|
||||||
|
if isAvailable(AddressFamily.IPv4):
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) ==
|
||||||
|
true
|
||||||
|
|
||||||
|
for socketType in DatagramSocketType:
|
||||||
|
for portNumber in [Port(0), Port(30231)]:
|
||||||
|
asyncTest "[IP] IPv6 mapping test (" & $socketType &
|
||||||
|
"/auto-auto:" & $int(portNumber) & ")":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
let
|
||||||
|
address1 = Opt.none(IpAddress)
|
||||||
|
address2 = Opt.none(IpAddress)
|
||||||
|
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest2(
|
||||||
|
address1, address2, portNumber, AddressFamily.IPv4, socketType))
|
||||||
|
(await performAutoAddressTest2(
|
||||||
|
address1, address2, portNumber, AddressFamily.IPv6, socketType))
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
|
||||||
|
asyncTest "[IP] IPv6 mapping test (" & $socketType &
|
||||||
|
"/auto-ipv6:" & $int(portNumber) & ")":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
let
|
||||||
|
address1 = Opt.none(IpAddress)
|
||||||
|
address2 = Opt.some(initTAddress("[::1]:0").toIpAddress())
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest2(
|
||||||
|
address1, address2, portNumber, AddressFamily.IPv6, socketType))
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
|
||||||
|
asyncTest "[IP] IPv6 mapping test (" & $socketType &
|
||||||
|
"/auto-ipv4:" & $int(portNumber) & ")":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
let
|
||||||
|
address1 = Opt.none(IpAddress)
|
||||||
|
address2 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress())
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest2(address1, address2, portNumber,
|
||||||
|
AddressFamily.IPv4, socketType))
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
|
||||||
|
asyncTest "[IP] IPv6 mapping test (" & $socketType &
|
||||||
|
"/ipv6-auto:" & $int(portNumber) & ")":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
let
|
||||||
|
address1 = Opt.some(initTAddress("[::1]:0").toIpAddress())
|
||||||
|
address2 = Opt.none(IpAddress)
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest2(address1, address2, portNumber,
|
||||||
|
AddressFamily.IPv6, socketType))
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
|
||||||
|
asyncTest "[IP] IPv6 mapping test (" & $socketType &
|
||||||
|
"/ipv4-auto:" & $int(portNumber) & ")":
|
||||||
|
if isAvailable(AddressFamily.IPv6):
|
||||||
|
let
|
||||||
|
address1 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress())
|
||||||
|
address2 = Opt.none(IpAddress)
|
||||||
|
check:
|
||||||
|
(await performAutoAddressTest2(address1, address2, portNumber,
|
||||||
|
AddressFamily.IPv4, socketType))
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
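The branches above hinge on which address families the host actually supports. A small sketch (not from the diff) of the same runtime guard, reusing the `isAvailable` helper the tests call, which is assumed to be exported by the main `chronos` import as in the test files:

import chronos

# True only when both families are usable, i.e. when the IPv4-mapped
# connection path exercised above can work at all.
proc dualStackUsable(): bool =
  isAvailable(AddressFamily.IPv6) and isAvailable(AddressFamily.IPv4)

echo "dual-stack capable host: ", dualStackUsable()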
@@ -6,7 +6,7 @@
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import unittest2
import stew/results
import results
import ../chronos, ../chronos/unittest2/asynctests

{.used.}

@@ -83,7 +83,7 @@ suite "Future[T] behavior test suite":
      fut.finished
      testResult == "1245"

  asyncTest "wait[T]() test":
  asyncTest "wait(duration) test":
    block:
      ## Test for not immediately completed future and timeout = -1
      let res =

@@ -146,6 +146,183 @@ suite "Future[T] behavior test suite":
          false
      check res

asyncTest "wait(future) test":
|
||||||
|
block:
|
||||||
|
## Test for not immediately completed future and deadline which is not
|
||||||
|
## going to be finished
|
||||||
|
let
|
||||||
|
deadline = newFuture[void]()
|
||||||
|
future1 = testFuture1()
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
discard await wait(future1, deadline)
|
||||||
|
true
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
check:
|
||||||
|
deadline.finished() == false
|
||||||
|
future1.finished() == true
|
||||||
|
res == true
|
||||||
|
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
|
||||||
|
check deadline.finished() == true
|
||||||
|
block:
|
||||||
|
## Test for immediately completed future and timeout = -1
|
||||||
|
let
|
||||||
|
deadline = newFuture[void]()
|
||||||
|
future2 = testFuture2()
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
discard await wait(future2, deadline)
|
||||||
|
true
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
check:
|
||||||
|
deadline.finished() == false
|
||||||
|
future2.finished() == true
|
||||||
|
res
|
||||||
|
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
|
||||||
|
check deadline.finished() == true
|
||||||
|
block:
|
||||||
|
## Test for not immediately completed future and timeout = 0
|
||||||
|
let
|
||||||
|
deadline = newFuture[void]()
|
||||||
|
future1 = testFuture1()
|
||||||
|
deadline.complete()
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
discard await wait(future1, deadline)
|
||||||
|
false
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
true
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
check:
|
||||||
|
future1.finished() == false
|
||||||
|
deadline.finished() == true
|
||||||
|
res
|
||||||
|
|
||||||
|
block:
|
||||||
|
## Test for immediately completed future and timeout = 0
|
||||||
|
let
|
||||||
|
deadline = newFuture[void]()
|
||||||
|
future2 = testFuture2()
|
||||||
|
deadline.complete()
|
||||||
|
let (res1, res2) =
|
||||||
|
try:
|
||||||
|
let res = await wait(future2, deadline)
|
||||||
|
(true, res)
|
||||||
|
except CatchableError:
|
||||||
|
(false, -1)
|
||||||
|
check:
|
||||||
|
future2.finished() == true
|
||||||
|
deadline.finished() == true
|
||||||
|
res1 == true
|
||||||
|
res2 == 1
|
||||||
|
|
||||||
|
block:
|
||||||
|
## Test for future which cannot be completed in timeout period
|
||||||
|
let
|
||||||
|
deadline = sleepAsync(50.milliseconds)
|
||||||
|
future100 = testFuture100()
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
discard await wait(future100, deadline)
|
||||||
|
false
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
true
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
check:
|
||||||
|
deadline.finished() == true
|
||||||
|
res
|
||||||
|
await future100.cancelAndWait()
|
||||||
|
check:
|
||||||
|
future100.finished() == true
|
||||||
|
|
||||||
|
block:
|
||||||
|
## Test for future which will be completed before timeout exceeded.
|
||||||
|
let
|
||||||
|
deadline = sleepAsync(500.milliseconds)
|
||||||
|
future100 = testFuture100()
|
||||||
|
let (res1, res2) =
|
||||||
|
try:
|
||||||
|
let res = await wait(future100, deadline)
|
||||||
|
(true, res)
|
||||||
|
except CatchableError:
|
||||||
|
(false, -1)
|
||||||
|
check:
|
||||||
|
future100.finished() == true
|
||||||
|
deadline.finished() == false
|
||||||
|
res1 == true
|
||||||
|
res2 == 0
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
check:
|
||||||
|
deadline.finished() == true
|
||||||
|
|
||||||
|
asyncTest "wait(future) cancellation behavior test":
|
||||||
|
proc deepTest3(future: Future[void]) {.async.} =
|
||||||
|
await future
|
||||||
|
|
||||||
|
proc deepTest2(future: Future[void]) {.async.} =
|
||||||
|
await deepTest3(future)
|
||||||
|
|
||||||
|
proc deepTest1(future: Future[void]) {.async.} =
|
||||||
|
await deepTest2(future)
|
||||||
|
|
||||||
|
let
|
||||||
|
|
||||||
|
deadlineFuture = newFuture[void]()
|
||||||
|
|
||||||
|
block:
|
||||||
|
# Cancellation should affect `testFuture` because it is in pending state.
|
||||||
|
let monitorFuture = newFuture[void]()
|
||||||
|
var testFuture = deepTest1(monitorFuture)
|
||||||
|
let waitFut = wait(testFuture, deadlineFuture)
|
||||||
|
await cancelAndWait(waitFut)
|
||||||
|
check:
|
||||||
|
monitorFuture.cancelled() == true
|
||||||
|
testFuture.cancelled() == true
|
||||||
|
waitFut.cancelled() == true
|
||||||
|
deadlineFuture.finished() == false
|
||||||
|
|
||||||
|
block:
|
||||||
|
# Cancellation should not affect `testFuture` because it is completed.
|
||||||
|
let monitorFuture = newFuture[void]()
|
||||||
|
var testFuture = deepTest1(monitorFuture)
|
||||||
|
let waitFut = wait(testFuture, deadlineFuture)
|
||||||
|
monitorFuture.complete()
|
||||||
|
await cancelAndWait(waitFut)
|
||||||
|
check:
|
||||||
|
monitorFuture.completed() == true
|
||||||
|
monitorFuture.cancelled() == false
|
||||||
|
testFuture.completed() == true
|
||||||
|
waitFut.completed() == true
|
||||||
|
deadlineFuture.finished() == false
|
||||||
|
|
||||||
|
block:
|
||||||
|
# Cancellation should not affect `testFuture` because it is failed.
|
||||||
|
let monitorFuture = newFuture[void]()
|
||||||
|
var testFuture = deepTest1(monitorFuture)
|
||||||
|
let waitFut = wait(testFuture, deadlineFuture)
|
||||||
|
monitorFuture.fail(newException(ValueError, "TEST"))
|
||||||
|
await cancelAndWait(waitFut)
|
||||||
|
check:
|
||||||
|
monitorFuture.failed() == true
|
||||||
|
monitorFuture.cancelled() == false
|
||||||
|
testFuture.failed() == true
|
||||||
|
testFuture.cancelled() == false
|
||||||
|
waitFut.failed() == true
|
||||||
|
testFuture.cancelled() == false
|
||||||
|
deadlineFuture.finished() == false
|
||||||
|
|
||||||
|
await cancelAndWait(deadlineFuture)
|
||||||
|
|
||||||
|
check deadlineFuture.finished() == true
|
||||||
|
|
||||||
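The overload exercised here takes another `Future` as the deadline instead of a `Duration`. A minimal sketch (assumptions flagged in comments) of using such a caller-supplied deadline outside the test suite:

import chronos

proc fetchSomething(): Future[int] {.async.} =
  # Hypothetical operation standing in for real work.
  await sleepAsync(10.milliseconds)
  return 42

proc fetchWithDeadline(deadline: Future[void]): Future[int] {.async.} =
  try:
    # `wait(fut, deadline)` as used by the tests above: raises
    # AsyncTimeoutError if `deadline` finishes first.
    return await wait(fetchSomething(), deadline)
  except AsyncTimeoutError:
    return -1

when isMainModule:
  let deadline = sleepAsync(1.seconds)
  echo waitFor fetchWithDeadline(deadline)
  waitFor deadline.cancelAndWait()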
asyncTest "Discarded result Future[T] test":
|
asyncTest "Discarded result Future[T] test":
|
||||||
var completedFutures = 0
|
var completedFutures = 0
|
||||||
|
|
||||||
@ -1082,7 +1259,7 @@ suite "Future[T] behavior test suite":
|
|||||||
completed == 0
|
completed == 0
|
||||||
cancelled == 1
|
cancelled == 1
|
||||||
|
|
||||||
asyncTest "Cancellation wait() test":
|
asyncTest "Cancellation wait(duration) test":
|
||||||
var neverFlag1, neverFlag2, neverFlag3: bool
|
var neverFlag1, neverFlag2, neverFlag3: bool
|
||||||
var waitProc1, waitProc2: bool
|
var waitProc1, waitProc2: bool
|
||||||
proc neverEndingProc(): Future[void] =
|
proc neverEndingProc(): Future[void] =
|
||||||
@ -1143,7 +1320,39 @@ suite "Future[T] behavior test suite":
|
|||||||
fut.state == FutureState.Completed
|
fut.state == FutureState.Completed
|
||||||
neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2
|
neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2
|
||||||
|
|
||||||
asyncTest "Cancellation race test":
|
asyncTest "Cancellation wait(future) test":
|
||||||
|
var neverFlag1, neverFlag2, neverFlag3: bool
|
||||||
|
var waitProc1, waitProc2: bool
|
||||||
|
proc neverEndingProc(): Future[void] =
|
||||||
|
var res = newFuture[void]()
|
||||||
|
proc continuation(udata: pointer) {.gcsafe.} =
|
||||||
|
neverFlag2 = true
|
||||||
|
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||||
|
neverFlag3 = true
|
||||||
|
res.addCallback(continuation)
|
||||||
|
res.cancelCallback = cancellation
|
||||||
|
result = res
|
||||||
|
neverFlag1 = true
|
||||||
|
|
||||||
|
proc waitProc() {.async.} =
|
||||||
|
let deadline = sleepAsync(100.milliseconds)
|
||||||
|
try:
|
||||||
|
await wait(neverEndingProc(), deadline)
|
||||||
|
except CancelledError:
|
||||||
|
waitProc1 = true
|
||||||
|
except CatchableError:
|
||||||
|
doAssert(false)
|
||||||
|
finally:
|
||||||
|
await cancelAndWait(deadline)
|
||||||
|
waitProc2 = true
|
||||||
|
|
||||||
|
var fut = waitProc()
|
||||||
|
await cancelAndWait(fut)
|
||||||
|
check:
|
||||||
|
fut.state == FutureState.Completed
|
||||||
|
neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2
|
||||||
|
|
||||||
|
asyncTest "Cancellation race() test":
|
||||||
var someFut = newFuture[void]()
|
var someFut = newFuture[void]()
|
||||||
|
|
||||||
proc raceProc(): Future[void] {.async.} =
|
proc raceProc(): Future[void] {.async.} =
|
||||||
@ -1298,7 +1507,7 @@ suite "Future[T] behavior test suite":
|
|||||||
false
|
false
|
||||||
check res
|
check res
|
||||||
|
|
||||||
asyncTest "wait(fut) should wait cancellation test":
|
asyncTest "wait(future) should wait cancellation test":
|
||||||
proc futureNeverEnds(): Future[void] =
|
proc futureNeverEnds(): Future[void] =
|
||||||
newFuture[void]("neverending.future")
|
newFuture[void]("neverending.future")
|
||||||
|
|
||||||
@ -1322,6 +1531,29 @@ suite "Future[T] behavior test suite":
|
|||||||
|
|
||||||
check res
|
check res
|
||||||
|
|
||||||
|
asyncTest "wait(future) should wait cancellation test":
|
||||||
|
proc futureNeverEnds(): Future[void] =
|
||||||
|
newFuture[void]("neverending.future")
|
||||||
|
|
||||||
|
proc futureOneLevelMore() {.async.} =
|
||||||
|
await futureNeverEnds()
|
||||||
|
|
||||||
|
var fut = futureOneLevelMore()
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
await wait(fut, sleepAsync(100.milliseconds))
|
||||||
|
false
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
# Because `fut` is never-ending Future[T], `wait` should raise
|
||||||
|
# `AsyncTimeoutError`, but only after `fut` is cancelled.
|
||||||
|
if fut.cancelled():
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
false
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
check res
|
||||||
|
|
||||||
test "race(zero) test":
|
test "race(zero) test":
|
||||||
var tseq = newSeq[FutureBase]()
|
var tseq = newSeq[FutureBase]()
|
||||||
var fut1 = race(tseq)
|
var fut1 = race(tseq)
|
||||||
@ -1563,7 +1795,7 @@ suite "Future[T] behavior test suite":
|
|||||||
v1_u == 0'u
|
v1_u == 0'u
|
||||||
v2_u + 1'u == 0'u
|
v2_u + 1'u == 0'u
|
||||||
|
|
||||||
asyncTest "wait() cancellation undefined behavior test #1":
|
asyncTest "wait(duration) cancellation undefined behavior test #1":
|
||||||
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
||||||
async.} =
|
async.} =
|
||||||
await fooFut
|
await fooFut
|
||||||
@ -1586,7 +1818,7 @@ suite "Future[T] behavior test suite":
|
|||||||
discard someFut.tryCancel()
|
discard someFut.tryCancel()
|
||||||
await someFut
|
await someFut
|
||||||
|
|
||||||
asyncTest "wait() cancellation undefined behavior test #2":
|
asyncTest "wait(duration) cancellation undefined behavior test #2":
|
||||||
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
||||||
async.} =
|
async.} =
|
||||||
await fooFut
|
await fooFut
|
||||||
@ -1613,7 +1845,7 @@ suite "Future[T] behavior test suite":
|
|||||||
discard someFut.tryCancel()
|
discard someFut.tryCancel()
|
||||||
await someFut
|
await someFut
|
||||||
|
|
||||||
asyncTest "wait() should allow cancellation test (depends on race())":
|
asyncTest "wait(duration) should allow cancellation test (depends on race())":
|
||||||
proc testFoo(): Future[bool] {.async.} =
|
proc testFoo(): Future[bool] {.async.} =
|
||||||
let
|
let
|
||||||
resFut = sleepAsync(2.seconds).wait(3.seconds)
|
resFut = sleepAsync(2.seconds).wait(3.seconds)
|
||||||
@ -1699,6 +1931,78 @@ suite "Future[T] behavior test suite":
|
|||||||
|
|
||||||
check (await testFoo()) == true
|
check (await testFoo()) == true
|
||||||
|
|
||||||
|
asyncTest "wait(future) cancellation undefined behavior test #1":
|
||||||
|
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
||||||
|
async.} =
|
||||||
|
await fooFut
|
||||||
|
return TestFooConnection()
|
||||||
|
|
||||||
|
proc testFoo(fooFut: Future[void]) {.async.} =
|
||||||
|
let deadline = sleepAsync(10.seconds)
|
||||||
|
let connection =
|
||||||
|
try:
|
||||||
|
let res = await testInnerFoo(fooFut).wait(deadline)
|
||||||
|
Result[TestFooConnection, int].ok(res)
|
||||||
|
except CancelledError:
|
||||||
|
Result[TestFooConnection, int].err(0)
|
||||||
|
except CatchableError:
|
||||||
|
Result[TestFooConnection, int].err(1)
|
||||||
|
finally:
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
|
||||||
|
check connection.isOk()
|
||||||
|
|
||||||
|
var future = newFuture[void]("last.child.future")
|
||||||
|
var someFut = testFoo(future)
|
||||||
|
future.complete()
|
||||||
|
discard someFut.tryCancel()
|
||||||
|
await someFut
|
||||||
|
|
||||||
|
asyncTest "wait(future) cancellation undefined behavior test #2":
|
||||||
|
proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
||||||
|
async.} =
|
||||||
|
await fooFut
|
||||||
|
return TestFooConnection()
|
||||||
|
|
||||||
|
proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {.
|
||||||
|
async.} =
|
||||||
|
await testInnerFoo(fooFut)
|
||||||
|
|
||||||
|
proc testFoo(fooFut: Future[void]) {.async.} =
|
||||||
|
let deadline = sleepAsync(10.seconds)
|
||||||
|
let connection =
|
||||||
|
try:
|
||||||
|
let res = await testMiddleFoo(fooFut).wait(deadline)
|
||||||
|
Result[TestFooConnection, int].ok(res)
|
||||||
|
except CancelledError:
|
||||||
|
Result[TestFooConnection, int].err(0)
|
||||||
|
except CatchableError:
|
||||||
|
Result[TestFooConnection, int].err(1)
|
||||||
|
finally:
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
check connection.isOk()
|
||||||
|
|
||||||
|
var future = newFuture[void]("last.child.future")
|
||||||
|
var someFut = testFoo(future)
|
||||||
|
future.complete()
|
||||||
|
discard someFut.tryCancel()
|
||||||
|
await someFut
|
||||||
|
|
||||||
|
asyncTest "wait(future) should allow cancellation test (depends on race())":
|
||||||
|
proc testFoo(): Future[bool] {.async.} =
|
||||||
|
let
|
||||||
|
deadline = sleepAsync(3.seconds)
|
||||||
|
resFut = sleepAsync(2.seconds).wait(deadline)
|
||||||
|
timeFut = sleepAsync(1.seconds)
|
||||||
|
cancelFut = cancelAndWait(resFut)
|
||||||
|
discard await race(cancelFut, timeFut)
|
||||||
|
await deadline.cancelAndWait()
|
||||||
|
if cancelFut.finished():
|
||||||
|
return (resFut.cancelled() and cancelFut.completed())
|
||||||
|
false
|
||||||
|
|
||||||
|
check (await testFoo()) == true
|
||||||
|
|
||||||
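A condensed sketch of the guard pattern both `wait(future)` undefined-behavior tests rely on: the awaited call is funnelled into a `Result` so that cancellation, timeout and other errors cannot escape, and the deadline is always cleaned up in `finally`. `doWork` is a hypothetical stand-in; `wait`, `sleepAsync`, `cancelAndWait` and `Result` are used exactly as in the tests.

import chronos
import results

proc doWork(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  return 7

proc guardedWork(): Future[Result[int, int]] {.async.} =
  let deadline = sleepAsync(1.seconds)
  let res =
    try:
      Result[int, int].ok(await doWork().wait(deadline))
    except CancelledError:
      Result[int, int].err(0)
    except CatchableError:
      Result[int, int].err(1)
    finally:
      # The deadline future must not be leaked, whatever happened above.
      await deadline.cancelAndWait()
  return res

echo waitFor(guardedWork()).isOk()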
asyncTest "Cancellation behavior test":
|
asyncTest "Cancellation behavior test":
|
||||||
proc testInnerFoo(fooFut: Future[void]) {.async.} =
|
proc testInnerFoo(fooFut: Future[void]) {.async.} =
|
||||||
await fooFut
|
await fooFut
|
||||||
@ -2048,6 +2352,332 @@ suite "Future[T] behavior test suite":
|
|||||||
future1.cancelled() == true
|
future1.cancelled() == true
|
||||||
future2.cancelled() == true
|
future2.cancelled() == true
|
||||||
|
|
||||||
|
asyncTest "cancelAndWait(varargs) should be able to cancel test":
|
||||||
|
proc test01() {.async.} =
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test02() {.async.} =
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test03() {.async.} =
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test04() {.async.} =
|
||||||
|
while true:
|
||||||
|
await noCancel sleepAsync(50.milliseconds)
|
||||||
|
await sleepAsync(0.milliseconds)
|
||||||
|
|
||||||
|
proc test05() {.async.} =
|
||||||
|
while true:
|
||||||
|
await sleepAsync(0.milliseconds)
|
||||||
|
await noCancel sleepAsync(50.milliseconds)
|
||||||
|
|
||||||
|
proc test11() {.async: (raises: [CancelledError]).} =
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test12() {.async: (raises: [CancelledError]).} =
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test13() {.async: (raises: [CancelledError]).} =
|
||||||
|
await sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
await noCancel sleepAsync(100.milliseconds)
|
||||||
|
|
||||||
|
proc test14() {.async: (raises: [CancelledError]).} =
|
||||||
|
while true:
|
||||||
|
await noCancel sleepAsync(50.milliseconds)
|
||||||
|
await sleepAsync(0.milliseconds)
|
||||||
|
|
||||||
|
proc test15() {.async: (raises: [CancelledError]).} =
|
||||||
|
while true:
|
||||||
|
await sleepAsync(0.milliseconds)
|
||||||
|
await noCancel sleepAsync(50.milliseconds)
|
||||||
|
|
||||||
|
template runTest(N1, N2, N3: untyped) =
|
||||||
|
let
|
||||||
|
future01 = `test N2 N1`()
|
||||||
|
future02 = `test N2 N1`()
|
||||||
|
future03 = `test N2 N1`()
|
||||||
|
future04 = `test N2 N1`()
|
||||||
|
future05 = `test N2 N1`()
|
||||||
|
future06 = `test N2 N1`()
|
||||||
|
future07 = `test N2 N1`()
|
||||||
|
future08 = `test N2 N1`()
|
||||||
|
future09 = `test N2 N1`()
|
||||||
|
future10 = `test N2 N1`()
|
||||||
|
future11 = `test N2 N1`()
|
||||||
|
future12 = `test N2 N1`()
|
||||||
|
|
||||||
|
await allFutures(
|
||||||
|
cancelAndWait(future01, future02),
|
||||||
|
cancelAndWait(FutureBase(future03), FutureBase(future04)),
|
||||||
|
cancelAndWait([future05, future06]),
|
||||||
|
cancelAndWait([FutureBase(future07), FutureBase(future08)]),
|
||||||
|
cancelAndWait(@[future09, future10]),
|
||||||
|
cancelAndWait(@[FutureBase(future11), FutureBase(future12)])
|
||||||
|
)
|
||||||
|
|
||||||
|
let
|
||||||
|
future21 = `test N2 N1`()
|
||||||
|
future22 = `test N2 N1`()
|
||||||
|
future23 = `test N2 N1`()
|
||||||
|
future24 = `test N2 N1`()
|
||||||
|
future25 = `test N2 N1`()
|
||||||
|
future26 = `test N2 N1`()
|
||||||
|
future27 = `test N2 N1`()
|
||||||
|
future28 = `test N2 N1`()
|
||||||
|
future29 = `test N2 N1`()
|
||||||
|
future30 = `test N2 N1`()
|
||||||
|
future31 = `test N2 N1`()
|
||||||
|
future32 = `test N2 N1`()
|
||||||
|
|
||||||
|
await sleepAsync(`N3`)
|
||||||
|
|
||||||
|
await allFutures(
|
||||||
|
cancelAndWait(future21, future22),
|
||||||
|
cancelAndWait(FutureBase(future23), FutureBase(future24)),
|
||||||
|
cancelAndWait([future25, future26]),
|
||||||
|
cancelAndWait([FutureBase(future27), FutureBase(future28)]),
|
||||||
|
cancelAndWait(@[future29, future30]),
|
||||||
|
cancelAndWait(@[FutureBase(future31), FutureBase(future32)])
|
||||||
|
)
|
||||||
|
|
||||||
|
check:
|
||||||
|
future01.state == FutureState.Cancelled
|
||||||
|
future02.state == FutureState.Cancelled
|
||||||
|
future03.state == FutureState.Cancelled
|
||||||
|
future04.state == FutureState.Cancelled
|
||||||
|
future05.state == FutureState.Cancelled
|
||||||
|
future06.state == FutureState.Cancelled
|
||||||
|
future07.state == FutureState.Cancelled
|
||||||
|
future08.state == FutureState.Cancelled
|
||||||
|
future09.state == FutureState.Cancelled
|
||||||
|
future10.state == FutureState.Cancelled
|
||||||
|
future11.state == FutureState.Cancelled
|
||||||
|
future12.state == FutureState.Cancelled
|
||||||
|
future21.state == FutureState.Cancelled
|
||||||
|
future22.state == FutureState.Cancelled
|
||||||
|
future23.state == FutureState.Cancelled
|
||||||
|
future24.state == FutureState.Cancelled
|
||||||
|
future25.state == FutureState.Cancelled
|
||||||
|
future26.state == FutureState.Cancelled
|
||||||
|
future27.state == FutureState.Cancelled
|
||||||
|
future28.state == FutureState.Cancelled
|
||||||
|
future29.state == FutureState.Cancelled
|
||||||
|
future30.state == FutureState.Cancelled
|
||||||
|
future31.state == FutureState.Cancelled
|
||||||
|
future32.state == FutureState.Cancelled
|
||||||
|
|
||||||
|
runTest(1, 0, 10.milliseconds)
|
||||||
|
runTest(1, 1, 10.milliseconds)
|
||||||
|
runTest(2, 0, 10.milliseconds)
|
||||||
|
runTest(2, 1, 10.milliseconds)
|
||||||
|
runTest(3, 0, 10.milliseconds)
|
||||||
|
runTest(3, 1, 10.milliseconds)
|
||||||
|
runTest(4, 0, 333.milliseconds)
|
||||||
|
runTest(4, 1, 333.milliseconds)
|
||||||
|
runTest(5, 0, 333.milliseconds)
|
||||||
|
runTest(5, 1, 333.milliseconds)
|
||||||
|
|
||||||
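The matrix above hammers one property of `noCancel`: a shielded await is allowed to finish even when the surrounding future has been asked to cancel, and the cancellation then lands at the next unshielded suspension point. A stripped-down sketch of that behaviour (semantics as exercised by the tests; proc names are illustrative):

import chronos

proc shieldedStep() {.async.} =
  await noCancel sleepAsync(50.milliseconds) # completes even if cancelled
  await sleepAsync(0.milliseconds)           # cancellation takes effect here

proc demo() {.async.} =
  let fut = shieldedStep()
  await cancelAndWait(fut)
  doAssert fut.cancelled()

waitFor demo()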
|
asyncTest "cancelAndWait([]) on empty set returns completed Future test":
|
||||||
|
var
|
||||||
|
a0: array[0, Future[void]]
|
||||||
|
a1: array[0, Future[void].Raising([CancelledError])]
|
||||||
|
a2: seq[Future[void].Raising([CancelledError])]
|
||||||
|
a3: seq[Future[void]]
|
||||||
|
|
||||||
|
let
|
||||||
|
future1 = cancelAndWait()
|
||||||
|
future2 = cancelAndWait(a0)
|
||||||
|
future3 = cancelAndWait(a1)
|
||||||
|
future4 = cancelAndWait(a2)
|
||||||
|
future5 = cancelAndWait(a3)
|
||||||
|
|
||||||
|
check:
|
||||||
|
future1.finished() == true
|
||||||
|
future2.finished() == true
|
||||||
|
future3.finished() == true
|
||||||
|
future4.finished() == true
|
||||||
|
future5.finished() == true
|
||||||
|
|
||||||
|
asyncTest "cancelAndWait([]) should ignore finished futures test":
|
||||||
|
let
|
||||||
|
future0 =
|
||||||
|
Future[void].Raising([]).init("future0", {OwnCancelSchedule})
|
||||||
|
future1 =
|
||||||
|
Future[void].Raising([CancelledError]).init("future1")
|
||||||
|
future2 =
|
||||||
|
Future[void].Raising([CancelledError, ValueError]).init("future2")
|
||||||
|
future3 =
|
||||||
|
Future[string].Raising([]).init("future3", {OwnCancelSchedule})
|
||||||
|
future4 =
|
||||||
|
Future[string].Raising([CancelledError]).init("future4")
|
||||||
|
future5 =
|
||||||
|
Future[string].Raising([CancelledError, ValueError]).init("future5")
|
||||||
|
future6 =
|
||||||
|
newFuture[void]("future6")
|
||||||
|
future7 =
|
||||||
|
newFuture[void]("future7")
|
||||||
|
future8 =
|
||||||
|
newFuture[void]("future8")
|
||||||
|
future9 =
|
||||||
|
newFuture[string]("future9")
|
||||||
|
future10 =
|
||||||
|
newFuture[string]("future10")
|
||||||
|
future11 =
|
||||||
|
newFuture[string]("future11")
|
||||||
|
|
||||||
|
future0.complete()
|
||||||
|
check future1.tryCancel() == true
|
||||||
|
future2.fail(newException(ValueError, "Test Error"))
|
||||||
|
future3.complete("test")
|
||||||
|
check future4.tryCancel() == true
|
||||||
|
future5.fail(newException(ValueError, "Test Error"))
|
||||||
|
future6.complete()
|
||||||
|
check future7.tryCancel() == true
|
||||||
|
future8.fail(newException(ValueError, "Test Error"))
|
||||||
|
future9.complete("test")
|
||||||
|
check future10.tryCancel() == true
|
||||||
|
future11.fail(newException(ValueError, "Test Error"))
|
||||||
|
|
||||||
|
check:
|
||||||
|
cancelAndWait(future0, future1, future2).finished() == true
|
||||||
|
cancelAndWait(future3, future4, future5).finished() == true
|
||||||
|
cancelAndWait(future6, future7, future8).finished() == true
|
||||||
|
cancelAndWait(future9, future10, future11).finished() == true
|
||||||
|
cancelAndWait(future0, future1, future2,
|
||||||
|
future3, future4, future5,
|
||||||
|
future5, future7, future8,
|
||||||
|
future9, future10, future11).finished() == true
|
||||||
|
|
||||||
|
cancelAndWait([future0, future1, future2]).finished() == true
|
||||||
|
cancelAndWait([future3, future4]).finished() == true
|
||||||
|
cancelAndWait([future5]).finished() == true
|
||||||
|
cancelAndWait([future6, future7, future8]).finished() == true
|
||||||
|
cancelAndWait([future9, future10, future11]).finished() == true
|
||||||
|
|
||||||
|
cancelAndWait(@[future0, future1, future2]).finished() == true
|
||||||
|
cancelAndWait(@[future3, future4]).finished() == true
|
||||||
|
cancelAndWait(@[future5]).finished() == true
|
||||||
|
cancelAndWait(@[future6, future7, future8]).finished() == true
|
||||||
|
cancelAndWait(@[future9, future10, future11]).finished() == true
|
||||||
|
|
||||||
|
asyncTest "join() test":
|
||||||
|
proc joinFoo0(future: FutureBase) {.async.} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
proc joinFoo1(future: Future[void]) {.async.} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
proc joinFoo2(future: Future[void]) {.
|
||||||
|
async: (raises: [CancelledError]).} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
let
|
||||||
|
future0 = newFuture[void]()
|
||||||
|
future1 = newFuture[void]()
|
||||||
|
future2 = Future[void].Raising([CancelledError]).init()
|
||||||
|
|
||||||
|
let
|
||||||
|
resfut0 = joinFoo0(future0)
|
||||||
|
resfut1 = joinFoo1(future1)
|
||||||
|
resfut2 = joinFoo2(future2)
|
||||||
|
|
||||||
|
check:
|
||||||
|
resfut0.finished() == false
|
||||||
|
resfut1.finished() == false
|
||||||
|
resfut2.finished() == false
|
||||||
|
|
||||||
|
future0.complete()
|
||||||
|
future1.complete()
|
||||||
|
future2.complete()
|
||||||
|
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
await noCancel allFutures(resfut0, resfut1, resfut2).wait(1.seconds)
|
||||||
|
true
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
false
|
||||||
|
|
||||||
|
check:
|
||||||
|
res == true
|
||||||
|
resfut0.finished() == true
|
||||||
|
resfut1.finished() == true
|
||||||
|
resfut2.finished() == true
|
||||||
|
future0.finished() == true
|
||||||
|
future1.finished() == true
|
||||||
|
future2.finished() == true
|
||||||
|
|
||||||
|
asyncTest "join() cancellation test":
|
||||||
|
proc joinFoo0(future: FutureBase) {.async.} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
proc joinFoo1(future: Future[void]) {.async.} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
proc joinFoo2(future: Future[void]) {.
|
||||||
|
async: (raises: [CancelledError]).} =
|
||||||
|
await join(future)
|
||||||
|
|
||||||
|
let
|
||||||
|
future0 = newFuture[void]()
|
||||||
|
future1 = newFuture[void]()
|
||||||
|
future2 = Future[void].Raising([CancelledError]).init()
|
||||||
|
|
||||||
|
let
|
||||||
|
resfut0 = joinFoo0(future0)
|
||||||
|
resfut1 = joinFoo1(future1)
|
||||||
|
resfut2 = joinFoo2(future2)
|
||||||
|
|
||||||
|
check:
|
||||||
|
resfut0.finished() == false
|
||||||
|
resfut1.finished() == false
|
||||||
|
resfut2.finished() == false
|
||||||
|
|
||||||
|
let
|
||||||
|
cancelfut0 = cancelAndWait(resfut0)
|
||||||
|
cancelfut1 = cancelAndWait(resfut1)
|
||||||
|
cancelfut2 = cancelAndWait(resfut2)
|
||||||
|
|
||||||
|
let res =
|
||||||
|
try:
|
||||||
|
await noCancel allFutures(cancelfut0, cancelfut1,
|
||||||
|
cancelfut2).wait(1.seconds)
|
||||||
|
true
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
false
|
||||||
|
|
||||||
|
check:
|
||||||
|
res == true
|
||||||
|
cancelfut0.finished() == true
|
||||||
|
cancelfut1.finished() == true
|
||||||
|
cancelfut2.finished() == true
|
||||||
|
resfut0.cancelled() == true
|
||||||
|
resfut1.cancelled() == true
|
||||||
|
resfut2.cancelled() == true
|
||||||
|
future0.finished() == false
|
||||||
|
future1.finished() == false
|
||||||
|
future2.finished() == false
|
||||||
|
|
||||||
|
future0.complete()
|
||||||
|
future1.complete()
|
||||||
|
future2.complete()
|
||||||
|
|
||||||
|
check:
|
||||||
|
future0.finished() == true
|
||||||
|
future1.finished() == true
|
||||||
|
future2.finished() == true
|
||||||
|
|
||||||
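A compact sketch of the `join()` property these two tests establish (semantics as shown by the assertions above): joining waits for the target future to finish, but cancelling the join does not cancel the target.

import chronos

proc demo() {.async.} =
  let slow = newFuture[void]("slow")
  let waiter = join(slow)       # finishes only when `slow` finishes
  await cancelAndWait(waiter)   # cancels the waiter...
  doAssert not slow.finished()  # ...while `slow` is left untouched
  slow.complete()

waitFor demo()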
test "Sink with literals":
|
test "Sink with literals":
|
||||||
# https://github.com/nim-lang/Nim/issues/22175
|
# https://github.com/nim-lang/Nim/issues/22175
|
||||||
let fut = newFuture[string]()
|
let fut = newFuture[string]()
|
||||||
@ -2071,3 +2701,218 @@ suite "Future[T] behavior test suite":
|
|||||||
check:
|
check:
|
||||||
not compiles(Future[void].Raising([42]))
|
not compiles(Future[void].Raising([42]))
|
||||||
not compiles(Future[void].Raising(42))
|
not compiles(Future[void].Raising(42))
|
||||||
|
|
||||||
|
asyncTest "Timeout/cancellation race wait(duration) test":
|
||||||
|
proc raceTest(T: typedesc, itype: int) {.async.} =
|
||||||
|
let monitorFuture = newFuture[T]("monitor",
|
||||||
|
{FutureFlag.OwnCancelSchedule})
|
||||||
|
|
||||||
|
proc raceProc0(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await future
|
||||||
|
proc raceProc1(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc0(future)
|
||||||
|
proc raceProc2(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc1(future)
|
||||||
|
|
||||||
|
proc activation(udata: pointer) {.gcsafe.} =
|
||||||
|
if itype == 0:
|
||||||
|
when T is void:
|
||||||
|
monitorFuture.complete()
|
||||||
|
elif T is int:
|
||||||
|
monitorFuture.complete(100)
|
||||||
|
elif itype == 1:
|
||||||
|
monitorFuture.fail(newException(ValueError, "test"))
|
||||||
|
else:
|
||||||
|
monitorFuture.cancelAndSchedule()
|
||||||
|
|
||||||
|
monitorFuture.cancelCallback = activation
|
||||||
|
let
|
||||||
|
testFut = raceProc2(monitorFuture)
|
||||||
|
waitFut = wait(testFut, 10.milliseconds)
|
||||||
|
|
||||||
|
when T is void:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
await waitFut
|
||||||
|
if itype == 0:
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
false
|
||||||
|
except CancelledError:
|
||||||
|
false
|
||||||
|
except CatchableError:
|
||||||
|
if itype != 0:
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
false
|
||||||
|
check waitRes == true
|
||||||
|
elif T is int:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
let res = await waitFut
|
||||||
|
if itype == 0:
|
||||||
|
(true, res)
|
||||||
|
else:
|
||||||
|
(false, -1)
|
||||||
|
except CancelledError:
|
||||||
|
(false, -1)
|
||||||
|
except CatchableError:
|
||||||
|
if itype != 0:
|
||||||
|
(true, 0)
|
||||||
|
else:
|
||||||
|
(false, -1)
|
||||||
|
if itype == 0:
|
||||||
|
check:
|
||||||
|
waitRes[0] == true
|
||||||
|
waitRes[1] == 100
|
||||||
|
else:
|
||||||
|
check:
|
||||||
|
waitRes[0] == true
|
||||||
|
|
||||||
|
await raceTest(void, 0)
|
||||||
|
await raceTest(void, 1)
|
||||||
|
await raceTest(void, 2)
|
||||||
|
await raceTest(int, 0)
|
||||||
|
await raceTest(int, 1)
|
||||||
|
await raceTest(int, 2)
|
||||||
|
|
||||||
|
asyncTest "Timeout/cancellation race wait(future) test":
|
||||||
|
proc raceTest(T: typedesc, itype: int) {.async.} =
|
||||||
|
let monitorFuture = newFuture[T]()
|
||||||
|
|
||||||
|
proc raceProc0(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await future
|
||||||
|
proc raceProc1(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc0(future)
|
||||||
|
proc raceProc2(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc1(future)
|
||||||
|
|
||||||
|
proc continuation(udata: pointer) {.gcsafe.} =
|
||||||
|
if itype == 0:
|
||||||
|
when T is void:
|
||||||
|
monitorFuture.complete()
|
||||||
|
elif T is int:
|
||||||
|
monitorFuture.complete(100)
|
||||||
|
elif itype == 1:
|
||||||
|
monitorFuture.fail(newException(ValueError, "test"))
|
||||||
|
else:
|
||||||
|
monitorFuture.cancelAndSchedule()
|
||||||
|
|
||||||
|
let deadlineFuture = newFuture[void]()
|
||||||
|
deadlineFuture.addCallback continuation
|
||||||
|
|
||||||
|
let
|
||||||
|
testFut = raceProc2(monitorFuture)
|
||||||
|
waitFut = wait(testFut, deadlineFuture)
|
||||||
|
|
||||||
|
deadlineFuture.complete()
|
||||||
|
|
||||||
|
when T is void:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
await waitFut
|
||||||
|
if itype == 0:
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
false
|
||||||
|
except CancelledError:
|
||||||
|
false
|
||||||
|
except CatchableError:
|
||||||
|
if itype != 0:
|
||||||
|
true
|
||||||
|
else:
|
||||||
|
false
|
||||||
|
check waitRes == true
|
||||||
|
elif T is int:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
let res = await waitFut
|
||||||
|
if itype == 0:
|
||||||
|
(true, res)
|
||||||
|
else:
|
||||||
|
(false, -1)
|
||||||
|
except CancelledError:
|
||||||
|
(false, -1)
|
||||||
|
except CatchableError:
|
||||||
|
if itype != 0:
|
||||||
|
(true, 0)
|
||||||
|
else:
|
||||||
|
(false, -1)
|
||||||
|
if itype == 0:
|
||||||
|
check:
|
||||||
|
waitRes[0] == true
|
||||||
|
waitRes[1] == 100
|
||||||
|
else:
|
||||||
|
check:
|
||||||
|
waitRes[0] == true
|
||||||
|
|
||||||
|
await raceTest(void, 0)
|
||||||
|
await raceTest(void, 1)
|
||||||
|
await raceTest(void, 2)
|
||||||
|
await raceTest(int, 0)
|
||||||
|
await raceTest(int, 1)
|
||||||
|
await raceTest(int, 2)
|
||||||
|
|
||||||
|
asyncTest "Timeout/cancellation race withTimeout() test":
|
||||||
|
proc raceTest(T: typedesc, itype: int) {.async.} =
|
||||||
|
let monitorFuture = newFuture[T]("monitor",
|
||||||
|
{FutureFlag.OwnCancelSchedule})
|
||||||
|
|
||||||
|
proc raceProc0(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await future
|
||||||
|
proc raceProc1(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc0(future)
|
||||||
|
proc raceProc2(future: Future[T]): Future[T] {.async.} =
|
||||||
|
await raceProc1(future)
|
||||||
|
|
||||||
|
proc activation(udata: pointer) {.gcsafe.} =
|
||||||
|
if itype == 0:
|
||||||
|
when T is void:
|
||||||
|
monitorFuture.complete()
|
||||||
|
elif T is int:
|
||||||
|
monitorFuture.complete(100)
|
||||||
|
elif itype == 1:
|
||||||
|
monitorFuture.fail(newException(ValueError, "test"))
|
||||||
|
else:
|
||||||
|
monitorFuture.cancelAndSchedule()
|
||||||
|
|
||||||
|
monitorFuture.cancelCallback = activation
|
||||||
|
let
|
||||||
|
testFut = raceProc2(monitorFuture)
|
||||||
|
waitFut = withTimeout(testFut, 10.milliseconds)
|
||||||
|
|
||||||
|
when T is void:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
await waitFut
|
||||||
|
except CancelledError:
|
||||||
|
false
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
if itype == 0:
|
||||||
|
check waitRes == true
|
||||||
|
elif itype == 1:
|
||||||
|
check waitRes == true
|
||||||
|
else:
|
||||||
|
check waitRes == false
|
||||||
|
elif T is int:
|
||||||
|
let waitRes =
|
||||||
|
try:
|
||||||
|
await waitFut
|
||||||
|
except CancelledError:
|
||||||
|
false
|
||||||
|
except CatchableError:
|
||||||
|
false
|
||||||
|
if itype == 0:
|
||||||
|
check waitRes == true
|
||||||
|
elif itype == 1:
|
||||||
|
check waitRes == true
|
||||||
|
else:
|
||||||
|
check waitRes == false
|
||||||
|
|
||||||
|
await raceTest(void, 0)
|
||||||
|
await raceTest(void, 1)
|
||||||
|
await raceTest(void, 2)
|
||||||
|
await raceTest(int, 0)
|
||||||
|
await raceTest(int, 1)
|
||||||
|
await raceTest(int, 2)
|
||||||
|
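For contrast with `wait()`, the last block exercises `withTimeout()`, which reports the outcome as a bool instead of raising `AsyncTimeoutError`. A minimal sketch of that usage (return-value semantics as checked by the test above):

import chronos

proc demo() {.async.} =
  let quick = sleepAsync(10.milliseconds)
  # true when `quick` finished before the 1 second timeout expired
  let finishedInTime = await withTimeout(quick, 1.seconds)
  doAssert finishedInTime

waitFor demo()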
@@ -1518,3 +1518,63 @@ suite "HTTP client testing suite":
      res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed
      res.error.isRecoverableError()
      not(res.error.isCriticalError())

asyncTest "HTTPS response headers buffer size test":
|
||||||
|
const HeadersSize = HttpMaxHeadersSize
|
||||||
|
let expectValue =
|
||||||
|
string.fromBytes(createBigMessage("HEADERSTEST", HeadersSize))
|
||||||
|
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||||
|
async: (raises: [CancelledError]).} =
|
||||||
|
if r.isOk():
|
||||||
|
let request = r.get()
|
||||||
|
try:
|
||||||
|
case request.uri.path
|
||||||
|
of "/test":
|
||||||
|
let headers = HttpTable.init([("big-header", expectValue)])
|
||||||
|
await request.respond(Http200, "ok", headers)
|
||||||
|
else:
|
||||||
|
await request.respond(Http404, "Page not found")
|
||||||
|
except HttpWriteError as exc:
|
||||||
|
defaultResponse(exc)
|
||||||
|
else:
|
||||||
|
defaultResponse()
|
||||||
|
|
||||||
|
var server = createServer(initTAddress("127.0.0.1:0"), process, false)
|
||||||
|
server.start()
|
||||||
|
let
|
||||||
|
address = server.instance.localAddress()
|
||||||
|
ha = getAddress(address, HttpClientScheme.NonSecure, "/test")
|
||||||
|
session = HttpSessionRef.new()
|
||||||
|
let
|
||||||
|
req1 = HttpClientRequestRef.new(session, ha)
|
||||||
|
req2 =
|
||||||
|
HttpClientRequestRef.new(session, ha,
|
||||||
|
maxResponseHeadersSize = HttpMaxHeadersSize * 2)
|
||||||
|
res1 =
|
||||||
|
try:
|
||||||
|
let res {.used.} = await send(req1)
|
||||||
|
await closeWait(req1)
|
||||||
|
await closeWait(res)
|
||||||
|
false
|
||||||
|
except HttpReadError:
|
||||||
|
true
|
||||||
|
except HttpError:
|
||||||
|
await closeWait(req1)
|
||||||
|
false
|
||||||
|
except CancelledError:
|
||||||
|
await closeWait(req1)
|
||||||
|
false
|
||||||
|
|
||||||
|
res2 = await send(req2)
|
||||||
|
|
||||||
|
check:
|
||||||
|
res1 == true
|
||||||
|
res2.status == 200
|
||||||
|
res2.headers.getString("big-header") == expectValue
|
||||||
|
|
||||||
|
await req1.closeWait()
|
||||||
|
await req2.closeWait()
|
||||||
|
await res2.closeWait()
|
||||||
|
await session.closeWait()
|
||||||
|
await server.stop()
|
||||||
|
await server.closeWait()
|
||||||
|
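The test above tunes the client-side response-header limit. A minimal sketch of the same knob in isolation; the import path and the `HttpAddress`/`HttpClientResponseRef` type names are assumptions inferred from the calls in the test, and closing the request and response is left to the caller, as the test does.

import chronos
import chronos/apps/http/httpclient

proc fetchWithBigHeaders(session: HttpSessionRef,
                         ha: HttpAddress): Future[HttpClientResponseRef] {.
    async.} =
  # Allow response headers up to twice the default limit for this request.
  let request = HttpClientRequestRef.new(session, ha,
    maxResponseHeadersSize = HttpMaxHeadersSize * 2)
  return await send(request)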
@@ -13,6 +13,11 @@ import stew/base10

{.used.}

# Trouble finding this if defined near its use for `data2.sorted`, etc., likely
# related to "generic sandwich" issues. If any test ever wants to `sort` a
# `seq[(string, seq[string])]` differently, it may need to re-work that test.
proc `<`(a, b: (string, seq[string])): bool = a[0] < b[0]

suite "HTTP server testing suite":
  teardown:
    checkLeaks()
@@ -846,11 +851,11 @@ suite "HTTP server testing suite":
    for key, value in table1.items(true):
      data2.add((key, value))

    check:
      data1 == @[("Header2", "value2"), ("Header2", "VALUE3"),
                 ("Header1", "value1")]
      data2 == @[("Header2", @["value2", "VALUE3"]),
                 ("Header1", @["value1"])]
    check: # .sorted to not depend upon hash(key)-order
      data1.sorted == sorted(@[("Header2", "value2"), ("Header2", "VALUE3"),
                               ("Header1", "value1")])
      data2.sorted == sorted(@[("Header2", @["value2", "VALUE3"]),
                               ("Header1", @["value1"])])

    table1.set("header2", "value4")
    check:
@ -1785,3 +1790,40 @@ suite "HTTP server testing suite":
|
|||||||
|
|
||||||
await server.stop()
|
await server.stop()
|
||||||
await server.closeWait()
|
await server.closeWait()
|
||||||
|
|
||||||
|
asyncTest "HTTP server - baseUri value test":
|
||||||
|
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||||
|
async: (raises: [CancelledError]).} =
|
||||||
|
defaultResponse()
|
||||||
|
|
||||||
|
let
|
||||||
|
expectUri2 = "http://www.chronos-test.com/"
|
||||||
|
address = initTAddress("127.0.0.1:0")
|
||||||
|
socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||||
|
res1 = HttpServerRef.new(address, process,
|
||||||
|
socketFlags = socketFlags)
|
||||||
|
res2 = HttpServerRef.new(address, process,
|
||||||
|
socketFlags = socketFlags,
|
||||||
|
serverUri = parseUri(expectUri2))
|
||||||
|
check:
|
||||||
|
res1.isOk == true
|
||||||
|
res2.isOk == true
|
||||||
|
|
||||||
|
let
|
||||||
|
server1 = res1.get()
|
||||||
|
server2 = res2.get()
|
||||||
|
|
||||||
|
try:
|
||||||
|
server1.start()
|
||||||
|
server2.start()
|
||||||
|
let
|
||||||
|
localAddress = server1.instance.localAddress()
|
||||||
|
expectUri1 = "http://127.0.0.1:" & $localAddress.port & "/"
|
||||||
|
check:
|
||||||
|
server1.baseUri == parseUri(expectUri1)
|
||||||
|
server2.baseUri == parseUri(expectUri2)
|
||||||
|
finally:
|
||||||
|
await server1.stop()
|
||||||
|
await server1.closeWait()
|
||||||
|
await server2.stop()
|
||||||
|
await server2.closeWait()
|
||||||
|
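A minimal sketch of the `serverUri` override the test verifies: when it is supplied, `baseUri` reports that URI rather than one derived from the bound socket. Import paths, parameter defaults and the example URI are assumptions; the calls themselves (`HttpServerRef.new`, `start`, `baseUri`, `stop`, `closeWait`) are the ones used above.

import std/uri
import chronos
import chronos/apps/http/httpserver

proc demo() {.async.} =
  proc process(r: RequestFence): Future[HttpResponseRef] {.
      async: (raises: [CancelledError]).} =
    defaultResponse()

  let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process,
                              serverUri = parseUri("http://api.example.test/"))
  doAssert res.isOk
  let server = res.get()
  server.start()
  doAssert server.baseUri == parseUri("http://api.example.test/")
  await server.stop()
  await server.closeWait()

waitFor demo()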
@@ -8,6 +8,7 @@
import std/[macros, strutils]
import unittest2
import ../chronos
import ../chronos/config

{.used.}

@@ -519,7 +520,7 @@ suite "Exceptions tracking":

    noraises()

  test "Nocancel errors":
  test "Nocancel errors with raises":
    proc testit {.async: (raises: [ValueError, CancelledError]).} =
      await sleepAsync(5.milliseconds)
      raise (ref ValueError)()
@@ -535,6 +536,36 @@ suite "Exceptions tracking":

    noraises()

test "Nocancel with no errors":
|
||||||
|
proc testit {.async: (raises: [CancelledError]).} =
|
||||||
|
await sleepAsync(5.milliseconds)
|
||||||
|
|
||||||
|
proc test {.async: (raises: []).} =
|
||||||
|
await noCancel testit()
|
||||||
|
|
||||||
|
proc noraises() {.raises: [].} =
|
||||||
|
let f = test()
|
||||||
|
waitFor(f.cancelAndWait())
|
||||||
|
waitFor(f)
|
||||||
|
|
||||||
|
noraises()
|
||||||
|
|
||||||
|
test "Nocancel errors without raises":
|
||||||
|
proc testit {.async.} =
|
||||||
|
await sleepAsync(5.milliseconds)
|
||||||
|
raise (ref ValueError)()
|
||||||
|
|
||||||
|
proc test {.async.} =
|
||||||
|
await noCancel testit()
|
||||||
|
|
||||||
|
proc noraises() =
|
||||||
|
expect(ValueError):
|
||||||
|
let f = test()
|
||||||
|
waitFor(f.cancelAndWait())
|
||||||
|
waitFor(f)
|
||||||
|
|
||||||
|
noraises()
|
||||||
|
|
||||||
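The two new tests pin down how `noCancel` interacts with the `raises` annotations. A condensed sketch of the compile-time effect (mirroring the first test above): `noCancel` removes `CancelledError` from the awaited call's raises set, so a `raises: []` proc may await it.

import chronos

proc onlyCancel() {.async: (raises: [CancelledError]).} =
  await sleepAsync(1.milliseconds)

proc nothingRaises() {.async: (raises: []).} =
  # Compiles because noCancel strips CancelledError from onlyCancel's raises.
  await noCancel onlyCancel()

waitFor nothingRaises()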
test "Defect on wrong exception type at runtime":
|
test "Defect on wrong exception type at runtime":
|
||||||
{.push warning[User]: off}
|
{.push warning[User]: off}
|
||||||
let f = InternalRaisesFuture[void, (ValueError,)]()
|
let f = InternalRaisesFuture[void, (ValueError,)]()
|
||||||
@ -556,6 +587,20 @@ suite "Exceptions tracking":
|
|||||||
|
|
||||||
waitFor(callCatchAll())
|
waitFor(callCatchAll())
|
||||||
|
|
||||||
|
test "Global handleException does not override local annotations":
|
||||||
|
when chronosHandleException:
|
||||||
|
proc unnanotated() {.async.} = raise (ref CatchableError)()
|
||||||
|
|
||||||
|
checkNotCompiles:
|
||||||
|
proc annotated() {.async: (raises: [ValueError]).} =
|
||||||
|
raise (ref CatchableError)()
|
||||||
|
|
||||||
|
checkNotCompiles:
|
||||||
|
proc noHandleException() {.async: (handleException: false).} =
|
||||||
|
raise (ref Exception)()
|
||||||
|
else:
|
||||||
|
skip()
|
||||||
|
|
||||||
test "Results compatibility":
|
test "Results compatibility":
|
||||||
proc returnOk(): Future[Result[int, string]] {.async: (raises: []).} =
|
proc returnOk(): Future[Result[int, string]] {.async: (raises: []).} =
|
||||||
ok(42)
|
ok(42)
|
||||||
|
|||||||
@ -186,3 +186,49 @@ suite "Secure HTTP server testing suite":
|
|||||||
return serverRes and data == "EXCEPTION"
|
return serverRes and data == "EXCEPTION"
|
||||||
|
|
||||||
check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true
|
check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true
|
||||||
|
|
||||||
|
asyncTest "HTTPS server - baseUri value test":
|
||||||
|
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||||
|
async: (raises: [CancelledError]).} =
|
||||||
|
defaultResponse()
|
||||||
|
|
||||||
|
let
|
||||||
|
expectUri2 = "https://www.chronos-test.com/"
|
||||||
|
address = initTAddress("127.0.0.1:0")
|
||||||
|
socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||||
|
serverFlags = {Secure}
|
||||||
|
secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||||
|
secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||||
|
res1 = SecureHttpServerRef.new(address, process,
|
||||||
|
socketFlags = socketFlags,
|
||||||
|
serverFlags = serverFlags,
|
||||||
|
tlsPrivateKey = secureKey,
|
||||||
|
tlsCertificate = secureCert)
|
||||||
|
res2 = SecureHttpServerRef.new(address, process,
|
||||||
|
socketFlags = socketFlags,
|
||||||
|
serverFlags = serverFlags,
|
||||||
|
serverUri = parseUri(expectUri2),
|
||||||
|
tlsPrivateKey = secureKey,
|
||||||
|
tlsCertificate = secureCert)
|
||||||
|
check:
|
||||||
|
res1.isOk == true
|
||||||
|
res2.isOk == true
|
||||||
|
|
||||||
|
let
|
||||||
|
server1 = res1.get()
|
||||||
|
server2 = res2.get()
|
||||||
|
|
||||||
|
try:
|
||||||
|
server1.start()
|
||||||
|
server2.start()
|
||||||
|
let
|
||||||
|
localAddress = server1.instance.localAddress()
|
||||||
|
expectUri1 = "https://127.0.0.1:" & $localAddress.port & "/"
|
||||||
|
check:
|
||||||
|
server1.baseUri == parseUri(expectUri1)
|
||||||
|
server2.baseUri == parseUri(expectUri2)
|
||||||
|
finally:
|
||||||
|
await server1.stop()
|
||||||
|
await server1.closeWait()
|
||||||
|
await server2.stop()
|
||||||
|
await server2.closeWait()
|
||||||
|
|||||||
@ -1486,6 +1486,170 @@ suite "Stream Transport test suite":
      await server.closeWait()
      testResult

  proc performAutoAddressTest(port: Port,
                              family: AddressFamily): Future[bool] {.
      async: (raises: []).} =
    let server =
      block:
        var currentPort = port
        var res: StreamServer
        for i in 0 ..< 10:
          res =
            try:
              createStreamServer(port, flags = {ServerFlags.ReuseAddr})
            except TransportOsError as exc:
              echo "Unable to create server on port ", currentPort,
                   " with error: ", exc.msg
              currentPort = Port(uint16(currentPort) + 1'u16)
              nil
          if not(isNil(res)):
            break
        doAssert(not(isNil(res)), "Unable to create server, giving up")
        res

    var
      address =
        case family
        of AddressFamily.IPv4:
          try:
            initTAddress("127.0.0.1:0")
          except TransportAddressError as exc:
            raiseAssert exc.msg
        of AddressFamily.IPv6:
          try:
            initTAddress("::1:0")
          except TransportAddressError as exc:
            raiseAssert exc.msg
        of AddressFamily.Unix, AddressFamily.None:
          raiseAssert "Not allowed"

    address.port = server.localAddress().port
    var acceptFut = server.accept()
    let
      clientTransp =
        try:
          let res = await connect(address).wait(2.seconds)
          Opt.some(res)
        except CatchableError:
          Opt.none(StreamTransport)
      serverTransp =
        if clientTransp.isSome():
          let res =
            try:
              await noCancel acceptFut
            except TransportError as exc:
              raiseAssert exc.msg
          Opt.some(res)
        else:
          Opt.none(StreamTransport)

    let testResult = clientTransp.isSome() and serverTransp.isSome()
    var pending: seq[FutureBase]
    if clientTransp.isSome():
      pending.add(closeWait(clientTransp.get()))
    if serverTransp.isSome():
      pending.add(closeWait(serverTransp.get()))
    else:
      pending.add(cancelAndWait(acceptFut))
    await noCancel allFutures(pending)
    try:
      server.stop()
    except TransportError as exc:
      raiseAssert exc.msg
    await server.closeWait()
    testResult

  proc performAutoAddressTest2(
      address1: Opt[IpAddress],
      address2: Opt[IpAddress],
      port: Port,
      sendType: AddressFamily
  ): Future[bool] {.async: (raises: []).} =
    let
      server =
        block:
          var
            currentPort = port
            res: StreamServer
          for i in 0 ..< 10:
            res =
              try:
                createStreamServer(port, host = address1,
                                   flags = {ServerFlags.ReuseAddr})
              except TransportOsError as exc:
                echo "Unable to create server on port ", currentPort,
                     " with error: ", exc.msg
                currentPort = Port(uint16(currentPort) + 1'u16)
                nil
            if not(isNil(res)):
              break
          doAssert(not(isNil(res)), "Unable to create server, giving up")
          res
      serverAddr = server.localAddress()
      serverPort = serverAddr.port
      remoteAddress =
        try:
          case sendType
          of AddressFamily.IPv4:
            var res = initTAddress("127.0.0.1:0")
            res.port = serverPort
            res
          of AddressFamily.IPv6:
            var res = initTAddress("[::1]:0")
            res.port = serverPort
            res
          else:
            raiseAssert "Incorrect sending type"
        except TransportAddressError as exc:
          raiseAssert "Unable to initialize transport address, " &
                      "reason = " & exc.msg
      acceptFut = server.accept()

    let
      clientTransp =
        try:
          if address2.isSome():
            let
              laddr = initTAddress(address2.get(), Port(0))
              res = await connect(remoteAddress, localAddress = laddr).
                    wait(2.seconds)
            Opt.some(res)
          else:
            let res = await connect(remoteAddress).wait(2.seconds)
            Opt.some(res)
        except CatchableError:
          Opt.none(StreamTransport)
      serverTransp =
        if clientTransp.isSome():
          let res =
            try:
              await noCancel acceptFut
            except TransportError as exc:
              raiseAssert exc.msg
          Opt.some(res)
        else:
          Opt.none(StreamTransport)
      testResult =
        clientTransp.isSome() and serverTransp.isSome() and
        (serverTransp.get().remoteAddress2().get().family == sendType) and
        (clientTransp.get().remoteAddress2().get().family == sendType)
    var pending: seq[FutureBase]
    if clientTransp.isSome():
      pending.add(closeWait(clientTransp.get()))
    if serverTransp.isSome():
      pending.add(closeWait(serverTransp.get()))
    else:
      pending.add(cancelAndWait(acceptFut))
    await noCancel allFutures(pending)
    try:
      server.stop()
    except TransportError as exc:
      raiseAssert exc.msg
    await server.closeWait()

    testResult

    markFD = getCurrentFD()

    for i in 0..<len(addresses):
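
Both helpers above share the same teardown idiom: close whichever transports were actually created, cancel the still-pending accept otherwise, and shield the whole cleanup from cancellation with `noCancel`. The following is a minimal standalone sketch of that idiom, pulled out for readability; the proc name `teardown` is hypothetical, everything else reuses calls that appear in the hunk above:

    import chronos, results

    proc teardown(server: StreamServer,
                  acceptFut: Future[StreamTransport],
                  client, peer: Opt[StreamTransport]) {.async: (raises: []).} =
      var pending: seq[FutureBase]
      if client.isSome():
        pending.add(closeWait(client.get()))
      if peer.isSome():
        pending.add(closeWait(peer.get()))
      else:
        # Nothing was accepted, so the accept future must not be left dangling.
        pending.add(cancelAndWait(acceptFut))
      # noCancel keeps the cleanup running even if the caller itself is cancelled.
      await noCancel allFutures(pending)
      try:
        server.stop()
      except TransportError as exc:
        raiseAssert exc.msg
      await server.closeWait()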
@ -1668,6 +1832,96 @@ suite "Stream Transport test suite":
          DualStackType.Disabled, initTAddress("[::1]:0"))) == true
    else:
      skip()

  asyncTest "[IP] Auto-address constructor test (*:0)":
    if isAvailable(AddressFamily.IPv6):
      check:
        (await performAutoAddressTest(Port(0), AddressFamily.IPv6)) == true
      # If IPv6 is available createStreamServer should bind to `::` this means
      # that we should be able to connect to it via IPV4_MAPPED address, but
      # only when IPv4 is also available.
      if isAvailable(AddressFamily.IPv4):
        check:
          (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true
    else:
      # If IPv6 is not available createStreamServer should bind to `0.0.0.0`
      # this means we should be able to connect to it via IPV4 address.
      if isAvailable(AddressFamily.IPv4):
        check:
          (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true

  asyncTest "[IP] Auto-address constructor test (*:30532)":
    if isAvailable(AddressFamily.IPv6):
      check:
        (await performAutoAddressTest(Port(30532), AddressFamily.IPv6)) == true
      # If IPv6 is available createStreamServer should bind to `::` this means
      # that we should be able to connect to it via IPV4_MAPPED address, but
      # only when IPv4 is also available.
      if isAvailable(AddressFamily.IPv4):
        check:
          (await performAutoAddressTest(Port(30532), AddressFamily.IPv4)) ==
            true
    else:
      # If IPv6 is not available createStreamServer should bind to `0.0.0.0`
      # this means we should be able to connect to it via IPV4 address.
      if isAvailable(AddressFamily.IPv4):
        check:
          (await performAutoAddressTest(Port(30532), AddressFamily.IPv4)) ==
            true

  for portNumber in [Port(0), Port(30231)]:
    asyncTest "[IP] IPv6 mapping test (auto-auto:" & $int(portNumber) & ")":
      if isAvailable(AddressFamily.IPv6):
        let
          address1 = Opt.none(IpAddress)
          address2 = Opt.none(IpAddress)
        check:
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv4))
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv6))
      else:
        skip()
    asyncTest "[IP] IPv6 mapping test (auto-ipv6:" & $int(portNumber) & ")":
      if isAvailable(AddressFamily.IPv6):
        let
          address1 = Opt.none(IpAddress)
          address2 = Opt.some(initTAddress("[::1]:0").toIpAddress())
        check:
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv6))
      else:
        skip()
    asyncTest "[IP] IPv6 mapping test (auto-ipv4:" & $int(portNumber) & ")":
      if isAvailable(AddressFamily.IPv6):
        let
          address1 = Opt.none(IpAddress)
          address2 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress())
        check:
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv4))
      else:
        skip()
    asyncTest "[IP] IPv6 mapping test (ipv6-auto:" & $int(portNumber) & ")":
      if isAvailable(AddressFamily.IPv6):
        let
          address1 = Opt.some(initTAddress("[::1]:0").toIpAddress())
          address2 = Opt.none(IpAddress)
        check:
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv6))
      else:
        skip()
    asyncTest "[IP] IPv6 mapping test (ipv4-auto:" & $int(portNumber) & ")":
      if isAvailable(AddressFamily.IPv6):
        let
          address1 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress())
          address2 = Opt.none(IpAddress)
        check:
          (await performAutoAddressTest2(
            address1, address2, portNumber, AddressFamily.IPv4))
      else:
        skip()

  test "File descriptors leak test":
    when defined(windows):
      # Windows handle numbers depends on many conditions, so we can't use
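
The comments in these tests describe the behaviour being pinned down: bound with only a port, the server ends up on `::` (or `0.0.0.0` when IPv6 is unavailable), and a plain IPv4 client can still reach it, showing up with an IPv4 remote address. Below is a hedged standalone sketch of that round trip outside the test harness; `mappedAddressDemo` is a hypothetical name, the calls themselves all appear in the hunks above, and the final check assumes the dual-stack default these tests rely on:

    import chronos

    proc mappedAddressDemo(): Future[bool] {.async.} =
      # Bind using only a port; per the comments above this selects `::` when
      # IPv6 is available and `0.0.0.0` otherwise.
      let server = createStreamServer(Port(0), flags = {ServerFlags.ReuseAddr})
      # Recover the OS-assigned port and connect as an ordinary IPv4 client.
      var address = initTAddress("127.0.0.1:0")
      address.port = server.localAddress().port
      let
        acceptFut = server.accept()
        client = await connect(address)
        peer = await acceptFut
      # The accepted transport should report an IPv4 (un-mapped) remote address.
      let ok = peer.remoteAddress2().get().family == AddressFamily.IPv4
      await allFutures(closeWait(client), closeWait(peer))
      server.stop()
      await server.closeWait()
      ok

    when isMainModule:
      echo waitFor mappedAddressDemo()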
@ -89,6 +89,9 @@ suite "Asynchronous timers & steps test suite":
      $nanoseconds(1_000_000_900) == "1s900ns"
      $nanoseconds(1_800_700_000) == "1s800ms700us"
      $nanoseconds(1_800_000_600) == "1s800ms600ns"
      nanoseconds(1_800_000_600).toString(0) == ""
      nanoseconds(1_800_000_600).toString(1) == "1s"
      nanoseconds(1_800_000_600).toString(2) == "1s800ms"

  test "Asynchronous steps test":
    var fut1 = stepsAsync(1)
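
The three added checks exercise a `toString` overload that limits output to the N most significant components, in contrast to `$`, which prints every non-zero component. A small illustration of the difference, assuming `nanoseconds`, `$` and this `toString` are all reachable via `chronos/timer` as the surrounding checks suggest:

    import chronos/timer

    let d = nanoseconds(1_800_000_600)
    echo $d            # "1s800ms600ns" - every non-zero component
    echo d.toString(0) # ""             - no components requested
    echo d.toString(1) # "1s"           - most significant component only
    echo d.toString(2) # "1s800ms"      - two most significant components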