mirror of https://github.com/waku-org/nwaku.git (synced 2025-02-27 06:20:54 +00:00)
deploy: d1c1a0ca13e3aa2690a6550faca13210e1f46877
This commit is contained in:
parent f3b8e07ec1
commit 112da56662

10 .gitmodules (vendored)
@@ -110,3 +110,13 @@
 	path = vendor/rln
 	url = https://github.com/kilic/rln
 	branch = full-node
+[submodule "vendor/nim-testutils"]
+	path = vendor/nim-testutils
+	url = https://github.com/status-im/nim-testutils.git
+	ignore = untracked
+	branch = master
+[submodule "vendor/nim-unittest2"]
+	path = vendor/nim-unittest2
+	url = https://github.com/status-im/nim-unittest2.git
+	ignore = untracked
+	branch = master
@@ -1 +1 @@
-1616748591
+1616752324
@@ -10,18 +10,14 @@ import

 const clientId = "Waku example v1"

-let
-  # Load the cli configuration from `config_example.nim`.
-  config = WakuNodeConf.load()
-  # Seed the rng.
-  rng = keys.newRng()
+proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext) =
   # Set up the address according to NAT information.
-  (ipExt, tcpPortExt, udpPortExt) = setupNat(config.nat, clientId,
+  let (ipExt, tcpPortExt, udpPortExt) = setupNat(config.nat, clientId,
     Port(config.tcpPort + config.portsShift),
     Port(config.udpPort + config.portsShift))
   # TODO: EthereumNode should have a better split of binding address and
   # external address. Also, can't have different ports as it stands now.
-  address = if ipExt.isNone():
+  let address = if ipExt.isNone():
       Address(ip: parseIpAddress("0.0.0.0"),
               tcpPort: Port(config.tcpPort + config.portsShift),
               udpPort: Port(config.udpPort + config.portsShift))
@@ -91,7 +87,9 @@ let
   discard node.subscribeFilter(filter, handler)

   # Repeat the posting of a message every 5 seconds.
-  proc repeatMessage(udata: pointer) {.gcsafe.} =
+  # https://github.com/nim-lang/Nim/issues/17369
+  var repeatMessage: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  repeatMessage = proc(udata: pointer) =
+    {.gcsafe.}:
       # Post a waku message on the network, encrypted with provided symmetric key,
       # signed with asymmetric key, on topic and with ttl of 30 seconds.
@@ -106,3 +104,9 @@ proc repeatMessage(udata: pointer) {.gcsafe.} =
   discard setTimer(Moment.fromNow(5.seconds), repeatMessage)

-runForever()
+  runForever()
+
+when isMainModule:
+  let
+    rng = keys.newRng()
+    conf = WakuNodeConf.load()
+  run(conf, rng)
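The closure-declaration pattern introduced in the hunk above (declare the `var` with an explicit pragma list first, assign the lambda afterwards) is the usual workaround for nim-lang/Nim#17369, where pragmas on an inline lambda are not picked up correctly. A minimal, standalone sketch of the same pattern, assuming chronos only (the counter and timings are illustrative, not from the commit):

```nim
import chronos

var ticks = 0

# Declare the closure with its raises/gcsafe pragmas first, then assign it;
# annotating the assignment's lambda directly triggers nim-lang/Nim#17369.
var tick: proc(udata: pointer) {.gcsafe, raises: [Defect].}
tick = proc(udata: pointer) =
  {.gcsafe.}:
    inc ticks
    if ticks < 3:
      # Re-arm the timer from inside the callback, as the example does.
      discard setTimer(Moment.fromNow(100.milliseconds), tick)

discard setTimer(Moment.fromNow(100.milliseconds), tick)
waitFor sleepAsync(500.milliseconds)
```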
@@ -1,5 +1,5 @@
 import
-  unittest, chronos, bearssl,
+  chronos, bearssl,
   eth/[keys, p2p]

 import libp2p/crypto/crypto
@@ -21,18 +21,6 @@ proc setupTestNode*(
   for capability in capabilities:
     result.addCapability capability

-template asyncTest*(name, body: untyped) =
-  test name:
-    proc scenario {.async.} = body
-    waitFor scenario()
-
-template procSuite*(name, body: untyped) =
-  proc suitePayload =
-    suite name:
-      body
-
-  suitePayload()
-
 # Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28
 type RngWrap = object
   rng: ref BrHmacDrbgContext
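The `asyncTest`/`procSuite` templates removed here are provided in equivalent form by `testutils/unittests` from the nim-testutils submodule added above, which is why the test files below only have to swap their imports. A minimal sketch of a test written against that module (suite and test names are illustrative):

```nim
import testutils/unittests, chronos

procSuite "example suite":
  asyncTest "can await inside the test body":
    # asyncTest wraps the body in an async proc and drives it with waitFor.
    await sleepAsync(1.milliseconds)
    check true
```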
@@ -9,8 +9,8 @@
 {.used.}

 import
-  std/[sequtils, unittest, tables],
-  chronos, eth/p2p, eth/p2p/peer_pool,
+  std/[sequtils, tables],
+  chronos, testutils/unittests, eth/p2p, eth/p2p/peer_pool,
   eth/p2p/rlpx_protocols/whisper_protocol as whisper,
   ../../waku/v1/protocol/waku_protocol as waku,
   ../../waku/v1/protocol/waku_bridge,
@@ -9,8 +9,8 @@
 {.used.}

 import
-  std/[sequtils, tables, unittest],
-  chronos, eth/[keys, p2p], eth/p2p/peer_pool,
+  std/[sequtils, tables],
+  chronos, testutils/unittests, eth/[keys, p2p], eth/p2p/peer_pool,
   ../../waku/v1/protocol/waku_protocol,
   ../test_helpers
@@ -1,8 +1,8 @@
 {.used.}

 import
-  std/[unittest, tables, sequtils, times],
-  chronos, eth/[p2p, async_utils], eth/p2p/peer_pool,
+  std/[tables, sequtils, times],
+  chronos, testutils/unittests, eth/[p2p, async_utils], eth/p2p/peer_pool,
   ../../waku/v1/protocol/[waku_protocol, waku_mail],
   ../test_helpers
@@ -1,8 +1,8 @@
 {.used.}

 import
-  std/[unittest, options, sets, tables, os, strutils, sequtils],
-  stew/shims/net as stewNet,
+  std/[options, sets, tables, os, strutils, sequtils],
+  testutils/unittests, stew/shims/net as stewNet,
   json_rpc/[rpcserver, rpcclient],
   eth/[keys, rlp], eth/common/eth_types,
   libp2p/[standard_setup, switch, multiaddress],
@@ -1,8 +1,8 @@
 {.used.}

 import
-  std/[unittest, options, sets, tables, sequtils],
-  stew/shims/net as stewNet,
+  std/[options, sets, tables, sequtils],
+  testutils/unittests, stew/shims/net as stewNet,
   json_rpc/[rpcserver, rpcclient],
   eth/[keys, rlp], eth/common/eth_types,
   libp2p/[standard_setup, switch, multiaddress],
@@ -1,7 +1,8 @@
 {.used.}

 import
-  std/[unittest, strutils],
+  std/strutils,
+  testutils/unittests,
   chronicles, chronos, stew/shims/net as stewNet, stew/byteutils,
   libp2p/crypto/crypto,
   libp2p/crypto/secp,
@@ -1,8 +1,8 @@
 {.used.}

 import
-  std/[unittest, options, tables, sets],
-  chronos, chronicles,
+  std/[options, tables, sets],
+  testutils/unittests, chronos, chronicles,
   libp2p/switch,
   libp2p/protobuf/minprotobuf,
   libp2p/stream/[bufferstream, connection],
@@ -1,7 +1,7 @@
 {.used.}
 import
-  std/[unittest,algorithm,options],
-  nimcrypto/sha2,
+  std/[algorithm, options],
+  testutils/unittests, nimcrypto/sha2,
   ../../waku/v2/protocol/waku_store/waku_store,
   ../test_helpers
@@ -1,7 +1,7 @@
 {.used.}

 import
-  std/unittest,
+  testutils/unittests,
   ../../waku/v2/protocol/waku_message,
   ../../waku/v2/node/waku_payload,
   ../test_helpers
@@ -1,17 +1,14 @@
 {.used.}

 import
-  chronos, chronicles, options, stint, unittest,
-  web3,
+  std/options,
+  testutils/unittests, chronos, chronicles, stint, web3,
   stew/byteutils, stew/shims/net as stewNet,
   libp2p/crypto/crypto,
   ../../waku/v2/protocol/waku_rln_relay/[rln, waku_rln_relay_utils],
   ../../waku/v2/node/wakunode2,
   ../test_helpers,
-  test_utils
+  ./test_utils

 # the address of Ethereum client (ganache-cli for now)
 # TODO this address in hardcoded in the code, we may need to take it as input from the user
@@ -1,8 +1,8 @@
 {.used.}

 import
-  std/[unittest, options, tables, sets],
-  chronos, chronicles,
+  std/[options, tables, sets],
+  testutils/unittests, chronos, chronicles,
   libp2p/switch,
   libp2p/protobuf/minprotobuf,
   libp2p/stream/[bufferstream, connection],
@@ -1,7 +1,8 @@
 {.used.}

 import
-  std/[unittest, options, tables, sets],
+  std/[options, tables, sets],
+  testutils/unittests,
   chronos, chronicles, stew/shims/net as stewNet, stew/byteutils,
   libp2p/switch,
   libp2p/protobuf/minprotobuf,
@@ -1,7 +1,7 @@
 {.used.}

 import
-  std/unittest,
+  testutils/unittests,
   chronicles, chronos, stew/shims/net as stewNet, stew/byteutils,
   libp2p/crypto/crypto,
   libp2p/crypto/secp,
10 vendor/nim-chronicles/chronicles.nim (vendored)
@@ -366,6 +366,16 @@ template logFn(name: untyped, severity: typed, debug=false) {.dirty.} =
     wrapSideEffects(debug):
       log(instantiationInfo(), stream, severity, eventName, props)

+# workaround for https://github.com/status-im/nim-chronicles/issues/92
+when defined(windows) and (NimMajor, NimMinor, NimPatch) < (1, 4, 4):
+  logFn trace , LogLevel.TRACE, debug=true
+  logFn debug , LogLevel.DEBUG, debug=true
+  logFn info  , LogLevel.INFO, debug=true
+  logFn notice, LogLevel.NOTICE, debug=true
+  logFn warn  , LogLevel.WARN, debug=true
+  logFn error , LogLevel.ERROR, debug=true
+  logFn fatal , LogLevel.FATAL, debug=true
+else:
 logFn trace , LogLevel.TRACE, debug=true
 logFn debug , LogLevel.DEBUG
 logFn info  , LogLevel.INFO
2 vendor/nim-chronicles/chronicles.nimble (vendored)
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose

 packageName = "chronicles"
-version = "0.10.0"
+version = "0.10.1"
 author = "Status Research & Development GmbH"
 description = "A crafty implementation of structured logging for Nim"
 license = "Apache License 2.0"
@@ -86,7 +86,7 @@ else:
   JsonRecord*[Output; timestamps: static[TimestampsScheme]] = object
     output*: Output
     outStream: OutputStream
-    jsonWriter: JsonWriter
+    jsonWriter: Json.Writer

 export
   JsonString
@@ -659,7 +659,7 @@ proc initLogRecord*(r: var JsonRecord,
     r.record = newJsObject()
   else:
     r.outStream = memoryOutput()
-    r.jsonWriter = JsonWriter.init(r.outStream, pretty = false)
+    r.jsonWriter = Json.Writer.init(r.outStream, pretty = false)
     r.jsonWriter.beginRecord()

   if level != NONE:
@@ -4,3 +4,9 @@ func main =
   trace "effect-free"

 main()
+
+# issue #92
+proc test() {.raises: [Defect].} =
+  error "should not raises exception"
+
+test()
80 vendor/nim-chronos/README.md (vendored)
@@ -1,6 +1,6 @@
 # Chronos - An efficient library for asynchronous programming

-(previous CI badge, removed in this change)
+[GitHub Actions CI badge](https://github.com/status-im/nim-chronos/actions/workflows/ci.yml)
 [AppVeyor build badge](https://ci.appveyor.com/project/nimbus/nim-asyncdispatch2)
 [Apache 2.0 license badge](https://opensource.org/licenses/Apache-2.0)
 [MIT license badge](https://opensource.org/licenses/MIT)
@@ -8,16 +8,39 @@

 ## Introduction

-Chronos is an [asyncdispatch](https://nim-lang.org/docs/asyncdispatch.html)
-fork with a unified callback type, FIFO processing order for Future callbacks and [many other changes](https://github.com/status-im/nim-chronos/wiki/AsyncDispatch-comparison) that diverged from upstream's philosophy.
+Chronos is an efficient [async/await](https://en.wikipedia.org/wiki/Async/await) framework for Nim. Features include:
+
+* Efficient dispatch pipeline for asynchronous execution
+* HTTP server with SSL/TLS support out of the box (no OpenSSL needed)
+* Cancellation support
+* Synchronization primitives such as queues, events and locks
+* FIFO processing order of dispatch queue
+* Minimal exception effect support (see [exception effects](#exception-effects))

 ## Installation

 You can use Nim's official package manager Nimble to install Chronos:

 ```text
-$ nimble install https://github.com/status-im/nim-chronos.git
+nimble install https://github.com/status-im/nim-chronos.git
 ```
+
+or add a dependency to your `.nimble` file:
+
+```text
+requires "chronos"
+```
+
+## Projects using `chronos`
+
+* [libp2p](https://github.com/status-im/nim-libp2p) - Peer-to-Peer networking stack implemented in many languages
+* [Looper](https://github.com/bung87/Looper) - Web framework
+* [2DeFi](https://github.com/gogolxdong/2DeFi) - Decentralised file system
+
+`chronos` is available in the [Nim Playground](https://play.nim-lang.org/#ix=2TpS)
+
+Submit a PR to add yours!

 ## Documentation

 ### Concepts
@@ -176,15 +199,49 @@ proc p3() {.async.} =
     fut2 = p2()
   try:
     await fut1
-  except:
+  except CatchableError:
     echo "p1() failed: ", fut1.error.name, ": ", fut1.error.msg
+  echo "reachable code here"
   await fut2
 ```

-Exceptions inheriting from `Defect` are treated differently, being raised
-directly. Don't try to catch them coming out of `poll()`, because this would
-leave behind some zombie futures.
+Chronos does not allow future continuations and other callbacks to raise
+`CatchableError` - as such, calls to `poll` will never raise exceptions
+originating from tasks on the dispatcher queue. It is however possible that
+`Defect`s that happen in tasks bubble up through `poll`, as these are not
+caught by the transformation.
+
+### Platform independence
+
+Several functions in `chronos` are backed by the operating system, such as
+waiting for network events, creating files and sockets etc. The specific
+exceptions that are raised by the OS are platform-dependent, thus such functions
+are declared as raising `CatchableError` but will in general raise something
+more specific. In particular, it's possible that some functions that are
+annotated as raising `CatchableError` only raise on _some_ platforms - in order
+to work on all platforms, calling code must assume that they will raise even
+when they don't seem to do so on one platform.
+
+### Exception effects
+
+`chronos` currently offers minimal support for exception effects and `raises`
+annotations. In general, during the `async` transformation, a generic
+`except CatchableError` handler is added around the entire function being
+transformed, in order to catch any exceptions and transfer them to the `Future`.
+Because of this, the effect system thinks no exceptions are "leaking" because in
+fact, exception _handling_ is deferred to when the future is being read.
+
+Effectively, this means that while code can be compiled with
+`{.push raises: [Defect]}`, the intended effect propagation and checking is
+**disabled** for `async` functions.
+
+To enable checking exception effects in `async` code, enable strict mode with
+`-d:chronosStrictException`.
+
+In the strict mode, `async` functions are checked such that they only raise
+`CatchableError` and thus must make sure to explicitly specify exception
+effects on forward declarations, callbacks and methods using
+`{.raises: [CatchableError].}` (or more strict) annotations.
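As an illustration of the strict mode described above, here is a hedged sketch (not from the chronos sources; names and timings are illustrative) of code intended to compile with `-d:chronosStrictException`:

```nim
import chronos

# Compile with: nim c -d:chronosStrictException example.nim
# In strict mode the async transform checks that the body only leaks
# CatchableError; callbacks use the explicit raises list of CallbackFunc.
proc fetchAnswer(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  return 42

proc onTimer(udata: pointer) {.gcsafe, raises: [Defect].} =
  # Callbacks handed to the dispatcher must not leak CatchableError.
  discard

discard setTimer(Moment.fromNow(50.milliseconds), onTimer)
echo waitFor fetchAnswer()
```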
 ## TODO
 * Pipe/Subprocess Transports.
@@ -194,6 +251,12 @@ leave behind some zombie futures.

 When submitting pull requests, please add test cases for any new features or fixes and make sure `nimble test` is still able to execute the entire test suite successfully.

+`chronos` follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
+
+## Other resources
+
+* [Historical differences with asyncdispatch](https://github.com/status-im/nim-chronos/wiki/AsyncDispatch-comparison)
+
 ## License

 Licensed and distributed under either of
@@ -205,4 +268,3 @@ or
 * Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)

 at your option. These files may not be copied, modified, or distributed except according to those terms.
-
7 vendor/nim-chronos/chronos.nimble (vendored)
@@ -1,5 +1,5 @@
 packageName = "chronos"
-version = "2.6.1"
+version = "3.0.0"
 author = "Status Research & Development GmbH"
 description = "Chronos"
 license = "Apache License 2.0 or MIT"
@@ -10,12 +10,13 @@ skipDirs = @["tests"]
 requires "nim > 1.2.0",
          "stew",
          "bearssl",
-         "httputils"
+         "httputils",
+         "https://github.com/status-im/nim-unittest2.git#head"

 task test, "Run all tests":
   var commands = @[
     "nim c -r -d:useSysAssert -d:useGcAssert tests/",
     "nim c -r -d:chronosStackTrace tests/",
     "nim c -r -d:chronosStackTrace -d:chronosStrictException tests/",
     "nim c -r -d:release tests/",
     "nim c -r -d:release -d:chronosFutureTracking tests/"
   ]
4 vendor/nim-chronos/chronos/apps.nim (vendored)
@@ -6,5 +6,5 @@
 # Licensed under either of
 # Apache License, version 2.0, (LICENSE-APACHEv2)
 # MIT license (LICENSE-MIT)
-import apps/http/httpserver
-export httpserver
+import ./apps/http/[httpserver, shttpserver]
+export httpserver, shttpserver
187 vendor/nim-chronos/chronos/apps/http/httpserver.nim (vendored)
@@ -9,9 +9,9 @@
 import std/[tables, options, uri, strutils]
 import stew/[results, base10], httputils
 import ../../asyncloop, ../../asyncsync
-import ../../streams/[asyncstream, boundstream, chunkstream, tlsstream]
+import ../../streams/[asyncstream, boundstream, chunkstream]
 import httptable, httpcommon, multipart
-export httptable, httpcommon, httputils, multipart, tlsstream, asyncstream,
+export httptable, httpcommon, httputils, multipart, asyncstream,
        uri, tables, options, results

 type
@@ -44,7 +44,12 @@ type
     Empty, Prepared, Sending, Finished, Failed, Cancelled, Dumb

   HttpProcessCallback* =
-    proc(req: RequestFence): Future[HttpResponseRef] {.gcsafe.}
+    proc(req: RequestFence): Future[HttpResponseRef] {.
+      gcsafe, raises: [Defect, CatchableError].}
+
+  HttpConnectionCallback* =
+    proc(server: HttpServerRef,
+         transp: StreamTransport): Future[HttpConnectionRef] {.gcsafe, raises: [Defect].}

   HttpServer* = object of RootObj
     instance*: StreamServer
@@ -56,16 +61,15 @@ type
     serverIdent*: string
     flags*: set[HttpServerFlags]
     socketFlags*: set[ServerFlags]
-    secureFlags*: set[TLSFlags]
     connections*: Table[string, Future[void]]
     acceptLoop*: Future[void]
     lifetime*: Future[void]
     headersTimeout: Duration
     bufferSize: int
     maxHeadersSize: int
     maxRequestBodySize: int
     processCallback: HttpProcessCallback
-    tlsPrivateKey: TLSPrivateKey
-    tlsCertificate: TLSCertificate
+    createConnCallback: HttpConnectionCallback

   HttpServerRef* = ref HttpServer

@@ -103,9 +107,8 @@ type
   HttpConnection* = object of RootObj
     server*: HttpServerRef
     transp: StreamTransport
-    mainReader: AsyncStreamReader
-    mainWriter: AsyncStreamWriter
-    tlsStream: TLSAsyncStream
+    mainReader*: AsyncStreamReader
+    mainWriter*: AsyncStreamWriter
     reader*: AsyncStreamReader
     writer*: AsyncStreamWriter
     buffer: seq[byte]
@@ -119,6 +122,50 @@ proc init(htype: typedesc[HttpProcessError], error: HTTPServerError,
           code: HttpCode): HttpProcessError {.raises: [Defect].} =
   HttpProcessError(error: error, exc: exc, remote: remote, code: code)

+proc init*(value: var HttpServer,
+           address: TransportAddress,
+           server: StreamServer,
+           processCallback: HttpProcessCallback,
+           createConnCallback: HttpConnectionCallback,
+           serverUri: Uri,
+           serverFlags: set[HttpServerFlags] = {},
+           socketFlags: set[ServerFlags] = {ReuseAddr},
+           serverIdent = "",
+           maxConnections: int = -1,
+           bufferSize: int = 4096,
+           backlogSize: int = 100,
+           httpHeadersTimeout = 10.seconds,
+           maxHeadersSize: int = 8192,
+           maxRequestBodySize: int = 1_048_576) =
+
+  value = HttpServer(
+    address: address,
+    instance: server,
+    processCallback: processCallback,
+    createConnCallback: createConnCallback,
+    baseUri: serverUri,
+    serverIdent: serverIdent,
+    flags: serverFlags,
+    socketFlags: socketFlags,
+    maxConnections: maxConnections,
+    bufferSize: bufferSize,
+    backlogSize: backlogSize,
+    headersTimeout: httpHeadersTimeout,
+    maxHeadersSize: maxHeadersSize,
+    maxRequestBodySize: maxRequestBodySize,
+    # semaphore:
+    #   if maxConnections > 0:
+    #     newAsyncSemaphore(maxConnections)
+    #   else:
+    #     nil
+    lifetime: newFuture[void]("http.server.lifetime"),
+    connections: initTable[string, Future[void]]()
+  )
+
+proc createConnection(server: HttpServerRef,
+                      transp: StreamTransport): Future[HttpConnectionRef] {.
+     gcsafe.}
+
 proc new*(htype: typedesc[HttpServerRef],
           address: TransportAddress,
           processCallback: HttpProcessCallback,
@@ -126,9 +173,6 @@ proc new*(htype: typedesc[HttpServerRef],
           socketFlags: set[ServerFlags] = {ReuseAddr},
           serverUri = Uri(),
           serverIdent = "",
-          tlsPrivateKey: TLSPrivateKey = nil,
-          tlsCertificate: TLSCertificate = nil,
-          secureFlags: set[TLSFlags] = {},
          maxConnections: int = -1,
          bufferSize: int = 4096,
          backlogSize: int = 100,
@@ -136,51 +180,31 @@ proc new*(htype: typedesc[HttpServerRef],
          maxHeadersSize: int = 8192,
          maxRequestBodySize: int = 1_048_576): HttpResult[HttpServerRef] =

-  if HttpServerFlags.Secure in serverFlags:
-    if isNil(tlsPrivateKey) or isNil(tlsCertificate):
-      return err("PrivateKey or Certificate is missing")
-
-  var res = HttpServerRef(
-    address: address,
-    serverIdent: serverIdent,
-    maxConnections: maxConnections,
-    headersTimeout: httpHeadersTimeout,
-    maxHeadersSize: maxHeadersSize,
-    maxRequestBodySize: maxRequestBodySize,
-    processCallback: processCallback,
-    backLogSize: backLogSize,
-    flags: serverFlags,
-    socketFlags: socketFlags,
-    tlsPrivateKey: tlsPrivateKey,
-    tlsCertificate: tlsCertificate
-  )
-
-  res.baseUri =
-    try:
-      if len(serverUri.hostname) > 0 and isAbsolute(serverUri):
+  let serverUri =
+    if len(serverUri.hostname) > 0:
       serverUri
     else:
-      if HttpServerFlags.Secure in serverFlags:
-        parseUri("https://" & $address & "/")
-      else:
+      try:
        parseUri("http://" & $address & "/")
-    except TransportAddressError as exc:
-      return err(exc.msg)
+      except TransportAddressError as exc:
+        return err(exc.msg)

+  let serverInstance =
     try:
-      res.instance = createStreamServer(address, flags = socketFlags,
-                                        bufferSize = bufferSize,
+      createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
                          backlog = backlogSize)
-      # if maxConnections > 0:
-      #   res.semaphore = newAsyncSemaphore(maxConnections)
-      res.lifetime = newFuture[void]("http.server.lifetime")
-      res.connections = initTable[string, Future[void]]()
-      return ok(res)
     except TransportOsError as exc:
       return err(exc.msg)
     except CatchableError as exc:
       return err(exc.msg)

+  var res = HttpServerRef()
+  res[].init(address, serverInstance, processCallback, createConnection,
+             serverUri, serverFlags, socketFlags, serverIdent, maxConnections,
+             bufferSize, backLogSize, httpHeadersTimeout, maxHeadersSize,
+             maxRequestBodySize)
+  ok(res)

 proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [Defect].} =
   if req.response.isNone():
     var resp = HttpResponseRef(
@@ -450,47 +474,32 @@ proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {.async.} =
   except AsyncStreamLimitError:
     raiseHttpCriticalError("Maximum size of request headers reached", Http431)

-proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef,
-         transp: StreamTransport): HttpConnectionRef =
-  let mainReader = newAsyncStreamReader(transp)
-  let mainWriter = newAsyncStreamWriter(transp)
-  let tlsStream =
-    if HttpServerFlags.Secure in server.flags:
-      newTLSServerAsyncStream(mainReader, mainWriter, server.tlsPrivateKey,
-                              server.tlsCertificate,
-                              minVersion = TLSVersion.TLS12,
-                              flags = server.secureFlags)
-    else:
-      nil
-
-  let reader =
-    if isNil(tlsStream):
-      mainReader
-    else:
-      cast[AsyncStreamReader](tlsStream.reader)
-
-  let writer =
-    if isNil(tlsStream):
-      mainWriter
-    else:
-      cast[AsyncStreamWriter](tlsStream.writer)
-
-  HttpConnectionRef(
-    transp: transp,
+proc init*(value: var HttpConnection, server: HttpServerRef,
+           transp: StreamTransport) =
+  value = HttpConnection(
     server: server,
+    transp: transp,
     buffer: newSeq[byte](server.maxHeadersSize),
-    mainReader: mainReader,
-    mainWriter: mainWriter,
-    tlsStream: tlsStream,
-    reader: reader,
-    writer: writer
+    mainReader: newAsyncStreamReader(transp),
+    mainWriter: newAsyncStreamWriter(transp)
   )

-proc closeWait(conn: HttpConnectionRef) {.async.} =
-  if HttpServerFlags.Secure in conn.server.flags:
-    # First we will close TLS streams.
-    await allFutures(conn.reader.closeWait(), conn.writer.closeWait())
+proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef,
+         transp: StreamTransport): HttpConnectionRef =
+  var res = HttpConnectionRef()
+  res[].init(server, transp)
+  res.reader = res.mainReader
+  res.writer = res.mainWriter
+  res
+
+proc closeWait*(conn: HttpConnectionRef) {.async.} =
+  var pending: seq[Future[void]]
+  if conn.reader != conn.mainReader:
+    pending.add(conn.reader.closeWait())
+  if conn.writer != conn.mainWriter:
+    pending.add(conn.writer.closeWait())
+  if len(pending) > 0:
+    await allFutures(pending)
   # After we going to close everything else.
   await allFutures(conn.mainReader.closeWait(), conn.mainWriter.closeWait(),
                    conn.transp.closeWait())
@@ -505,20 +514,7 @@ proc closeWait(req: HttpRequestRef) {.async.} =

 proc createConnection(server: HttpServerRef,
                       transp: StreamTransport): Future[HttpConnectionRef] {.
     async.} =
-  var conn = HttpConnectionRef.new(server, transp)
-  if HttpServerFlags.Secure notin server.flags:
-    # Non secure connection
-    return conn
-
-  try:
-    await handshake(conn.tlsStream)
-    return conn
-  except CancelledError as exc:
-    await conn.closeWait()
-    raise exc
-  except TLSStreamError:
-    await conn.closeWait()
-    raiseHttpCriticalError("Unable to establish secure connection")
+  return HttpConnectionRef.new(server, transp)

 proc processLoop(server: HttpServerRef, transp: StreamTransport) {.async.} =
   var
@@ -527,10 +523,9 @@ proc processLoop(server: HttpServerRef, transp: StreamTransport) {.async.} =
     runLoop = false

   try:
-    conn = await createConnection(server, transp)
+    conn = await server.createConnCallback(server, transp)
     runLoop = true
   except CancelledError:
     # We could be cancelled only when we perform TLS handshake, connection
     server.connections.del(transp.getId())
     await transp.closeWait()
     return
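For orientation, a hedged sketch of what a handler looks like against the callback type redefined above (the handler name, port, and the `dumbResponse`/`respond` helpers are assumptions based on the surrounding httpserver API, not part of this commit):

```nim
import chronos, chronos/apps/http/httpserver

# The HttpProcessCallback introduced above allows the handler to raise
# Defect or CatchableError; an async proc satisfies that constraint.
proc process(req: RequestFence): Future[HttpResponseRef] {.async.} =
  if req.isOk():
    return await req.get().respond(Http200, "hello")
  else:
    return dumbResponse()

let res = HttpServerRef.new(initTAddress("127.0.0.1:8080"), process)
if res.isOk():
  let server = res.get()
  server.start()
  waitFor server.join()
```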
105 vendor/nim-chronos/chronos/apps/http/shttpserver.nim (vendored, new file)
@@ -0,0 +1,105 @@
#
#        Chronos HTTP/S server implementation
#             (c) Copyright 2021-Present
#         Status Research & Development GmbH
#
#              Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#              MIT license (LICENSE-MIT)
import httpserver
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, tlsstream]
export httpserver, asyncstream, tlsstream

type
  SecureHttpServer* = object of HttpServer
    secureFlags*: set[TLSFlags]
    tlsPrivateKey: TLSPrivateKey
    tlsCertificate: TLSCertificate

  SecureHttpServerRef* = ref SecureHttpServer

  SecureHttpConnection* = object of HttpConnection
    tlsStream*: TLSAsyncStream

  SecureHttpConnectionRef* = ref SecureHttpConnection

proc new*(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef,
          transp: StreamTransport): SecureHttpConnectionRef =
  var res = SecureHttpConnectionRef()
  HttpConnection(res[]).init(HttpServerRef(server), transp)
  let tlsStream =
    newTLSServerAsyncStream(res.mainReader, res.mainWriter,
                            server.tlsPrivateKey,
                            server.tlsCertificate,
                            minVersion = TLSVersion.TLS12,
                            flags = server.secureFlags)
  res.tlsStream = tlsStream
  res.reader = AsyncStreamReader(tlsStream.reader)
  res.writer = AsyncStreamWriter(tlsStream.writer)
  res

proc createSecConnection(server: HttpServerRef,
                         transp: StreamTransport): Future[HttpConnectionRef] {.
     async.} =
  let secureServ = cast[SecureHttpServerRef](server)
  var sconn = SecureHttpConnectionRef.new(secureServ, transp)
  try:
    await handshake(sconn.tlsStream)
    return HttpConnectionRef(sconn)
  except CancelledError as exc:
    await HttpConnectionRef(sconn).closeWait()
    raise exc
  except TLSStreamError:
    await HttpConnectionRef(sconn).closeWait()
    raiseHttpCriticalError("Unable to establish secure connection")

proc new*(htype: typedesc[SecureHttpServerRef],
          address: TransportAddress,
          processCallback: HttpProcessCallback,
          tlsPrivateKey: TLSPrivateKey,
          tlsCertificate: TLSCertificate,
          serverFlags: set[HttpServerFlags] = {},
          socketFlags: set[ServerFlags] = {ReuseAddr},
          serverUri = Uri(),
          serverIdent = "",
          secureFlags: set[TLSFlags] = {},
          maxConnections: int = -1,
          bufferSize: int = 4096,
          backlogSize: int = 100,
          httpHeadersTimeout = 10.seconds,
          maxHeadersSize: int = 8192,
          maxRequestBodySize: int = 1_048_576
         ): HttpResult[SecureHttpServerRef] =

  doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!")
  doAssert(not(isNil(tlsCertificate)), "TLS certificate must not be nil!")

  let serverUri =
    if len(serverUri.hostname) > 0:
      serverUri
    else:
      try:
        parseUri("https://" & $address & "/")
      except TransportAddressError as exc:
        return err(exc.msg)

  let serverInstance =
    try:
      createStreamServer(address, flags = socketFlags, bufferSize = bufferSize,
                         backlog = backlogSize)
    except TransportOsError as exc:
      return err(exc.msg)
    except CatchableError as exc:
      return err(exc.msg)

  var res = SecureHttpServerRef()
  HttpServer(res[]).init(address, serverInstance, processCallback,
                         createSecConnection, serverUri, serverFlags,
                         socketFlags, serverIdent, maxConnections,
                         bufferSize, backLogSize, httpHeadersTimeout,
                         maxHeadersSize, maxRequestBodySize)
  res.tlsCertificate = tlsCertificate
  res.tlsPrivateKey = tlsPrivateKey
  res.secureFlags = secureFlags
  ok(res)
138 vendor/nim-chronos/chronos/asyncfutures2.nim (vendored)
@@ -9,7 +9,7 @@
 # MIT license (LICENSE-MIT)

 import std/[os, tables, strutils, heapqueue, options, deques, cstrutils, sequtils]
-import srcloc
+import ./srcloc
 export srcloc

 const
@@ -29,7 +29,7 @@ type
     cancelcb*: CallbackFunc
     child*: FutureBase
     state*: FutureState
-    error*: ref Exception ## Stored exception
+    error*: ref CatchableError ## Stored exception
     mustCancel*: bool
     id*: int
@@ -171,7 +171,7 @@ proc done*(future: FutureBase): bool {.inline.} =
   completed(future)

 when defined(chronosFutureTracking):
-  proc futureDestructor(udata: pointer) {.gcsafe.} =
+  proc futureDestructor(udata: pointer) =
     ## This procedure will be called when Future[T] got finished, cancelled or
     ## failed and all Future[T].callbacks are already scheduled and processed.
     let future = cast[FutureBase](udata)
@@ -271,7 +271,7 @@ template complete*[T](futvar: FutureVar[T], val: T) =
   ## Any previously stored value will be overwritten.
   complete(futvar, val, getSrcLocation())

-proc fail[T](future: Future[T], error: ref Exception, loc: ptr SrcLoc) =
+proc fail[T](future: Future[T], error: ref CatchableError, loc: ptr SrcLoc) =
   if not(future.cancelled()):
     checkFinished(FutureBase(future), loc)
     future.error = error
@@ -282,7 +282,7 @@ proc fail[T](future: Future[T], error: ref Exception, loc: ptr SrcLoc) =
       getStackTrace(error)
   future.finish(FutureState.Failed)

-template fail*[T](future: Future[T], error: ref Exception) =
+template fail*[T](future: Future[T], error: ref CatchableError) =
   ## Completes ``future`` with ``error``.
   fail(future, error, getSrcLocation())
@@ -406,7 +406,7 @@ proc getHint(entry: StackTraceEntry): string =
     return "Resumes an async procedure"

 proc `$`*(entries: seq[StackTraceEntry]): string =
-  result = ""
+  try:
     # Find longest filename & line number combo for alignment purposes.
     var longestLeft = 0
     for entry in entries:
@@ -437,6 +437,8 @@ proc `$`*(entries: seq[StackTraceEntry]): string =
       let hint = getHint(entry)
       if hint.len > 0:
         result.add(spaces(indent+2) & "## " & hint & "\n")
+  except ValueError as exc:
+    return exc.msg # Shouldn't actually happen since we set the formatting string

 when defined(chronosStackTrace):
   proc injectStacktrace(future: FutureBase) =
@@ -462,7 +464,7 @@ when defined(chronosStackTrace):
       # newMsg.add "\n" & $entry
       future.error.msg = newMsg

-proc internalCheckComplete*(fut: FutureBase) =
+proc internalCheckComplete*(fut: FutureBase) {.raises: [Defect, CatchableError].} =
   # For internal use only. Used in asyncmacro
   if not(isNil(fut.error)):
     when defined(chronosStackTrace):
@@ -474,22 +476,19 @@ proc internalRead*[T](fut: Future[T] | FutureVar[T]): T {.inline.} =
   when T isnot void:
     return fut.value

-proc read*[T](future: Future[T] | FutureVar[T]): T =
+proc read*[T](future: Future[T] | FutureVar[T]): T {.raises: [Defect, CatchableError].} =
   ## Retrieves the value of ``future``. Future must be finished otherwise
   ## this function will fail with a ``ValueError`` exception.
   ##
   ## If the result of the future is an error then that error will be raised.
-  {.push hint[ConvFromXtoItselfNotNeeded]: off.}
-  let fut = Future[T](future)
-  {.pop.}
-  if fut.finished():
+  if future.finished():
     internalCheckComplete(future)
     internalRead(future)
   else:
     # TODO: Make a custom exception type for this?
     raise newException(ValueError, "Future still in progress.")

-proc readError*[T](future: Future[T]): ref Exception =
+proc readError*[T](future: Future[T]): ref CatchableError {.raises: [Defect, ValueError].} =
   ## Retrieves the exception stored in ``future``.
   ##
   ## An ``ValueError`` exception will be thrown if no exception exists
@@ -507,18 +506,18 @@ proc mget*[T](future: FutureVar[T]): var T =
   ## Future has not been finished.
   result = Future[T](future).value

-proc asyncCheck*[T](future: Future[T]) =
-  ## Sets a callback on ``future`` which raises an exception if the future
-  ## finished with an error.
-  ##
-  ## This should be used instead of ``discard`` to discard void futures.
-  doAssert(not isNil(future), "Future is nil")
-  proc cb(data: pointer) =
-    if future.failed() or future.cancelled():
-      when defined(chronosStackTrace):
-        injectStacktrace(future)
-      raise future.error
-  future.callback = cb
+template taskFutureLocation(future: FutureBase): string =
+  let loc = future.location[0]
+  "[" & (
+    if len(loc.procedure) == 0: "[unspecified]" else: $loc.procedure & "()"
+  ) & " at " & $loc.file & ":" & $(loc.line) & "]"
+
+template taskErrorMessage(future: FutureBase): string =
+  "Asynchronous task " & taskFutureLocation(future) &
+  " finished with an exception \"" & $future.error.name & "\"!\nStack trace: " &
+  future.error.getStackTrace()
+template taskCancelMessage(future: FutureBase): string =
+  "Asynchronous task " & taskFutureLocation(future) & " was cancelled!"

 proc asyncSpawn*(future: Future[void]) =
   ## Spawns a new concurrent async task.
@@ -534,35 +533,45 @@ proc asyncSpawn*(future: Future[void]) =
   ## and processed immediately.
   doAssert(not isNil(future), "Future is nil")

-  template getFutureLocation(): string =
-    let loc = future.location[0]
-    "[" & (
-      if len(loc.procedure) == 0: "[unspecified]" else: $loc.procedure & "()"
-    ) & " at " & $loc.file & ":" & $(loc.line) & "]"
-
-  template getErrorMessage(): string =
-    "Asynchronous task " & getFutureLocation() &
-    " finished with an exception \"" & $future.error.name & "\"!"
-  template getCancelMessage(): string =
-    "Asynchronous task " & getFutureLocation() & " was cancelled!"
-
   proc cb(data: pointer) =
     if future.failed():
-      raise newException(FutureDefect, getErrorMessage())
+      raise newException(FutureDefect, taskErrorMessage(future))
     elif future.cancelled():
-      raise newException(FutureDefect, getCancelMessage())
+      raise newException(FutureDefect, taskCancelMessage(future))

   if not(future.finished()):
     # We adding completion callback only if ``future`` is not finished yet.
     future.addCallback(cb)
   else:
-    if future.failed():
-      raise newException(FutureDefect, getErrorMessage())
-    elif future.cancelled():
-      raise newException(FutureDefect, getCancelMessage())
+    cb(nil)

-proc asyncDiscard*[T](future: Future[T]) {.deprecated.} = discard
-  ## This is async workaround for discard ``Future[T]``.
+proc asyncCheck*[T](future: Future[T]) {.
+    deprecated: "Raises Defect on future failure, fix your code and use asyncSpawn!".} =
+  ## This function used to raise an exception through the `poll` call if
+  ## the given future failed - there's no way to handle such exceptions so this
+  ## function is now an alias for `asyncSpawn`
+  ##
+  when T is void:
+    asyncSpawn(future)
+  else:
+    proc cb(data: pointer) =
+      if future.failed():
+        raise newException(FutureDefect, taskErrorMessage(future))
+      elif future.cancelled():
+        raise newException(FutureDefect, taskCancelMessage(future))
+
+    if not(future.finished()):
+      # We adding completion callback only if ``future`` is not finished yet.
+      future.addCallback(cb)
+    else:
+      cb(nil)
+
+proc asyncDiscard*[T](future: Future[T]) {.
+    deprecated: "Use asyncSpawn or `discard await`".} = discard
+  ## `asyncDiscard` will discard the outcome of the operation - unlike `discard`
+  ## it also throws away exceptions! Use `asyncSpawn` if you're sure your
+  ## code doesn't raise exceptions, or `discard await` to ignore successful
+  ## outcomes

 proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {.
   deprecated: "Use allFutures[T](varargs[Future[T]])".} =
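To make the `asyncSpawn`/`asyncCheck` split above concrete, a small sketch of spawning a fire-and-forget task the recommended way (the proc name and delays are illustrative, not part of the commit):

```nim
import chronos

proc background() {.async.} =
  # A fire-and-forget task: if it failed, asyncSpawn would surface the
  # failure as a FutureDefect instead of silently dropping it.
  await sleepAsync(10.milliseconds)
  echo "background work done"

asyncSpawn background()
# Give the spawned task time to finish before the program exits.
waitFor sleepAsync(50.milliseconds)
```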
@@ -587,7 +596,7 @@ proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {.
   fut1.callback = cb
   fut2.callback = cb

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     if not(fut1.finished()):
       fut1.removeCallback(cb)
@@ -611,7 +620,8 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =
   ##
   ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled.
   var retFuture = newFuture[void]("chronos.or")
-  proc cb(udata: pointer) {.gcsafe.} =
+  var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  cb = proc(udata: pointer) =
     if not(retFuture.finished()):
       var fut = cast[FutureBase](udata)
       if cast[pointer](fut1) == udata:
@@ -623,7 +633,7 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =
       else:
         retFuture.complete()

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     if not(fut1.finished()):
       fut1.removeCallback(cb)
@@ -676,7 +686,7 @@ proc all*[T](futs: varargs[Future[T]]): auto {.

   when T is void:
     var retFuture = newFuture[void]("chronos.all(void)")
-    proc cb(udata: pointer) {.gcsafe.} =
+    proc cb(udata: pointer) =
       if not(retFuture.finished()):
         inc(completedFutures)
         if completedFutures == totalFutures:
@@ -698,7 +708,7 @@ proc all*[T](futs: varargs[Future[T]]): auto {.
     var retFuture = newFuture[seq[T]]("chronos.all(T)")
     var retValues = newSeq[T](totalFutures)

-    proc cb(udata: pointer) {.gcsafe.} =
+    proc cb(udata: pointer) =
       if not(retFuture.finished()):
         inc(completedFutures)
         if completedFutures == totalFutures:
@@ -707,7 +717,7 @@ proc all*[T](futs: varargs[Future[T]]): auto {.
               retFuture.fail(nfut.error)
               break
             else:
-              retValues[k] = nfut.read()
+              retValues[k] = nfut.value
           if not(retFuture.failed()):
             retFuture.complete(retValues)

@@ -731,7 +741,7 @@ proc oneIndex*[T](futs: varargs[Future[T]]): Future[int] {.
   var nfuts = @futs
   var retFuture = newFuture[int]("chronos.oneIndex(T)")

-  proc cb(udata: pointer) {.gcsafe.} =
+  proc cb(udata: pointer) =
     var res = -1
     if not(retFuture.finished()):
       var rfut = cast[FutureBase](udata)
@@ -762,7 +772,7 @@ proc oneValue*[T](futs: varargs[Future[T]]): Future[T] {.
   var nfuts = @futs
   var retFuture = newFuture[T]("chronos.oneValue(T)")

-  proc cb(udata: pointer) {.gcsafe.} =
+  proc cb(udata: pointer) =
     var resFut: Future[T]
     if not(retFuture.finished()):
       var rfut = cast[FutureBase](udata)
@@ -794,10 +804,10 @@ proc cancelAndWait*[T](fut: Future[T]): Future[void] =
   ## If ``fut`` is already finished (completed, failed or cancelled) result
   ## Future[void] object will be returned complete.
   var retFuture = newFuture[void]("chronos.cancelAndWait(T)")
-  proc continuation(udata: pointer) {.gcsafe.} =
+  proc continuation(udata: pointer) =
     if not(retFuture.finished()):
       retFuture.complete()
-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     if not(fut.finished()):
       fut.removeCallback(continuation)
     if fut.finished():
@@ -823,13 +833,13 @@ proc allFutures*[T](futs: varargs[Future[T]]): Future[void] =
   # Because we can't capture varargs[T] in closures we need to create copy.
   var nfuts = @futs

-  proc cb(udata: pointer) {.gcsafe.} =
+  proc cb(udata: pointer) =
     if not(retFuture.finished()):
       inc(completedFutures)
       if completedFutures == totalFutures:
         retFuture.complete()

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     for i in 0..<len(nfuts):
       if not(nfuts[i].finished()):
@@ -863,13 +873,13 @@ proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] =

   var nfuts = @futs

-  proc cb(udata: pointer) {.gcsafe.} =
+  proc cb(udata: pointer) =
     if not(retFuture.finished()):
       inc(completedFutures)
       if completedFutures == totalFutures:
         retFuture.complete(nfuts)

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     for fut in nfuts.mitems():
       if not(fut.finished()):
@@ -901,7 +911,8 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] =
   # Because we can't capture varargs[T] in closures we need to create copy.
   var nfuts = @futs

-  proc cb(udata: pointer) {.gcsafe.} =
+  var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  cb = proc(udata: pointer) =
     if not(retFuture.finished()):
       var res: Future[T]
       var rfut = cast[FutureBase](udata)
@@ -912,7 +923,7 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] =
           res = nfuts[i]
       retFuture.complete(res)

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     for i in 0..<len(nfuts):
       if not(nfuts[i].finished()):
@@ -947,7 +958,8 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] =
   # Because we can't capture varargs[T] in closures we need to create copy.
   var nfuts = @futs

-  proc cb(udata: pointer) {.gcsafe.} =
+  var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].}
+  cb = proc(udata: pointer) =
     if not(retFuture.finished()):
       var res: FutureBase
       var rfut = cast[FutureBase](udata)
@@ -958,7 +970,7 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] =
           res = nfuts[i]
       retFuture.complete(res)

-  proc cancellation(udata: pointer) {.gcsafe.} =
+  proc cancellation(udata: pointer) =
     # On cancel we remove all our callbacks only.
     for i in 0..<len(nfuts):
       if not(nfuts[i].finished()):
150 vendor/nim-chronos/chronos/asyncloop.nim (vendored)
@ -8,11 +8,13 @@
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
include "system/inclrtl"
|
||||
|
||||
import os, tables, strutils, heapqueue, lists, options, nativesockets, net,
|
||||
deques
|
||||
import timer
|
||||
import std/[os, tables, strutils, heapqueue, lists, options, nativesockets, net,
|
||||
deques]
|
||||
import ./timer
|
||||
|
||||
export Port, SocketFlag
|
||||
export timer
|
||||
@ -172,12 +174,12 @@ const unixPlatform = defined(macosx) or defined(freebsd) or
|
||||
when defined(windows):
|
||||
import winlean, sets, hashes
|
||||
elif unixPlatform:
|
||||
import selectors
|
||||
import ./selectors2
|
||||
from posix import EINTR, EAGAIN, EINPROGRESS, EWOULDBLOCK, MSG_PEEK,
|
||||
MSG_NOSIGNAL, SIGPIPE
|
||||
|
||||
type
|
||||
CallbackFunc* = proc (arg: pointer) {.gcsafe.}
|
||||
CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [Defect].}
|
||||
|
||||
AsyncCallback* = object
|
||||
function*: CallbackFunc
|
||||
@ -194,8 +196,8 @@ type
|
||||
|
||||
TrackerBase* = ref object of RootRef
|
||||
id*: string
|
||||
dump*: proc(): string {.gcsafe.}
|
||||
isLeaked*: proc(): bool {.gcsafe.}
|
||||
dump*: proc(): string {.gcsafe, raises: [Defect].}
|
||||
isLeaked*: proc(): bool {.gcsafe, raises: [Defect].}
|
||||
|
||||
PDispatcherBase = ref object of RootRef
|
||||
timers*: HeapQueue[TimerCallback]
|
||||
@ -278,6 +280,14 @@ template processCallbacks(loop: untyped) =
|
||||
if not isNil(callable.function):
|
||||
callable.function(callable.udata)
|
||||
|
||||
proc raiseAsDefect*(exc: ref Exception, msg: string) {.
|
||||
raises: [Defect], noreturn, noinline.} =
|
||||
# Reraise an exception as a Defect, where it's unexpected and can't be handled
|
||||
# We include the stack trace in the message because otherwise, it's easily
|
||||
# lost - Nim doesn't print it for `parent` exceptions for example (!)
|
||||
raise (ref Defect)(
|
||||
msg: msg & "\n" & exc.msg & "\n" & exc.getStackTrace(), parent: exc)
|
||||
|
||||
when defined(windows) or defined(nimdoc):
|
||||
type
|
||||
WSAPROC_TRANSMITFILE = proc(hSocket: SocketHandle, hFile: Handle,
|
||||
@ -286,7 +296,7 @@ when defined(windows) or defined(nimdoc):
|
||||
lpOverlapped: POVERLAPPED,
|
||||
lpTransmitBuffers: pointer,
|
||||
dwReserved: DWORD): cint {.
|
||||
gcsafe, stdcall.}
|
||||
gcsafe, stdcall, raises: [].}
|
||||
|
||||
CompletionKey = ULONG_PTR
|
||||
|
||||
@ -324,12 +334,12 @@ when defined(windows) or defined(nimdoc):
|
||||
sizeof(GUID).DWORD, addr fun, sizeof(pointer).DWORD,
|
||||
addr bytesRet, nil, nil) == 0
|
||||
|
||||
proc globalInit() =
|
||||
proc globalInit() {.raises: [Defect, OSError].} =
|
||||
var wsa: WSAData
|
||||
if wsaStartup(0x0202'i16, addr wsa) != 0:
|
||||
raiseOSError(osLastError())
|
||||
|
||||
proc initAPI(loop: PDispatcher) =
|
||||
proc initAPI(loop: PDispatcher) {.raises: [Defect, CatchableError].} =
|
||||
var
|
||||
WSAID_TRANSMITFILE = GUID(
|
||||
D1: 0xb5367df0'i32, D2: 0xcbac'i16, D3: 0x11cf'i16,
|
||||
@ -363,7 +373,7 @@ when defined(windows) or defined(nimdoc):
|
||||
loop.transmitFile = cast[WSAPROC_TRANSMITFILE](funcPointer)
|
||||
close(sock)
|
||||
|
||||
proc newDispatcher*(): PDispatcher =
|
||||
proc newDispatcher*(): PDispatcher {.raises: [Defect, CatchableError].} =
|
||||
## Creates a new Dispatcher instance.
|
||||
var res = PDispatcher()
|
||||
res.ioPort = createIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 1)
|
||||
@ -389,17 +399,13 @@ when defined(windows) or defined(nimdoc):
|
||||
|
||||
proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [Defect].}
|
||||
proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [Defect].}
|
||||
proc setGlobalDispatcher*(disp: PDispatcher) {.
|
||||
gcsafe, deprecated: "Use setThreadDispatcher() instead".}
|
||||
proc getGlobalDispatcher*(): PDispatcher {.
|
||||
gcsafe, deprecated: "Use getThreadDispatcher() instead".}
|
||||
|
||||
proc getIoHandler*(disp: PDispatcher): Handle =
|
||||
## Returns the underlying IO Completion Port handle (Windows) or selector
|
||||
## (Unix) for the specified dispatcher.
|
||||
return disp.ioPort
|
||||
|
||||
proc register*(fd: AsyncFD) =
|
||||
proc register*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =
|
||||
## Register file descriptor ``fd`` in thread's dispatcher.
|
||||
let loop = getThreadDispatcher()
|
||||
if createIoCompletionPort(fd.Handle, loop.ioPort,
|
||||
@ -407,8 +413,17 @@ when defined(windows) or defined(nimdoc):
|
||||
raiseOSError(osLastError())
|
||||
loop.handles.incl(fd)
|
||||
|
||||
proc poll*() =
|
||||
## Perform single asynchronous step.
|
||||
proc unregister*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =
|
||||
## Unregisters ``fd``.
|
||||
getThreadDispatcher().handles.excl(fd)
|
||||
|
||||
proc poll*() {.raises: [Defect, CatchableError].} =
|
||||
## Perform single asynchronous step, processing timers and completing
|
||||
## unblocked tasks. Blocks until at least one event has completed.
|
||||
##
|
||||
## Exceptions raised here indicate that waiting for tasks to be unblocked
|
||||
## failed - exceptions from within tasks are instead propagated through
|
||||
## their respective futures and not allowed to interrrupt the poll call.
|
||||
let loop = getThreadDispatcher()
|
||||
var curTime = Moment.now()
|
||||
var curTimeout = DWORD(0)
|
||||
@ -476,17 +491,13 @@ when defined(windows) or defined(nimdoc):
|
||||
var acb = AsyncCallback(function: aftercb)
|
||||
loop.callbacks.addLast(acb)
|
||||
|
||||
proc unregister*(fd: AsyncFD) =
|
||||
## Unregisters ``fd``.
|
||||
getThreadDispatcher().handles.excl(fd)
|
||||
|
||||
proc contains*(disp: PDispatcher, fd: AsyncFD): bool =
|
||||
## Returns ``true`` if ``fd`` is registered in thread's dispatcher.
|
||||
return fd in disp.handles
|
||||
|
||||
elif unixPlatform:
|
||||
const
|
||||
SIG_IGN = cast[proc(x: cint) {.noconv,gcsafe.}](1)
|
||||
SIG_IGN = cast[proc(x: cint) {.raises: [], noconv, gcsafe.}](1)
|
||||
|
||||
type
|
||||
AsyncFD* = distinct cint
|
||||
@ -513,10 +524,10 @@ elif unixPlatform:
|
||||
# We are ignoring SIGPIPE signal, because we are working with EPIPE.
|
||||
posix.signal(cint(SIGPIPE), SIG_IGN)
|
||||
|
||||
proc initAPI(disp: PDispatcher) =
|
||||
proc initAPI(disp: PDispatcher) {.raises: [Defect, CatchableError].} =
|
||||
discard
|
||||
|
||||
proc newDispatcher*(): PDispatcher =
|
||||
proc newDispatcher*(): PDispatcher {.raises: [Defect, CatchableError].} =
|
||||
## Create new dispatcher.
|
||||
var res = PDispatcher()
|
||||
res.selector = newSelector[SelectorData]()
|
||||
@ -537,16 +548,12 @@ elif unixPlatform:
|
||||
|
||||
proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [Defect].}
|
||||
proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [Defect].}
|
||||
proc setGlobalDispatcher*(disp: PDispatcher) {.
|
||||
gcsafe, deprecated: "Use setThreadDispatcher() instead".}
|
||||
proc getGlobalDispatcher*(): PDispatcher {.
|
||||
gcsafe, deprecated: "Use getThreadDispatcher() instead".}
|
||||
|
||||
proc getIoHandler*(disp: PDispatcher): Selector[SelectorData] =
|
||||
## Returns system specific OS queue.
|
||||
return disp.selector
|
||||
|
||||
proc register*(fd: AsyncFD) =
|
||||
proc register*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =
|
||||
## Register file descriptor ``fd`` in thread's dispatcher.
|
||||
let loop = getThreadDispatcher()
|
||||
var data: SelectorData
|
||||
@ -554,7 +561,7 @@ elif unixPlatform:
|
||||
data.wdata.fd = fd
|
||||
loop.selector.registerHandle(int(fd), {}, data)
|
||||
|
||||
proc unregister*(fd: AsyncFD) =
|
||||
proc unregister*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =
|
||||
## Unregister file descriptor ``fd`` from thread's dispatcher.
|
||||
getThreadDispatcher().selector.unregister(int(fd))
|
||||
|
||||
@ -562,7 +569,8 @@ elif unixPlatform:
|
||||
## Returns ``true`` if ``fd`` is registered in thread's dispatcher.
|
||||
result = int(fd) in disp.selector
|
||||
|
||||
proc addReader*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) =
|
||||
proc addReader*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {.
|
||||
raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Start watching the file descriptor ``fd`` for read availability and then
|
||||
## call the callback ``cb`` with specified argument ``udata``.
|
||||
let loop = getThreadDispatcher()
|
||||
@ -578,7 +586,7 @@ elif unixPlatform:
|
||||
raise newException(ValueError, "File descriptor not registered.")
|
||||
loop.selector.updateHandle(int(fd), newEvents)
|
||||
|
||||
proc removeReader*(fd: AsyncFD) =
|
||||
proc removeReader*(fd: AsyncFD) {.raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Stop watching the file descriptor ``fd`` for read availability.
|
||||
let loop = getThreadDispatcher()
|
||||
var newEvents: set[Event]
|
||||
@ -592,7 +600,7 @@ elif unixPlatform:
|
||||
raise newException(ValueError, "File descriptor not registered.")
|
||||
loop.selector.updateHandle(int(fd), newEvents)
|
||||
|
||||
proc addWriter*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) =
|
||||
proc addWriter*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {.raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Start watching the file descriptor ``fd`` for write availability and then
|
||||
## call the callback ``cb`` with specified argument ``udata``.
|
||||
let loop = getThreadDispatcher()
|
||||
@ -608,7 +616,7 @@ elif unixPlatform:
|
||||
raise newException(ValueError, "File descriptor not registered.")
|
||||
loop.selector.updateHandle(int(fd), newEvents)
|
||||
|
||||
proc removeWriter*(fd: AsyncFD) =
|
||||
proc removeWriter*(fd: AsyncFD) {.raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Stop watching the file descriptor ``fd`` for write availability.
|
||||
let loop = getThreadDispatcher()
|
||||
var newEvents: set[Event]
|
||||
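As a usage illustration (a minimal sketch, not part of this changeset), the addReader/addWriter family above is typically driven like this; `fd` is assumed to be an AsyncFD that has already been register()-ed with the thread dispatcher:

proc onReadable(udata: pointer) {.gcsafe, raises: [Defect].} =
  # invoked from poll() once the descriptor becomes readable
  echo "fd is readable"

addReader(fd, onReadable)   # start watching for read availability
# ... later, when no longer interested:
removeReader(fd)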
@ -633,7 +641,11 @@ elif unixPlatform:
|
||||
|
||||
proc continuation(udata: pointer) =
|
||||
if SocketHandle(fd) in loop.selector:
|
||||
try:
|
||||
unregister(fd)
|
||||
except CatchableError as exc:
|
||||
raiseAsDefect(exc, "unregister failed")
|
||||
|
||||
close(SocketHandle(fd))
|
||||
if not isNil(aftercb):
|
||||
aftercb(nil)
|
||||
@ -658,7 +670,7 @@ elif unixPlatform:
|
||||
var acb = AsyncCallback(function: continuation)
|
||||
loop.callbacks.addLast(acb)
|
||||
|
||||
proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) {.inline.} =
|
||||
proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) =
|
||||
## Close asynchronous file/pipe handle.
|
||||
##
|
||||
## Please note, that socket is not closed immediately. To avoid bugs with
|
||||
@ -669,7 +681,7 @@ elif unixPlatform:
|
||||
|
||||
when ioselSupportedPlatform:
|
||||
proc addSignal*(signal: int, cb: CallbackFunc,
|
||||
udata: pointer = nil): int =
|
||||
udata: pointer = nil): int {.raises: [Defect, IOSelectorsException, ValueError, OSError].} =
|
||||
## Start watching signal ``signal``, and when signal appears, call the
|
||||
## callback ``cb`` with specified argument ``udata``. Returns signal
|
||||
## identifier code, which can be used to remove signal callback
|
||||
@ -684,12 +696,12 @@ elif unixPlatform:
|
||||
do:
|
||||
raise newException(ValueError, "File descriptor not registered.")
|
||||
|
||||
proc removeSignal*(sigfd: int) =
|
||||
proc removeSignal*(sigfd: int) {.raises: [Defect, IOSelectorsException].} =
|
||||
## Remove watching signal ``signal``.
|
||||
let loop = getThreadDispatcher()
|
||||
loop.selector.unregister(sigfd)
|
||||
|
||||
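A hedged sketch of the signal API above (not from this diff); SIGTERM comes from std posix and is only an example, and the calls assume a platform where ioselSupportedPlatform holds:

import posix

proc onTerm(udata: pointer) {.gcsafe, raises: [Defect].} =
  echo "received SIGTERM"

# addSignal returns an identifier that removeSignal later takes back
let sigId = addSignal(int(SIGTERM), onTerm)
removeSignal(sigId)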
proc poll*() =
|
||||
proc poll*() {.raises: [Defect, CatchableError].} =
|
||||
## Perform single asynchronous step.
|
||||
let loop = getThreadDispatcher()
|
||||
var curTime = Moment.now()
|
||||
@ -750,33 +762,19 @@ proc setThreadDispatcher*(disp: PDispatcher) =
|
||||
|
||||
proc getThreadDispatcher*(): PDispatcher =
|
||||
## Returns current thread's dispatcher instance.
|
||||
template getErrorMessage(exc): string =
|
||||
"Cannot create thread dispatcher: " & exc.msg
|
||||
|
||||
if gDisp.isNil:
|
||||
when defined(windows):
|
||||
let disp =
|
||||
try:
|
||||
newDispatcher()
|
||||
setThreadDispatcher(newDispatcher())
|
||||
except CatchableError as exc:
|
||||
raise newException(Defect, getErrorMessage(exc))
|
||||
else:
|
||||
let disp =
|
||||
try:
|
||||
newDispatcher()
|
||||
except IOSelectorsException as exc:
|
||||
raise newException(Defect, getErrorMessage(exc))
|
||||
except CatchableError as exc:
|
||||
raise newException(Defect, getErrorMessage(exc))
|
||||
setThreadDispatcher(disp)
|
||||
return gDisp
|
||||
raiseAsDefect exc, "Cannot create dispatcher"
|
||||
gdisp
|
||||
|
||||
proc setGlobalDispatcher*(disp: PDispatcher) =
|
||||
## Set current thread's dispatcher instance to ``disp``.
|
||||
proc setGlobalDispatcher*(disp: PDispatcher) {.
|
||||
gcsafe, deprecated: "Use setThreadDispatcher() instead".} =
|
||||
setThreadDispatcher(disp)
|
||||
|
||||
proc getGlobalDispatcher*(): PDispatcher =
|
||||
## Returns current thread's dispatcher instance.
|
||||
proc getGlobalDispatcher*(): PDispatcher {.
|
||||
gcsafe, deprecated: "Use getThreadDispatcher() instead".} =
|
||||
getThreadDispatcher()
|
||||
|
||||
proc setTimer*(at: Moment, cb: CallbackFunc,
|
||||
@ -903,7 +901,8 @@ proc stepsAsync*(number: int): Future[void] =
|
||||
var retFuture = newFuture[void]("chronos.stepsAsync(int)")
|
||||
var counter = 0
|
||||
|
||||
proc continuation(data: pointer) {.gcsafe.} =
|
||||
var continuation: proc(data: pointer) {.gcsafe, raises: [Defect].}
|
||||
continuation = proc(data: pointer) =
|
||||
if not(retFuture.finished()):
|
||||
inc(counter)
|
||||
if counter < number:
|
||||
@ -911,7 +910,7 @@ proc stepsAsync*(number: int): Future[void] =
|
||||
else:
|
||||
retFuture.complete()
|
||||
|
||||
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||
proc cancellation(udata: pointer) =
|
||||
discard
|
||||
|
||||
if number <= 0:
|
||||
@ -952,7 +951,9 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] =
|
||||
var timer: TimerCallback
|
||||
var cancelling = false
|
||||
|
||||
proc continuation(udata: pointer) {.gcsafe.} =
|
||||
# TODO: raises annotation shouldn't be needed, but likely similar issue as
|
||||
# https://github.com/nim-lang/Nim/issues/17369
|
||||
proc continuation(udata: pointer) {.gcsafe, raises: [Defect].} =
|
||||
if not(retFuture.finished()):
|
||||
if not(cancelling):
|
||||
if not(fut.finished()):
|
||||
@ -968,7 +969,9 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] =
|
||||
else:
|
||||
retFuture.complete(false)
|
||||
|
||||
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||
# TODO: raises annotation shouldn't be needed, but likely similar issue as
|
||||
# https://github.com/nim-lang/Nim/issues/17369
|
||||
proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} =
|
||||
if not isNil(timer):
|
||||
clearTimer(timer)
|
||||
if not(fut.finished()):
|
||||
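To make the withTimeout semantics concrete, a minimal sketch (not part of the diff); someOperation() is a hypothetical async proc returning Future[int]:

proc demo() {.async.} =
  let fut = someOperation()
  if await fut.withTimeout(100.milliseconds):
    echo "finished in time: ", fut.read()
  else:
    # on timeout the wrapped future is cancelled and false is returned
    echo "timed out"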
@ -1009,7 +1012,7 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
|
||||
var timer: TimerCallback
|
||||
var cancelling = false
|
||||
|
||||
proc continuation(udata: pointer) {.gcsafe.} =
|
||||
proc continuation(udata: pointer) {.raises: [Defect].} =
|
||||
if not(retFuture.finished()):
|
||||
if not(cancelling):
|
||||
if not(fut.finished()):
|
||||
@ -1027,11 +1030,12 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
|
||||
when T is void:
|
||||
retFuture.complete()
|
||||
else:
|
||||
retFuture.complete(fut.read())
|
||||
retFuture.complete(fut.value)
|
||||
else:
|
||||
retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
|
||||
|
||||
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||
var cancellation: proc(udata: pointer) {.gcsafe, raises: [Defect].}
|
||||
cancellation = proc(udata: pointer) =
|
||||
if not isNil(timer):
|
||||
clearTimer(timer)
|
||||
if not(fut.finished()):
|
||||
@ -1045,7 +1049,7 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] =
|
||||
when T is void:
|
||||
retFuture.complete()
|
||||
else:
|
||||
retFuture.complete(fut.read())
|
||||
retFuture.complete(fut.value)
|
||||
else:
|
||||
if timeout.isZero():
|
||||
retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!"))
|
||||
@ -1069,15 +1073,21 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {.
|
||||
else:
|
||||
wait(fut, timeout.milliseconds())
|
||||
|
||||
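The wait() overloads above behave differently from withTimeout: they fail the returned future with AsyncTimeoutError instead of returning a bool. A short sketch, again with a hypothetical someOperation():

proc demo2() {.async.} =
  try:
    let v = await someOperation().wait(2.seconds)
    echo "value: ", v
  except AsyncTimeoutError:
    echo "gave up after 2 seconds"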
{.pop.}
|
||||
|
||||
include asyncmacro2
|
||||
|
||||
proc runForever*() =
|
||||
{.push raises: [Defect].}
|
||||
|
||||
proc runForever*() {.raises: [Defect, CatchableError].} =
|
||||
## Begins a never ending global dispatcher poll loop.
|
||||
## Raises different exceptions depending on the platform.
|
||||
while true:
|
||||
poll()
|
||||
|
||||
proc waitFor*[T](fut: Future[T]): T =
|
||||
proc waitFor*[T](fut: Future[T]): T {.raises: [Defect, CatchableError].} =
|
||||
## **Blocks** the current thread until the specified future completes.
|
||||
## There's no way to tell if poll or read raised the exception
|
||||
while not(fut.finished()):
|
||||
poll()
|
||||
|
||||
|
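For context, a small sketch of driving the dispatcher from synchronous code with waitFor, assuming only the chronos API shown above:

proc fetch(): Future[string] {.async.} =
  await sleepAsync(10.milliseconds)
  return "done"

when isMainModule:
  # waitFor blocks the current thread, calling poll() until the future finishes
  echo waitFor fetch()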
96
vendor/nim-chronos/chronos/asyncmacro2.nim
vendored
@ -7,11 +7,7 @@
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## AsyncMacro
|
||||
## *************
|
||||
## `asyncdispatch` module depends on the `asyncmacro` module to work properly.
|
||||
|
||||
import macros, strutils
|
||||
import std/[macros, strutils]
|
||||
|
||||
proc skipUntilStmtList(node: NimNode): NimNode {.compileTime.} =
|
||||
# Skips a nest of StmtList's.
|
||||
@ -23,15 +19,20 @@ proc skipUntilStmtList(node: NimNode): NimNode {.compileTime.} =
|
||||
# result = node
|
||||
# if node[0].kind == nnkStmtList:
|
||||
# result = node[0]
|
||||
|
||||
when defined(chronosStrictException):
|
||||
template createCb(retFutureSym, iteratorNameSym,
|
||||
strName, identName, futureVarCompletions: untyped) =
|
||||
bind finished
|
||||
|
||||
var nameIterVar = iteratorNameSym
|
||||
{.push stackTrace: off.}
|
||||
proc identName(udata: pointer = nil) {.closure, gcsafe.} =
|
||||
var identName: proc(udata: pointer) {.gcsafe, raises: [Defect].}
|
||||
identName = proc(udata: pointer) {.raises: [Defect].} =
|
||||
try:
|
||||
# If the compiler complains about unlisted exception here, it's usually
|
||||
# because you're calling a callback or forward declaration in your code
|
||||
# for which the compiler cannot deduce raises signatures - make sure
|
||||
# to annotate both forward declarations and `proc` types with `raises`!
|
||||
if not(nameIterVar.finished()):
|
||||
var next = nameIterVar()
|
||||
# Continue while the yielded future is already finished.
|
||||
@ -46,21 +47,58 @@ template createCb(retFutureSym, iteratorNameSym,
|
||||
"are you await'ing a `nil` Future?"
|
||||
raiseAssert msg
|
||||
else:
|
||||
{.gcsafe.}:
|
||||
next.addCallback(identName)
|
||||
except CancelledError:
|
||||
retFutureSym.cancelAndSchedule()
|
||||
except CatchableError as exc:
|
||||
futureVarCompletions
|
||||
|
||||
if retFutureSym.finished():
|
||||
# Take a look at tasyncexceptions for the bug which this fixes.
|
||||
# That test explains it better than I can here.
|
||||
raise exc
|
||||
else:
|
||||
retFutureSym.fail(exc)
|
||||
|
||||
identName()
|
||||
identName(nil)
|
||||
{.pop.}
|
||||
else:
|
||||
template createCb(retFutureSym, iteratorNameSym,
|
||||
strName, identName, futureVarCompletions: untyped) =
|
||||
bind finished
|
||||
|
||||
var nameIterVar = iteratorNameSym
|
||||
{.push stackTrace: off.}
|
||||
var identName: proc(udata: pointer) {.gcsafe, raises: [Defect].}
|
||||
identName = proc(udata: pointer) {.raises: [Defect].} =
|
||||
try:
|
||||
# If the compiler complains about unlisted exception here, it's usually
|
||||
# because you're calling a callback or forward declaration in your code
|
||||
# for which the compiler cannot deduce raises signatures - make sure
|
||||
# to annotate both forward declarations and `proc` types with `raises`!
|
||||
if not(nameIterVar.finished()):
|
||||
var next = nameIterVar()
|
||||
# Continue while the yielded future is already finished.
|
||||
while (not next.isNil()) and next.finished():
|
||||
next = nameIterVar()
|
||||
if nameIterVar.finished():
|
||||
break
|
||||
|
||||
if next == nil:
|
||||
if not(retFutureSym.finished()):
|
||||
const msg = "Async procedure (" & strName & ") yielded `nil`, " &
|
||||
"are you await'ing a `nil` Future?"
|
||||
raiseAssert msg
|
||||
else:
|
||||
next.addCallback(identName)
|
||||
except CancelledError:
|
||||
retFutureSym.cancelAndSchedule()
|
||||
except CatchableError as exc:
|
||||
futureVarCompletions
|
||||
retFutureSym.fail(exc)
|
||||
except Exception as exc:
|
||||
# TODO remove Exception handler to turn on strict mode
|
||||
if exc is Defect:
|
||||
raise (ref Defect)(exc)
|
||||
|
||||
futureVarCompletions
|
||||
retFutureSym.fail((ref ValueError)(msg: exc.msg, parent: exc))
|
||||
|
||||
identName(nil)
|
||||
{.pop.}
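The comment inside createCb about unlisted exceptions boils down to annotating forward declarations and proc types explicitly. An illustrative sketch (not from this changeset) of what such annotations look like:

# both the forward declaration and the proc type carry a raises list,
# so callers compiled under {.push raises: [Defect].} can be checked
proc onDone(udata: pointer) {.gcsafe, raises: [Defect].}

type DoneCallback = proc(udata: pointer) {.gcsafe, raises: [Defect].}

proc onDone(udata: pointer) {.gcsafe, raises: [Defect].} =
  discard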
|
||||
|
||||
proc createFutureVarCompletions(futureVarIdents: seq[NimNode],
|
||||
@ -247,6 +285,29 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
|
||||
closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body)
|
||||
closureIterator.addPragma(newIdentNode("closure"))
|
||||
|
||||
# TODO when push raises is active in a module, the iterator here inherits
|
||||
# that annotation - here we explicitly disable it again which goes
|
||||
# against the spirit of the raises annotation - one should investigate
|
||||
# here the possibility of transporting more specific error types here
|
||||
# for example by casting exceptions coming out of `await`..
|
||||
when defined(chronosStrictException):
|
||||
closureIterator.addPragma(nnkExprColonExpr.newTree(
|
||||
newIdentNode("raises"),
|
||||
nnkBracket.newTree(
|
||||
newIdentNode("Defect"),
|
||||
newIdentNode("CatchableError")
|
||||
)
|
||||
))
|
||||
else:
|
||||
closureIterator.addPragma(nnkExprColonExpr.newTree(
|
||||
newIdentNode("raises"),
|
||||
nnkBracket.newTree(
|
||||
newIdentNode("Defect"),
|
||||
newIdentNode("CatchableError"),
|
||||
newIdentNode("Exception") # Allow exception effects
|
||||
)
|
||||
))
|
||||
|
||||
# If proc has an explicit gcsafe pragma, we add it to iterator as well.
|
||||
if prc.pragma.findChild(it.kind in {nnkSym, nnkIdent} and $it == "gcsafe") != nil:
|
||||
closureIterator.addPragma(newIdentNode("gcsafe"))
|
||||
@ -255,7 +316,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
|
||||
# -> createCb(retFuture)
|
||||
# NOTE: The "_continue" suffix is checked for in asyncfutures.nim to produce
|
||||
# friendlier stack traces:
|
||||
var cbName = genSym(nskProc, prcName & "_continue")
|
||||
var cbName = genSym(nskVar, prcName & "_continue")
|
||||
var procCb = getAst createCb(retFutureSym, iteratorNameSym,
|
||||
newStrLitNode(prcName),
|
||||
cbName,
|
||||
@ -281,7 +342,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
|
||||
#if prcName == "recvLineInto":
|
||||
# echo(toStrLit(result))
|
||||
|
||||
template await*[T](f: Future[T]): auto =
|
||||
template await*[T](f: Future[T]): untyped =
|
||||
when declared(chronosInternalRetFuture):
|
||||
when not declaredInScope(chronosInternalTmpFuture):
|
||||
var chronosInternalTmpFuture {.inject.}: FutureBase
|
||||
@ -304,6 +365,7 @@ template await*[T](f: Future[T]): auto =
|
||||
if chronosInternalRetFuture.mustCancel:
|
||||
raise newCancelledError()
|
||||
chronosInternalTmpFuture.internalCheckComplete()
|
||||
when T isnot void:
|
||||
cast[type(f)](chronosInternalTmpFuture).internalRead()
|
||||
else:
|
||||
unsupported "await is only available within {.async.}"
|
||||
|
23
vendor/nim-chronos/chronos/asyncsync.nim
vendored
@ -9,6 +9,9 @@
|
||||
# MIT license (LICENSE-MIT)
|
||||
|
||||
## This module implements some core synchronization primitives
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import std/[sequtils, deques]
|
||||
import ./asyncloop
|
||||
|
||||
@ -115,7 +118,7 @@ proc locked*(lock: AsyncLock): bool =
|
||||
## Return `true` if the lock ``lock`` is acquired, `false` otherwise.
|
||||
lock.locked
|
||||
|
||||
proc release*(lock: AsyncLock) =
|
||||
proc release*(lock: AsyncLock) {.raises: [Defect, AsyncLockError].} =
|
||||
## Release a lock ``lock``.
|
||||
##
|
||||
## When the ``lock`` is locked, reset it to unlocked, and return. If any
|
||||
@ -220,7 +223,8 @@ proc empty*[T](aq: AsyncQueue[T]): bool {.inline.} =
|
||||
## Return ``true`` if the queue is empty, ``false`` otherwise.
|
||||
(len(aq.queue) == 0)
|
||||
|
||||
proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) =
|
||||
proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {.
|
||||
raises: [Defect, AsyncQueueFullError].}=
|
||||
## Put an item ``item`` to the beginning of the queue ``aq`` immediately.
|
||||
##
|
||||
## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised.
|
||||
@ -229,7 +233,8 @@ proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) =
|
||||
aq.queue.addFirst(item)
|
||||
aq.getters.wakeupNext()
|
||||
|
||||
proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) =
|
||||
proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {.
|
||||
raises: [Defect, AsyncQueueFullError].}=
|
||||
## Put an item ``item`` at the end of the queue ``aq`` immediately.
|
||||
##
|
||||
## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised.
|
||||
@ -238,7 +243,8 @@ proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) =
|
||||
aq.queue.addLast(item)
|
||||
aq.getters.wakeupNext()
|
||||
|
||||
proc popFirstNoWait*[T](aq: AsyncQueue[T]): T =
|
||||
proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {.
|
||||
raises: [Defect, AsyncQueueEmptyError].} =
|
||||
## Get an item from the beginning of the queue ``aq`` immediately.
|
||||
##
|
||||
## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised.
|
||||
@ -248,7 +254,8 @@ proc popFirstNoWait*[T](aq: AsyncQueue[T]): T =
|
||||
aq.putters.wakeupNext()
|
||||
res
|
||||
|
||||
proc popLastNoWait*[T](aq: AsyncQueue[T]): T =
|
||||
proc popLastNoWait*[T](aq: AsyncQueue[T]): T {.
|
||||
raises: [Defect, AsyncQueueEmptyError].} =
|
||||
## Get an item from the end of the queue ``aq`` immediately.
|
||||
##
|
||||
## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised.
|
||||
@ -314,11 +321,13 @@ proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async.} =
|
||||
raise exc
|
||||
return aq.popLastNoWait()
|
||||
|
||||
proc putNoWait*[T](aq: AsyncQueue[T], item: T) {.inline.} =
|
||||
proc putNoWait*[T](aq: AsyncQueue[T], item: T) {.
|
||||
raises: [Defect, AsyncQueueFullError].} =
|
||||
## Alias of ``addLastNoWait()``.
|
||||
aq.addLastNoWait(item)
|
||||
|
||||
proc getNoWait*[T](aq: AsyncQueue[T]): T {.inline.} =
|
||||
proc getNoWait*[T](aq: AsyncQueue[T]): T {.
|
||||
raises: [Defect, AsyncQueueEmptyError].} =
|
||||
## Alias of ``popFirstNoWait()``.
|
||||
aq.popFirstNoWait()
|
||||
|
||||
|
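A usage sketch for the non-waiting AsyncQueue calls above, showing the exceptions they are now annotated to raise (assuming chronos' newAsyncQueue constructor):

let q = newAsyncQueue[int](maxsize = 1)
try:
  q.putNoWait(1)          # fits
  q.putNoWait(2)          # queue is already full
except AsyncQueueFullError:
  echo "queue full"

try:
  echo q.getNoWait()      # -> 1
  discard q.getNoWait()   # queue is now empty
except AsyncQueueEmptyError:
  echo "queue empty"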
5
vendor/nim-chronos/chronos/debugutils.nim
vendored
@ -6,7 +6,10 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import asyncloop
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import ./asyncloop
|
||||
|
||||
const
|
||||
AllFutureStates* = {FutureState.Pending, FutureState.Cancelled,
|
||||
|
17
vendor/nim-chronos/chronos/handles.nim
vendored
@ -7,7 +7,12 @@
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
|
||||
import net, nativesockets, asyncloop
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import
|
||||
std/[net, nativesockets],
|
||||
./selectors2,
|
||||
./asyncloop
|
||||
|
||||
when defined(windows):
|
||||
import os, winlean
|
||||
@ -88,7 +93,8 @@ proc getSocketError*(socket: AsyncFD, err: var int): bool =
|
||||
result = getSockOpt(socket, cint(SOL_SOCKET), cint(SO_ERROR), err)
|
||||
|
||||
proc createAsyncSocket*(domain: Domain, sockType: SockType,
|
||||
protocol: Protocol): AsyncFD =
|
||||
protocol: Protocol): AsyncFD {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Creates new asynchronous socket.
|
||||
## Returns ``asyncInvalidSocket`` on error.
|
||||
let handle = createNativeSocket(domain, sockType, protocol)
|
||||
@ -104,7 +110,8 @@ proc createAsyncSocket*(domain: Domain, sockType: SockType,
|
||||
result = AsyncFD(handle)
|
||||
register(result)
|
||||
|
||||
proc wrapAsyncSocket*(sock: SocketHandle): AsyncFD =
|
||||
proc wrapAsyncSocket*(sock: SocketHandle): AsyncFD {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Wraps socket to asynchronous socket handle.
|
||||
## Return ``asyncInvalidSocket`` on error.
|
||||
if not setSocketBlocking(sock, false):
|
||||
@ -117,7 +124,7 @@ proc wrapAsyncSocket*(sock: SocketHandle): AsyncFD =
|
||||
result = AsyncFD(sock)
|
||||
register(result)
|
||||
|
||||
proc getMaxOpenFiles*(): int =
|
||||
proc getMaxOpenFiles*(): int {.raises: [Defect, OSError].} =
|
||||
## Returns maximum file descriptor number that can be opened by this process.
|
||||
##
|
||||
## Note: On Windows it is impossible to obtain such a number, so getMaxOpenFiles()
|
||||
@ -131,7 +138,7 @@ proc getMaxOpenFiles*(): int =
|
||||
raiseOSError(osLastError())
|
||||
result = int(limits.rlim_cur)
|
||||
|
||||
proc setMaxOpenFiles*(count: int) =
|
||||
proc setMaxOpenFiles*(count: int) {.raises: [Defect, OSError].} =
|
||||
## Set maximum file descriptor number that can be opened by this process.
|
||||
##
|
||||
## Note: On Windows it is impossible to set this value, so it is just a no-op call.
|
||||
|
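A minimal sketch of creating and releasing an asynchronous socket with the handles API above; asyncInvalidSocket and closeSocket are assumed from the surrounding chronos modules:

let fd = createAsyncSocket(Domain.AF_INET, SockType.SOCK_STREAM,
                           Protocol.IPPROTO_TCP)
if fd == asyncInvalidSocket:
  echo "could not create socket"
else:
  # closeSocket unregisters the descriptor and closes it on a later poll() step
  closeSocket(fd)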
524
vendor/nim-chronos/chronos/ioselects/ioselectors_epoll.nim
vendored
Normal file
@ -0,0 +1,524 @@
|
||||
#
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2016 Eugene Kabanov
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# This module implements Linux epoll().
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import posix, times, epoll
|
||||
|
||||
# Maximum number of events that can be returned
|
||||
const MAX_EPOLL_EVENTS = 64
|
||||
|
||||
when not defined(android):
|
||||
type
|
||||
SignalFdInfo* {.importc: "struct signalfd_siginfo",
|
||||
header: "<sys/signalfd.h>", pure, final.} = object
|
||||
ssi_signo*: uint32
|
||||
ssi_errno*: int32
|
||||
ssi_code*: int32
|
||||
ssi_pid*: uint32
|
||||
ssi_uid*: uint32
|
||||
ssi_fd*: int32
|
||||
ssi_tid*: uint32
|
||||
ssi_band*: uint32
|
||||
ssi_overrun*: uint32
|
||||
ssi_trapno*: uint32
|
||||
ssi_status*: int32
|
||||
ssi_int*: int32
|
||||
ssi_ptr*: uint64
|
||||
ssi_utime*: uint64
|
||||
ssi_stime*: uint64
|
||||
ssi_addr*: uint64
|
||||
pad* {.importc: "__pad".}: array[0..47, uint8]
|
||||
|
||||
proc timerfd_create(clock_id: ClockId, flags: cint): cint
|
||||
{.cdecl, importc: "timerfd_create", header: "<sys/timerfd.h>".}
|
||||
proc timerfd_settime(ufd: cint, flags: cint,
|
||||
utmr: var Itimerspec, otmr: var Itimerspec): cint
|
||||
{.cdecl, importc: "timerfd_settime", header: "<sys/timerfd.h>".}
|
||||
proc eventfd(count: cuint, flags: cint): cint
|
||||
{.cdecl, importc: "eventfd", header: "<sys/eventfd.h>".}
|
||||
|
||||
when not defined(android):
|
||||
proc signalfd(fd: cint, mask: var Sigset, flags: cint): cint
|
||||
{.cdecl, importc: "signalfd", header: "<sys/signalfd.h>".}
|
||||
|
||||
when hasThreadSupport:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
epollFD: cint
|
||||
numFD: int
|
||||
fds: ptr SharedArray[SelectorKey[T]]
|
||||
count: int
|
||||
Selector*[T] = ptr SelectorImpl[T]
|
||||
else:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
epollFD: cint
|
||||
numFD: int
|
||||
fds: seq[SelectorKey[T]]
|
||||
count: int
|
||||
Selector*[T] = ref SelectorImpl[T]
|
||||
type
|
||||
SelectEventImpl = object
|
||||
efd: cint
|
||||
SelectEvent* = ptr SelectEventImpl
|
||||
|
||||
proc newSelector*[T](): Selector[T] {.raises: [Defect, OSError].} =
|
||||
# Retrieve the maximum fd count (for current OS) via getrlimit()
|
||||
var a = RLimit()
|
||||
# Start with a reasonable size, checkFd() will grow this on demand
|
||||
const numFD = 1024
|
||||
|
||||
var epollFD = epoll_create(MAX_EPOLL_EVENTS)
|
||||
if epollFD < 0:
|
||||
raiseOSError(osLastError())
|
||||
|
||||
when hasThreadSupport:
|
||||
result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T])))
|
||||
result.epollFD = epollFD
|
||||
result.numFD = numFD
|
||||
result.fds = allocSharedArray[SelectorKey[T]](numFD)
|
||||
else:
|
||||
result = Selector[T]()
|
||||
result.epollFD = epollFD
|
||||
result.numFD = numFD
|
||||
result.fds = newSeq[SelectorKey[T]](numFD)
|
||||
|
||||
for i in 0 ..< numFD:
|
||||
result.fds[i].ident = InvalidIdent
|
||||
|
||||
proc close*[T](s: Selector[T]) =
|
||||
let res = posix.close(s.epollFD)
|
||||
when hasThreadSupport:
|
||||
deallocSharedArray(s.fds)
|
||||
deallocShared(cast[pointer](s))
|
||||
if res != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc newSelectEvent*(): SelectEvent {.raises: [Defect, OSError, IOSelectorsException].} =
|
||||
let fdci = eventfd(0, 0)
|
||||
if fdci == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fdci)
|
||||
result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
|
||||
result.efd = fdci
|
||||
|
||||
proc trigger*(ev: SelectEvent) {.raises: [Defect, IOSelectorsException].} =
|
||||
var data: uint64 = 1
|
||||
if posix.write(ev.efd, addr data, sizeof(uint64)) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc close*(ev: SelectEvent) {.raises: [Defect, IOSelectorsException].} =
|
||||
let res = posix.close(ev.efd)
|
||||
deallocShared(cast[pointer](ev))
|
||||
if res != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
template checkFd(s, f) =
|
||||
if f >= s.numFD:
|
||||
var numFD = s.numFD
|
||||
while numFD <= f: numFD *= 2
|
||||
when hasThreadSupport:
|
||||
s.fds = reallocSharedArray(s.fds, numFD)
|
||||
else:
|
||||
s.fds.setLen(numFD)
|
||||
for i in s.numFD ..< numFD:
|
||||
s.fds[i].ident = InvalidIdent
|
||||
s.numFD = numFD
|
||||
|
||||
proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) {.
|
||||
raises: [Defect, IOSelectorsException].} =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent, "Descriptor " & $fdi & " already registered")
|
||||
s.setKey(fdi, events, 0, data)
|
||||
if events != {}:
|
||||
var epv = EpollEvent(events: EPOLLRDHUP)
|
||||
epv.data.u64 = fdi.uint
|
||||
if Event.Read in events: epv.events = epv.events or EPOLLIN
|
||||
if Event.Write in events: epv.events = epv.events or EPOLLOUT
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
inc(s.count)
|
||||
|
||||
proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle, events: set[Event]) {.
|
||||
raises: [Defect, IOSelectorsException].} =
|
||||
let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode,
|
||||
Event.User, Event.Oneshot, Event.Error}
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor " & $fdi & " is not registered in the selector!")
|
||||
doAssert(pkey.events * maskEvents == {})
|
||||
if pkey.events != events:
|
||||
var epv = EpollEvent(events: EPOLLRDHUP)
|
||||
epv.data.u64 = fdi.uint
|
||||
|
||||
if Event.Read in events: epv.events = epv.events or EPOLLIN
|
||||
if Event.Write in events: epv.events = epv.events or EPOLLOUT
|
||||
|
||||
if pkey.events == {}:
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
inc(s.count)
|
||||
else:
|
||||
if events != {}:
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_MOD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
else:
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
pkey.events = events
|
||||
|
||||
proc unregister*[T](s: Selector[T], fd: int|SocketHandle) {.raises: [Defect, IOSelectorsException].} =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor " & $fdi & " is not registered in the selector!")
|
||||
if pkey.events != {}:
|
||||
when not defined(android):
|
||||
if Event.Read in pkey.events or Event.Write in pkey.events or Event.User in pkey.events:
|
||||
var epv = EpollEvent()
|
||||
# TODO: Refactor all these EPOLL_CTL_DEL + dec(s.count) into a proc.
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
elif Event.Timer in pkey.events:
|
||||
if Event.Finished notin pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
if posix.close(cint(fdi)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
elif Event.Signal in pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
var nmask, omask: Sigset
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, cint(s.fds[fdi].param))
|
||||
unblockSignals(nmask, omask)
|
||||
dec(s.count)
|
||||
if posix.close(cint(fdi)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
elif Event.Process in pkey.events:
|
||||
if Event.Finished notin pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
var nmask, omask: Sigset
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, SIGCHLD)
|
||||
unblockSignals(nmask, omask)
|
||||
dec(s.count)
|
||||
if posix.close(cint(fdi)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
else:
|
||||
if Event.Read in pkey.events or Event.Write in pkey.events or Event.User in pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
elif Event.Timer in pkey.events:
|
||||
if Event.Finished notin pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
if posix.close(cint(fdi)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
clearKey(pkey)
|
||||
|
||||
proc unregister*[T](s: Selector[T], ev: SelectEvent) {.
|
||||
raises: [Defect, IOSelectorsException].} =
|
||||
let fdi = int(ev.efd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!")
|
||||
doAssert(Event.User in pkey.events)
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
dec(s.count)
|
||||
clearKey(pkey)
|
||||
|
||||
proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool,
|
||||
data: T): int {.
|
||||
discardable, raises: [Defect, IOSelectorsException].} =
|
||||
var
|
||||
newTs: Itimerspec
|
||||
oldTs: Itimerspec
|
||||
let fdi = timerfd_create(CLOCK_MONOTONIC, 0).int
|
||||
if fdi == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fdi.cint)
|
||||
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
var events = {Event.Timer}
|
||||
var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP)
|
||||
epv.data.u64 = fdi.uint
|
||||
|
||||
if oneshot:
|
||||
newTs.it_interval.tv_sec = posix.Time(0)
|
||||
newTs.it_interval.tv_nsec = 0
|
||||
newTs.it_value.tv_sec = posix.Time(timeout div 1_000)
|
||||
newTs.it_value.tv_nsec = (timeout %% 1_000) * 1_000_000
|
||||
incl(events, Event.Oneshot)
|
||||
epv.events = epv.events or EPOLLONESHOT
|
||||
else:
|
||||
newTs.it_interval.tv_sec = posix.Time(timeout div 1000)
|
||||
newTs.it_interval.tv_nsec = (timeout %% 1_000) * 1_000_000
|
||||
newTs.it_value.tv_sec = newTs.it_interval.tv_sec
|
||||
newTs.it_value.tv_nsec = newTs.it_interval.tv_nsec
|
||||
|
||||
if timerfd_settime(fdi.cint, cint(0), newTs, oldTs) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
s.setKey(fdi, events, 0, data)
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
when not defined(android):
|
||||
proc registerSignal*[T](s: Selector[T], signal: int,
|
||||
data: T): int {.
|
||||
discardable, raises: [Defect, OSError, IOSelectorsException].} =
|
||||
var
|
||||
nmask: Sigset
|
||||
omask: Sigset
|
||||
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, cint(signal))
|
||||
blockSignals(nmask, omask)
|
||||
|
||||
let fdi = signalfd(-1, nmask, 0).int
|
||||
if fdi == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fdi.cint)
|
||||
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP)
|
||||
epv.data.u64 = fdi.uint
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
s.setKey(fdi, {Event.Signal}, signal, data)
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
proc registerProcess*[T](s: Selector, pid: int,
|
||||
data: T): int {.
|
||||
discardable, raises: [Defect, IOSelectorsException].} =
|
||||
var
|
||||
nmask: Sigset
|
||||
omask: Sigset
|
||||
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, posix.SIGCHLD)
|
||||
blockSignals(nmask, omask)
|
||||
|
||||
let fdi = signalfd(-1, nmask, 0).int
|
||||
if fdi == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fdi.cint)
|
||||
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP)
|
||||
epv.data.u64 = fdi.uint
|
||||
epv.events = EPOLLIN or EPOLLRDHUP
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
s.setKey(fdi, {Event.Process, Event.Oneshot}, pid, data)
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) =
|
||||
let fdi = int(ev.efd)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!")
|
||||
s.setKey(fdi, {Event.User}, 0, data)
|
||||
var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP)
|
||||
epv.data.u64 = ev.efd.uint
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, ev.efd, addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
inc(s.count)
|
||||
|
||||
proc selectInto*[T](s: Selector[T], timeout: int,
|
||||
results: var openArray[ReadyKey]): int {.raises: [Defect, IOSelectorsException].} =
|
||||
var
|
||||
resTable: array[MAX_EPOLL_EVENTS, EpollEvent]
|
||||
maxres = MAX_EPOLL_EVENTS
|
||||
i, k: int
|
||||
|
||||
if maxres > len(results):
|
||||
maxres = len(results)
|
||||
|
||||
verifySelectParams(timeout)
|
||||
|
||||
let count = epoll_wait(s.epollFD, addr(resTable[0]), maxres.cint,
|
||||
timeout.cint)
|
||||
if count < 0:
|
||||
result = 0
|
||||
let err = osLastError()
|
||||
if cint(err) != EINTR:
|
||||
raiseIOSelectorsError(err)
|
||||
elif count == 0:
|
||||
result = 0
|
||||
else:
|
||||
i = 0
|
||||
k = 0
|
||||
while i < count:
|
||||
let fdi = int(resTable[i].data.u64)
|
||||
let pevents = resTable[i].events
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent)
|
||||
var rkey = ReadyKey(fd: fdi, events: {})
|
||||
|
||||
if (pevents and EPOLLERR) != 0 or (pevents and EPOLLHUP) != 0:
|
||||
if (pevents and EPOLLHUP) != 0:
|
||||
rkey.errorCode = OSErrorCode ECONNRESET
|
||||
else:
|
||||
# Try reading SO_ERROR from fd.
|
||||
var error: cint
|
||||
var size = SockLen sizeof(error)
|
||||
if getsockopt(SocketHandle fdi, SOL_SOCKET, SO_ERROR, addr(error),
|
||||
addr(size)) == 0'i32:
|
||||
rkey.errorCode = OSErrorCode error
|
||||
|
||||
rkey.events.incl(Event.Error)
|
||||
if (pevents and EPOLLOUT) != 0:
|
||||
rkey.events.incl(Event.Write)
|
||||
when not defined(android):
|
||||
if (pevents and EPOLLIN) != 0:
|
||||
if Event.Read in pkey.events:
|
||||
rkey.events.incl(Event.Read)
|
||||
elif Event.Timer in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(uint64)) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
rkey.events.incl(Event.Timer)
|
||||
elif Event.Signal in pkey.events:
|
||||
var data = SignalFdInfo()
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(SignalFdInfo)) != sizeof(SignalFdInfo):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
rkey.events.incl(Event.Signal)
|
||||
elif Event.Process in pkey.events:
|
||||
var data = SignalFdInfo()
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(SignalFdInfo)) != sizeof(SignalFdInfo):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
if cast[int](data.ssi_pid) == pkey.param:
|
||||
rkey.events.incl(Event.Process)
|
||||
else:
|
||||
inc(i)
|
||||
continue
|
||||
elif Event.User in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(uint64)) != sizeof(uint64):
|
||||
let err = osLastError()
|
||||
if err == OSErrorCode(EAGAIN):
|
||||
inc(i)
|
||||
continue
|
||||
else:
|
||||
raiseIOSelectorsError(err)
|
||||
rkey.events.incl(Event.User)
|
||||
else:
|
||||
if (pevents and EPOLLIN) != 0:
|
||||
if Event.Read in pkey.events:
|
||||
rkey.events.incl(Event.Read)
|
||||
elif Event.Timer in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(uint64)) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
rkey.events.incl(Event.Timer)
|
||||
elif Event.User in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(cint(fdi), addr data,
|
||||
sizeof(uint64)) != sizeof(uint64):
|
||||
let err = osLastError()
|
||||
if err == OSErrorCode(EAGAIN):
|
||||
inc(i)
|
||||
continue
|
||||
else:
|
||||
raiseIOSelectorsError(err)
|
||||
rkey.events.incl(Event.User)
|
||||
|
||||
if Event.Oneshot in pkey.events:
|
||||
var epv = EpollEvent()
|
||||
if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, cint(fdi), addr epv) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
# we will not clear key until it will be unregistered, so
|
||||
# application can obtain data, but we will decrease counter,
|
||||
# because epoll is empty.
|
||||
dec(s.count)
|
||||
# we are marking key with `Finished` event, to avoid double decrease.
|
||||
pkey.events.incl(Event.Finished)
|
||||
|
||||
results[k] = rkey
|
||||
inc(k)
|
||||
inc(i)
|
||||
result = k
|
||||
|
||||
proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] =
|
||||
result = newSeq[ReadyKey](MAX_EPOLL_EVENTS)
|
||||
let count = selectInto(s, timeout, result)
|
||||
result.setLen(count)
|
||||
|
||||
template isEmpty*[T](s: Selector[T]): bool =
|
||||
(s.count == 0)
|
||||
|
||||
proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} =
|
||||
let fdi = int(fd)
|
||||
fdi < s.numFD and s.fds[fdi].ident != InvalidIdent
|
||||
|
||||
proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
if fdi in s:
|
||||
s.fds[fdi].data = data
|
||||
result = true
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body: untyped) =
|
||||
mixin checkFd
|
||||
let fdi = int(fd)
|
||||
if fdi in s:
|
||||
var value = addr(s.fds[fdi].data)
|
||||
body
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1,
|
||||
body2: untyped) =
|
||||
let fdi = int(fd)
|
||||
if fdi in s:
|
||||
var value = addr(s.fds[fdi].data)
|
||||
body1
|
||||
else:
|
||||
body2
|
||||
|
||||
proc getFd*[T](s: Selector[T]): int =
|
||||
return s.epollFd.int
|
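To make the selector surface of this new file concrete, a small sketch of the usual register/selectInto loop; sock is assumed to be a non-blocking SocketHandle:

var selector = newSelector[int]()
selector.registerHandle(sock, {Event.Read}, 42)

var keys = newSeq[ReadyKey](64)
let n = selector.selectInto(15, keys)   # wait up to 15 ms
for i in 0 ..< n:
  echo "fd ", keys[i].fd, " ready with ", keys[i].events

selector.unregister(sock)
selector.close()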
625
vendor/nim-chronos/chronos/ioselects/ioselectors_kqueue.nim
vendored
Normal file
@ -0,0 +1,625 @@
|
||||
#
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2016 Eugene Kabanov
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# This module implements BSD kqueue().
|
||||
|
||||
import posix, times, kqueue
|
||||
|
||||
const
|
||||
# Maximum number of events that can be returned.
|
||||
MAX_KQUEUE_EVENTS = 64
|
||||
# SIG_IGN and SIG_DFL declared in posix.nim as variables, but we need them
|
||||
# to be constants and GC-safe.
|
||||
SIG_DFL = cast[proc(x: cint) {.raises: [],noconv,gcsafe.}](0)
|
||||
SIG_IGN = cast[proc(x: cint) {.raises: [],noconv,gcsafe.}](1)
|
||||
|
||||
when defined(kqcache):
|
||||
const CACHE_EVENTS = true
|
||||
|
||||
when defined(macosx) or defined(freebsd) or defined(dragonfly):
|
||||
when defined(macosx):
|
||||
const MAX_DESCRIPTORS_ID = 29 # KERN_MAXFILESPERPROC (MacOS)
|
||||
else:
|
||||
const MAX_DESCRIPTORS_ID = 27 # KERN_MAXFILESPERPROC (FreeBSD)
|
||||
proc sysctl(name: ptr cint, namelen: cuint, oldp: pointer, oldplen: ptr csize_t,
|
||||
newp: pointer, newplen: csize_t): cint
|
||||
{.importc: "sysctl",header: """#include <sys/types.h>
|
||||
#include <sys/sysctl.h>"""}
|
||||
elif defined(netbsd) or defined(openbsd):
|
||||
# OpenBSD and NetBSD don't have KERN_MAXFILESPERPROC, so we are using
|
||||
# KERN_MAXFILES, because KERN_MAXFILES is always bigger,
|
||||
# than KERN_MAXFILESPERPROC.
|
||||
const MAX_DESCRIPTORS_ID = 7 # KERN_MAXFILES
|
||||
proc sysctl(name: ptr cint, namelen: cuint, oldp: pointer, oldplen: ptr csize_t,
|
||||
newp: pointer, newplen: csize_t): cint
|
||||
{.importc: "sysctl",header: """#include <sys/param.h>
|
||||
#include <sys/sysctl.h>"""}
|
||||
|
||||
when hasThreadSupport:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
kqFD: cint
|
||||
maxFD: int
|
||||
changes: ptr SharedArray[KEvent]
|
||||
fds: ptr SharedArray[SelectorKey[T]]
|
||||
count: int
|
||||
changesLock: Lock
|
||||
changesSize: int
|
||||
changesLength: int
|
||||
sock: cint
|
||||
Selector*[T] = ptr SelectorImpl[T]
|
||||
else:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
kqFD: cint
|
||||
maxFD: int
|
||||
changes: seq[KEvent]
|
||||
fds: seq[SelectorKey[T]]
|
||||
count: int
|
||||
sock: cint
|
||||
Selector*[T] = ref SelectorImpl[T]
|
||||
|
||||
type
|
||||
SelectEventImpl = object
|
||||
rfd: cint
|
||||
wfd: cint
|
||||
|
||||
SelectEvent* = ptr SelectEventImpl
|
||||
# SelectEvent is declared as `ptr` to be placed in `shared memory`,
|
||||
# so you can share one SelectEvent handle between threads.
|
||||
|
||||
proc getUnique[T](s: Selector[T]): int {.inline.} =
|
||||
# we create duplicated handles to get unique indexes for our `fds` array.
|
||||
result = posix.fcntl(s.sock, F_DUPFD, s.sock)
|
||||
if result == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc newSelector*[T](): owned(Selector[T]) =
|
||||
var maxFD = 0.cint
|
||||
var size = csize_t(sizeof(cint))
|
||||
var namearr = [1.cint, MAX_DESCRIPTORS_ID.cint]
|
||||
# Obtain maximum number of opened file descriptors for process
|
||||
if sysctl(addr(namearr[0]), 2, cast[pointer](addr maxFD), addr size,
|
||||
nil, 0) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
var kqFD = kqueue()
|
||||
if kqFD < 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
# we allocate an empty socket so that its handle can be duplicated later, to get unique
|
||||
# indexes for `fds` array. This is needed to properly identify
|
||||
# {Event.Timer, Event.Signal, Event.Process} events.
|
||||
let usock = posix.socket(posix.AF_INET, posix.SOCK_STREAM,
|
||||
posix.IPPROTO_TCP).cint
|
||||
if usock == -1:
|
||||
let err = osLastError()
|
||||
discard posix.close(kqFD)
|
||||
raiseIOSelectorsError(err)
|
||||
|
||||
when hasThreadSupport:
|
||||
result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T])))
|
||||
result.fds = allocSharedArray[SelectorKey[T]](maxFD)
|
||||
result.changes = allocSharedArray[KEvent](MAX_KQUEUE_EVENTS)
|
||||
result.changesSize = MAX_KQUEUE_EVENTS
|
||||
initLock(result.changesLock)
|
||||
else:
|
||||
result = Selector[T]()
|
||||
result.fds = newSeq[SelectorKey[T]](maxFD)
|
||||
result.changes = newSeqOfCap[KEvent](MAX_KQUEUE_EVENTS)
|
||||
|
||||
for i in 0 ..< maxFD:
|
||||
result.fds[i].ident = InvalidIdent
|
||||
|
||||
result.sock = usock
|
||||
result.kqFD = kqFD
|
||||
result.maxFD = maxFD.int
|
||||
|
||||
proc close*[T](s: Selector[T]) =
|
||||
let res1 = posix.close(s.kqFD)
|
||||
let res2 = posix.close(s.sock)
|
||||
when hasThreadSupport:
|
||||
deinitLock(s.changesLock)
|
||||
deallocSharedArray(s.fds)
|
||||
deallocShared(cast[pointer](s))
|
||||
if res1 != 0 or res2 != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc newSelectEvent*(): SelectEvent =
|
||||
var fds: array[2, cint]
|
||||
if posix.pipe(fds) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fds[0])
|
||||
setNonBlocking(fds[1])
|
||||
result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
|
||||
result.rfd = fds[0]
|
||||
result.wfd = fds[1]
|
||||
|
||||
proc trigger*(ev: SelectEvent) =
|
||||
var data: uint64 = 1
|
||||
if posix.write(ev.wfd, addr data, sizeof(uint64)) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc close*(ev: SelectEvent) =
|
||||
let res1 = posix.close(ev.rfd)
|
||||
let res2 = posix.close(ev.wfd)
|
||||
deallocShared(cast[pointer](ev))
|
||||
if res1 != 0 or res2 != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
template checkFd(s, f) =
|
||||
if f >= s.maxFD:
|
||||
raiseIOSelectorsError("Maximum number of descriptors is exhausted!")
|
||||
|
||||
when hasThreadSupport:
|
||||
template withChangeLock[T](s: Selector[T], body: untyped) =
|
||||
acquire(s.changesLock)
|
||||
{.locks: [s.changesLock].}:
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
release(s.changesLock)
|
||||
else:
|
||||
template withChangeLock(s, body: untyped) =
|
||||
body
|
||||
|
||||
when hasThreadSupport:
|
||||
template modifyKQueue[T](s: Selector[T], nident: uint, nfilter: cshort,
|
||||
nflags: cushort, nfflags: cuint, ndata: int,
|
||||
nudata: pointer) =
|
||||
mixin withChangeLock
|
||||
s.withChangeLock():
|
||||
if s.changesLength == s.changesSize:
|
||||
# if the cache array is full, allocate a new one with twice the size
|
||||
let newSize = s.changesSize shl 1
|
||||
let rdata = allocSharedArray[KEvent](newSize)
|
||||
copyMem(rdata, s.changes, s.changesSize * sizeof(KEvent))
|
||||
s.changesSize = newSize
|
||||
s.changes[s.changesLength] = KEvent(ident: nident,
|
||||
filter: nfilter, flags: nflags,
|
||||
fflags: nfflags, data: ndata,
|
||||
udata: nudata)
|
||||
inc(s.changesLength)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
template flushKQueue[T](s: Selector[T]) =
|
||||
mixin withChangeLock
|
||||
s.withChangeLock():
|
||||
if s.changesLength > 0:
|
||||
if kevent(s.kqFD, addr(s.changes[0]), cint(s.changesLength),
|
||||
nil, 0, nil) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
s.changesLength = 0
|
||||
else:
|
||||
template modifyKQueue[T](s: Selector[T], nident: uint, nfilter: cshort,
|
||||
nflags: cushort, nfflags: cuint, ndata: int,
|
||||
nudata: pointer) =
|
||||
s.changes.add(KEvent(ident: nident,
|
||||
filter: nfilter, flags: nflags,
|
||||
fflags: nfflags, data: ndata,
|
||||
udata: nudata))
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
template flushKQueue[T](s: Selector[T]) =
|
||||
let length = cint(len(s.changes))
|
||||
if length > 0:
|
||||
if kevent(s.kqFD, addr(s.changes[0]), length,
|
||||
nil, 0, nil) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
s.changes.setLen(0)
|
||||
|
||||
proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
s.setKey(fdi, events, 0, data)
|
||||
|
||||
if events != {}:
|
||||
if Event.Read in events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_READ, EV_ADD, 0, 0, nil)
|
||||
inc(s.count)
|
||||
if Event.Write in events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_WRITE, EV_ADD, 0, 0, nil)
|
||||
inc(s.count)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event]) =
|
||||
let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode,
|
||||
Event.User, Event.Oneshot, Event.Error}
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor $# is not registered in the queue!" % $fdi)
|
||||
doAssert(pkey.events * maskEvents == {})
|
||||
|
||||
if pkey.events != events:
|
||||
if (Event.Read in pkey.events) and (Event.Read notin events):
|
||||
modifyKQueue(s, fdi.uint, EVFILT_READ, EV_DELETE, 0, 0, nil)
|
||||
dec(s.count)
|
||||
if (Event.Write in pkey.events) and (Event.Write notin events):
|
||||
modifyKQueue(s, fdi.uint, EVFILT_WRITE, EV_DELETE, 0, 0, nil)
|
||||
dec(s.count)
|
||||
if (Event.Read notin pkey.events) and (Event.Read in events):
|
||||
modifyKQueue(s, fdi.uint, EVFILT_READ, EV_ADD, 0, 0, nil)
|
||||
inc(s.count)
|
||||
if (Event.Write notin pkey.events) and (Event.Write in events):
|
||||
modifyKQueue(s, fdi.uint, EVFILT_WRITE, EV_ADD, 0, 0, nil)
|
||||
inc(s.count)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
pkey.events = events
|
||||
|
||||
proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool,
|
||||
data: T): int {.discardable.} =
|
||||
let fdi = getUnique(s)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
let events = if oneshot: {Event.Timer, Event.Oneshot} else: {Event.Timer}
|
||||
let flags: cushort = if oneshot: EV_ONESHOT or EV_ADD else: EV_ADD
|
||||
|
||||
s.setKey(fdi, events, 0, data)
|
||||
|
||||
# EVFILT_TIMER on Open/Net(BSD) has granularity of only milliseconds,
|
||||
# but MacOS and FreeBSD allow use `0` as `fflags` to use milliseconds
|
||||
# too
|
||||
modifyKQueue(s, fdi.uint, EVFILT_TIMER, flags, 0, cint(timeout), nil)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
proc registerSignal*[T](s: Selector[T], signal: int,
|
||||
data: T): int {.discardable.} =
|
||||
let fdi = getUnique(s)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
s.setKey(fdi, {Event.Signal}, signal, data)
|
||||
var nmask, omask: Sigset
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, cint(signal))
|
||||
blockSignals(nmask, omask)
|
||||
# to be compatible with Linux semantics we need to "eat" signals
|
||||
posix.signal(cint(signal), SIG_IGN)
|
||||
|
||||
modifyKQueue(s, signal.uint, EVFILT_SIGNAL, EV_ADD, 0, 0,
|
||||
cast[pointer](fdi))
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
proc registerProcess*[T](s: Selector[T], pid: int,
|
||||
data: T): int {.discardable.} =
|
||||
let fdi = getUnique(s)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
|
||||
var kflags: cushort = EV_ONESHOT or EV_ADD
|
||||
setKey(s, fdi, {Event.Process, Event.Oneshot}, pid, data)
|
||||
|
||||
modifyKQueue(s, pid.uint, EVFILT_PROC, kflags, NOTE_EXIT, 0,
|
||||
cast[pointer](fdi))
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
inc(s.count)
|
||||
result = fdi
|
||||
|
||||
proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) =
|
||||
let fdi = ev.rfd.int
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!")
|
||||
setKey(s, fdi, {Event.User}, 0, data)
|
||||
|
||||
modifyKQueue(s, fdi.uint, EVFILT_READ, EV_ADD, 0, 0, nil)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
inc(s.count)
|
||||
|
||||
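A sketch of the SelectEvent wake-up path registered just above; since the event lives in shared memory, trigger() may be called from another thread:

var selector = newSelector[int]()
let ev = newSelectEvent()
selector.registerEvent(ev, 0)

ev.trigger()                      # e.g. from a producer thread

var keys = newSeq[ReadyKey](1)
if selector.selectInto(-1, keys) > 0:
  echo "woken up by user event"

selector.unregister(ev)
ev.close()
selector.close()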
template processVnodeEvents(events: set[Event]): cuint =
|
||||
var rfflags = 0.cuint
|
||||
if events == {Event.VnodeWrite, Event.VnodeDelete, Event.VnodeExtend,
|
||||
Event.VnodeAttrib, Event.VnodeLink, Event.VnodeRename,
|
||||
Event.VnodeRevoke}:
|
||||
rfflags = NOTE_DELETE or NOTE_WRITE or NOTE_EXTEND or NOTE_ATTRIB or
|
||||
NOTE_LINK or NOTE_RENAME or NOTE_REVOKE
|
||||
else:
|
||||
if Event.VnodeDelete in events: rfflags = rfflags or NOTE_DELETE
|
||||
if Event.VnodeWrite in events: rfflags = rfflags or NOTE_WRITE
|
||||
if Event.VnodeExtend in events: rfflags = rfflags or NOTE_EXTEND
|
||||
if Event.VnodeAttrib in events: rfflags = rfflags or NOTE_ATTRIB
|
||||
if Event.VnodeLink in events: rfflags = rfflags or NOTE_LINK
|
||||
if Event.VnodeRename in events: rfflags = rfflags or NOTE_RENAME
|
||||
if Event.VnodeRevoke in events: rfflags = rfflags or NOTE_REVOKE
|
||||
rfflags
|
||||
|
||||
proc registerVnode*[T](s: Selector[T], fd: cint, events: set[Event], data: T) =
|
||||
let fdi = fd.int
|
||||
setKey(s, fdi, {Event.Vnode} + events, 0, data)
|
||||
var fflags = processVnodeEvents(events)
|
||||
|
||||
modifyKQueue(s, fdi.uint, EVFILT_VNODE, EV_ADD or EV_CLEAR, fflags, 0, nil)
|
||||
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
|
||||
inc(s.count)
|
||||
|
||||
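For illustration, watching a file for writes and deletion with registerVnode; fileFd is assumed to be an already opened descriptor of type cint:

var selector = newSelector[int]()
selector.registerVnode(fileFd, {Event.VnodeWrite, Event.VnodeDelete}, 0)

var keys = newSeq[ReadyKey](1)
if selector.selectInto(1000, keys) > 0:   # wait up to one second
  if Event.VnodeDelete in keys[0].events:
    echo "file was deleted"
  elif Event.VnodeWrite in keys[0].events:
    echo "file was written to"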
proc unregister*[T](s: Selector[T], fd: int|SocketHandle) =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor [" & $fdi & "] is not registered in the queue!")
|
||||
|
||||
if pkey.events != {}:
|
||||
if pkey.events * {Event.Read, Event.Write} != {}:
|
||||
if Event.Read in pkey.events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil)
|
||||
dec(s.count)
|
||||
if Event.Write in pkey.events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_WRITE, EV_DELETE, 0, 0, nil)
|
||||
dec(s.count)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
elif Event.Timer in pkey.events:
|
||||
if Event.Finished notin pkey.events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_TIMER, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
dec(s.count)
|
||||
if posix.close(cint(pkey.ident)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
elif Event.Signal in pkey.events:
|
||||
var nmask, omask: Sigset
|
||||
let signal = cint(pkey.param)
|
||||
discard sigemptyset(nmask)
|
||||
discard sigemptyset(omask)
|
||||
discard sigaddset(nmask, signal)
|
||||
unblockSignals(nmask, omask)
|
||||
posix.signal(signal, SIG_DFL)
|
||||
modifyKQueue(s, uint(pkey.param), EVFILT_SIGNAL, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
dec(s.count)
|
||||
if posix.close(cint(pkey.ident)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
elif Event.Process in pkey.events:
|
||||
if Event.Finished notin pkey.events:
|
||||
modifyKQueue(s, uint(pkey.param), EVFILT_PROC, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
dec(s.count)
|
||||
if posix.close(cint(pkey.ident)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
elif Event.Vnode in pkey.events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_VNODE, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
dec(s.count)
|
||||
elif Event.User in pkey.events:
|
||||
modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
dec(s.count)
|
||||
|
||||
clearKey(pkey)
|
||||
|
||||
proc unregister*[T](s: Selector[T], ev: SelectEvent) =
|
||||
let fdi = int(ev.rfd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!")
|
||||
doAssert(Event.User in pkey.events)
|
||||
modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil)
|
||||
when not declared(CACHE_EVENTS):
|
||||
flushKQueue(s)
|
||||
clearKey(pkey)
|
||||
dec(s.count)
|
||||
|
||||
proc selectInto*[T](s: Selector[T], timeout: int,
|
||||
results: var openArray[ReadyKey]): int =
|
||||
var
|
||||
tv: Timespec
|
||||
resTable: array[MAX_KQUEUE_EVENTS, KEvent]
|
||||
ptv = addr tv
|
||||
maxres = MAX_KQUEUE_EVENTS
|
||||
|
||||
verifySelectParams(timeout)
|
||||
|
||||
if timeout != -1:
|
||||
if timeout >= 1000:
|
||||
tv.tv_sec = posix.Time(timeout div 1_000)
|
||||
tv.tv_nsec = (timeout %% 1_000) * 1_000_000
|
||||
else:
|
||||
tv.tv_sec = posix.Time(0)
|
||||
tv.tv_nsec = timeout * 1_000_000
|
||||
else:
|
||||
ptv = nil
|
||||
|
||||
if maxres > len(results):
|
||||
maxres = len(results)
|
||||
|
||||
var count = 0
|
||||
when not declared(CACHE_EVENTS):
|
||||
count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres), ptv)
|
||||
else:
|
||||
when hasThreadSupport:
|
||||
s.withChangeLock():
|
||||
if s.changesLength > 0:
|
||||
count = kevent(s.kqFD, addr(s.changes[0]), cint(s.changesLength),
|
||||
addr(resTable[0]), cint(maxres), ptv)
|
||||
s.changesLength = 0
|
||||
else:
|
||||
count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres),
|
||||
ptv)
|
||||
else:
|
||||
let length = cint(len(s.changes))
|
||||
if length > 0:
|
||||
count = kevent(s.kqFD, addr(s.changes[0]), length,
|
||||
addr(resTable[0]), cint(maxres), ptv)
|
||||
s.changes.setLen(0)
|
||||
else:
|
||||
count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres),
|
||||
ptv)
|
||||
|
||||
if count < 0:
|
||||
result = 0
|
||||
let err = osLastError()
|
||||
if cint(err) != EINTR:
|
||||
raiseIOSelectorsError(err)
|
||||
elif count == 0:
|
||||
result = 0
|
||||
else:
|
||||
var i = 0
|
||||
var k = 0 # do not delete this, because `continue` used in cycle.
|
||||
var pkey: ptr SelectorKey[T]
|
||||
while i < count:
|
||||
let kevent = addr(resTable[i])
|
||||
var rkey = ReadyKey(fd: int(kevent.ident), events: {})
|
||||
|
||||
if (kevent.flags and EV_ERROR) != 0:
|
||||
rkey.events = {Event.Error}
|
||||
rkey.errorCode = OSErrorCode(kevent.data)
|
||||
|
||||
case kevent.filter:
|
||||
of EVFILT_READ:
|
||||
pkey = addr(s.fds[int(kevent.ident)])
|
||||
rkey.events.incl(Event.Read)
|
||||
if Event.User in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(cint(kevent.ident), addr data,
|
||||
sizeof(uint64)) != sizeof(uint64):
|
||||
let err = osLastError()
|
||||
if err == OSErrorCode(EAGAIN):
|
||||
# someone already consumed event data
|
||||
inc(i)
|
||||
continue
|
||||
else:
|
||||
raiseIOSelectorsError(err)
|
||||
rkey.events = {Event.User}
|
||||
of EVFILT_WRITE:
|
||||
pkey = addr(s.fds[int(kevent.ident)])
|
||||
rkey.events.incl(Event.Write)
|
||||
rkey.events = {Event.Write}
|
||||
of EVFILT_TIMER:
|
||||
pkey = addr(s.fds[int(kevent.ident)])
|
||||
if Event.Oneshot in pkey.events:
|
||||
# we will not clear key until it will be unregistered, so
|
||||
# application can obtain data, but we will decrease counter,
|
||||
# because kqueue is empty.
|
||||
dec(s.count)
|
||||
# we are marking key with `Finished` event, to avoid double decrease.
|
||||
pkey.events.incl(Event.Finished)
|
||||
rkey.events.incl(Event.Timer)
|
||||
of EVFILT_VNODE:
|
||||
pkey = addr(s.fds[int(kevent.ident)])
|
||||
rkey.events.incl(Event.Vnode)
|
||||
if (kevent.fflags and NOTE_DELETE) != 0:
|
||||
rkey.events.incl(Event.VnodeDelete)
|
||||
if (kevent.fflags and NOTE_WRITE) != 0:
|
||||
rkey.events.incl(Event.VnodeWrite)
|
||||
if (kevent.fflags and NOTE_EXTEND) != 0:
|
||||
rkey.events.incl(Event.VnodeExtend)
|
||||
if (kevent.fflags and NOTE_ATTRIB) != 0:
|
||||
rkey.events.incl(Event.VnodeAttrib)
|
||||
if (kevent.fflags and NOTE_LINK) != 0:
|
||||
rkey.events.incl(Event.VnodeLink)
|
||||
if (kevent.fflags and NOTE_RENAME) != 0:
|
||||
rkey.events.incl(Event.VnodeRename)
|
||||
if (kevent.fflags and NOTE_REVOKE) != 0:
|
||||
rkey.events.incl(Event.VnodeRevoke)
|
||||
of EVFILT_SIGNAL:
|
||||
pkey = addr(s.fds[cast[int](kevent.udata)])
|
||||
rkey.fd = cast[int](kevent.udata)
|
||||
rkey.events.incl(Event.Signal)
|
||||
of EVFILT_PROC:
|
||||
rkey.fd = cast[int](kevent.udata)
|
||||
pkey = addr(s.fds[cast[int](kevent.udata)])
|
||||
# we will not clear key, until it will be unregistered, so
|
||||
# application can obtain data, but we will decrease counter,
|
||||
# because kqueue is empty.
|
||||
dec(s.count)
|
||||
# we are marking key with `Finished` event, to avoid double decrease.
|
||||
pkey.events.incl(Event.Finished)
|
||||
rkey.events.incl(Event.Process)
|
||||
else:
|
||||
doAssert(false, "Unsupported kqueue filter in the queue!")
|
||||
|
||||
if (kevent.flags and EV_EOF) != 0:
|
||||
# TODO this error handling needs to be rethought.
|
||||
# `fflags` can sometimes be `0x80000000` and thus we use 'cast'
|
||||
# here:
|
||||
if kevent.fflags != 0:
|
||||
rkey.errorCode = cast[OSErrorCode](kevent.fflags)
|
||||
else:
|
||||
# This assumes we are dealing with sockets.
|
||||
# TODO: For future-proofing it might be a good idea to give the
|
||||
# user access to the raw `kevent`.
|
||||
rkey.errorCode = OSErrorCode(ECONNRESET)
|
||||
rkey.events.incl(Event.Error)
|
||||
|
||||
results[k] = rkey
|
||||
inc(k)
|
||||
inc(i)
|
||||
result = k
|
||||
|
||||
proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] =
|
||||
result = newSeq[ReadyKey](MAX_KQUEUE_EVENTS)
|
||||
let count = selectInto(s, timeout, result)
|
||||
result.setLen(count)
|
||||
|
||||
template isEmpty*[T](s: Selector[T]): bool =
|
||||
(s.count == 0)
|
||||
|
||||
proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} =
|
||||
let fdi = fd.int
|
||||
fdi < s.maxFD and s.fds[fd.int].ident != InvalidIdent
|
||||
|
||||
proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool =
|
||||
let fdi = int(fd)
|
||||
if fdi in s:
|
||||
s.fds[fdi].data = data
|
||||
result = true
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body: untyped) =
|
||||
let fdi = int(fd)
|
||||
if fdi in s:
|
||||
var value = addr(s.fds[fdi].data)
|
||||
body
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1,
|
||||
body2: untyped) =
|
||||
let fdi = int(fd)
|
||||
if fdi in s:
|
||||
var value = addr(s.fds[fdi].data)
|
||||
body1
|
||||
else:
|
||||
body2
|
||||
|
||||
|
||||
proc getFd*[T](s: Selector[T]): int =
|
||||
return s.kqFD.int
|
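The EV_EOF handling above surfaces connection problems as Event.Error with an errorCode on the ReadyKey. A minimal sketch of how a caller might consume that mapping follows; it is illustrative only, and the handleKey proc, the Selector instance and the import path are assumptions rather than part of this diff.

import std/os                      # osErrorMsg
import ./selectors2                # assumed import path for the vendored module

proc handleKey[T](sel: Selector[T], key: ReadyKey) =
  if Event.Error in key.events:
    # errorCode carries kevent.fflags when it is set, otherwise ECONNRESET
    echo "fd ", key.fd, " error: ", osErrorMsg(key.errorCode)
    sel.unregister(key.fd)
  elif Event.Read in key.events:
    discard                        # read from key.fd here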
vendor/nim-chronos/chronos/ioselects/ioselectors_poll.nim (vendored, new file, 310 lines)
@ -0,0 +1,310 @@
|
||||
#
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2016 Eugene Kabanov
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# This module implements Posix poll().
|
||||
|
||||
import posix, times
|
||||
|
||||
# Maximum number of events that can be returned
|
||||
const MAX_POLL_EVENTS = 64
|
||||
|
||||
when hasThreadSupport:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
maxFD : int
|
||||
pollcnt: int
|
||||
fds: ptr SharedArray[SelectorKey[T]]
|
||||
pollfds: ptr SharedArray[TPollFd]
|
||||
count: int
|
||||
lock: Lock
|
||||
Selector*[T] = ptr SelectorImpl[T]
|
||||
else:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
maxFD : int
|
||||
pollcnt: int
|
||||
fds: seq[SelectorKey[T]]
|
||||
pollfds: seq[TPollFd]
|
||||
count: int
|
||||
Selector*[T] = ref SelectorImpl[T]
|
||||
|
||||
type
|
||||
SelectEventImpl = object
|
||||
rfd: cint
|
||||
wfd: cint
|
||||
SelectEvent* = ptr SelectEventImpl
|
||||
|
||||
when hasThreadSupport:
|
||||
template withPollLock[T](s: Selector[T], body: untyped) =
|
||||
acquire(s.lock)
|
||||
{.locks: [s.lock].}:
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
release(s.lock)
|
||||
else:
|
||||
template withPollLock(s, body: untyped) =
|
||||
body
|
||||
|
||||
proc newSelector*[T](): Selector[T] =
|
||||
var a = RLimit()
|
||||
if getrlimit(posix.RLIMIT_NOFILE, a) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
var maxFD = int(a.rlim_max)
|
||||
|
||||
when hasThreadSupport:
|
||||
result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T])))
|
||||
result.maxFD = maxFD
|
||||
result.fds = allocSharedArray[SelectorKey[T]](maxFD)
|
||||
result.pollfds = allocSharedArray[TPollFd](maxFD)
|
||||
initLock(result.lock)
|
||||
else:
|
||||
result = Selector[T]()
|
||||
result.maxFD = maxFD
|
||||
result.fds = newSeq[SelectorKey[T]](maxFD)
|
||||
result.pollfds = newSeq[TPollFd](maxFD)
|
||||
|
||||
for i in 0 ..< maxFD:
|
||||
result.fds[i].ident = InvalidIdent
|
||||
|
||||
proc close*[T](s: Selector[T]) =
|
||||
when hasThreadSupport:
|
||||
deinitLock(s.lock)
|
||||
deallocSharedArray(s.fds)
|
||||
deallocSharedArray(s.pollfds)
|
||||
deallocShared(cast[pointer](s))
|
||||
|
||||
template pollAdd[T](s: Selector[T], sock: cint, events: set[Event]) =
|
||||
withPollLock(s):
|
||||
var pollev: cshort = 0
|
||||
if Event.Read in events: pollev = pollev or POLLIN
|
||||
if Event.Write in events: pollev = pollev or POLLOUT
|
||||
s.pollfds[s.pollcnt].fd = cint(sock)
|
||||
s.pollfds[s.pollcnt].events = pollev
|
||||
inc(s.count)
|
||||
inc(s.pollcnt)
|
||||
|
||||
template pollUpdate[T](s: Selector[T], sock: cint, events: set[Event]) =
|
||||
withPollLock(s):
|
||||
var i = 0
|
||||
var pollev: cshort = 0
|
||||
if Event.Read in events: pollev = pollev or POLLIN
|
||||
if Event.Write in events: pollev = pollev or POLLOUT
|
||||
|
||||
while i < s.pollcnt:
|
||||
if s.pollfds[i].fd == sock:
|
||||
s.pollfds[i].events = pollev
|
||||
break
|
||||
inc(i)
|
||||
doAssert(i < s.pollcnt,
|
||||
"Descriptor [" & $sock & "] is not registered in the queue!")
|
||||
|
||||
template pollRemove[T](s: Selector[T], sock: cint) =
|
||||
withPollLock(s):
|
||||
var i = 0
|
||||
while i < s.pollcnt:
|
||||
if s.pollfds[i].fd == sock:
|
||||
if i == s.pollcnt - 1:
|
||||
s.pollfds[i].fd = 0
|
||||
s.pollfds[i].events = 0
|
||||
s.pollfds[i].revents = 0
|
||||
else:
|
||||
while i < (s.pollcnt - 1):
|
||||
s.pollfds[i].fd = s.pollfds[i + 1].fd
|
||||
s.pollfds[i].events = s.pollfds[i + 1].events
|
||||
inc(i)
|
||||
break
|
||||
inc(i)
|
||||
dec(s.pollcnt)
|
||||
dec(s.count)
|
||||
|
||||
template checkFd(s, f) =
|
||||
if f >= s.maxFD:
|
||||
raiseIOSelectorsError("Maximum number of descriptors is exhausted!")
|
||||
|
||||
proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) =
|
||||
var fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent)
|
||||
setKey(s, fdi, events, 0, data)
|
||||
if events != {}: s.pollAdd(fdi.cint, events)
|
||||
|
||||
proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event]) =
|
||||
let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode,
|
||||
Event.User, Event.Oneshot, Event.Error}
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor [" & $fdi & "] is not registered in the queue!")
|
||||
doAssert(pkey.events * maskEvents == {})
|
||||
|
||||
if pkey.events != events:
|
||||
if pkey.events == {}:
|
||||
s.pollAdd(fd.cint, events)
|
||||
else:
|
||||
if events != {}:
|
||||
s.pollUpdate(fd.cint, events)
|
||||
else:
|
||||
s.pollRemove(fd.cint)
|
||||
pkey.events = events
|
||||
|
||||
proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) =
|
||||
var fdi = int(ev.rfd)
|
||||
doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!")
|
||||
var events = {Event.User}
|
||||
setKey(s, fdi, events, 0, data)
|
||||
events.incl(Event.Read)
|
||||
s.pollAdd(fdi.cint, events)
|
||||
|
||||
proc unregister*[T](s: Selector[T], fd: int|SocketHandle) =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent,
|
||||
"Descriptor [" & $fdi & "] is not registered in the queue!")
|
||||
pkey.ident = InvalidIdent
|
||||
if pkey.events != {}:
|
||||
pkey.events = {}
|
||||
s.pollRemove(fdi.cint)
|
||||
|
||||
proc unregister*[T](s: Selector[T], ev: SelectEvent) =
|
||||
let fdi = int(ev.rfd)
|
||||
s.checkFd(fdi)
|
||||
var pkey = addr(s.fds[fdi])
|
||||
doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!")
|
||||
doAssert(Event.User in pkey.events)
|
||||
pkey.ident = InvalidIdent
|
||||
pkey.events = {}
|
||||
s.pollRemove(fdi.cint)
|
||||
|
||||
proc newSelectEvent*(): SelectEvent =
|
||||
var fds: array[2, cint]
|
||||
if posix.pipe(fds) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fds[0])
|
||||
setNonBlocking(fds[1])
|
||||
result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
|
||||
result.rfd = fds[0]
|
||||
result.wfd = fds[1]
|
||||
|
||||
proc trigger*(ev: SelectEvent) =
|
||||
var data: uint64 = 1
|
||||
if posix.write(ev.wfd, addr data, sizeof(uint64)) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc close*(ev: SelectEvent) =
|
||||
let res1 = posix.close(ev.rfd)
|
||||
let res2 = posix.close(ev.wfd)
|
||||
deallocShared(cast[pointer](ev))
|
||||
if res1 != 0 or res2 != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc selectInto*[T](s: Selector[T], timeout: int,
|
||||
results: var openarray[ReadyKey]): int =
|
||||
var maxres = MAX_POLL_EVENTS
|
||||
if maxres > len(results):
|
||||
maxres = len(results)
|
||||
|
||||
verifySelectParams(timeout)
|
||||
|
||||
s.withPollLock():
|
||||
let count = posix.poll(addr(s.pollfds[0]), Tnfds(s.pollcnt), timeout)
|
||||
if count < 0:
|
||||
result = 0
|
||||
let err = osLastError()
|
||||
if cint(err) != EINTR:
|
||||
raiseIOSelectorsError(err)
|
||||
elif count == 0:
|
||||
result = 0
|
||||
else:
|
||||
var i = 0
|
||||
var k = 0
|
||||
var rindex = 0
|
||||
while (i < s.pollcnt) and (k < count) and (rindex < maxres):
|
||||
let revents = s.pollfds[i].revents
|
||||
if revents != 0:
|
||||
let fd = s.pollfds[i].fd
|
||||
var pkey = addr(s.fds[fd])
|
||||
var rkey = ReadyKey(fd: int(fd), events: {})
|
||||
|
||||
if (revents and POLLIN) != 0:
|
||||
rkey.events.incl(Event.Read)
|
||||
if Event.User in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if posix.read(fd, addr data, sizeof(uint64)) != sizeof(uint64):
|
||||
let err = osLastError()
|
||||
if err != OSErrorCode(EAGAIN):
|
||||
raiseIOSelectorsError(err)
|
||||
else:
|
||||
# someone already consumed event data
|
||||
inc(i)
|
||||
continue
|
||||
rkey.events = {Event.User}
|
||||
if (revents and POLLOUT) != 0:
|
||||
rkey.events.incl(Event.Write)
|
||||
if (revents and POLLERR) != 0 or (revents and POLLHUP) != 0 or
|
||||
(revents and POLLNVAL) != 0:
|
||||
rkey.events.incl(Event.Error)
|
||||
results[rindex] = rkey
|
||||
s.pollfds[i].revents = 0
|
||||
inc(rindex)
|
||||
inc(k)
|
||||
inc(i)
|
||||
result = k
|
||||
|
||||
proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] =
|
||||
result = newSeq[ReadyKey](MAX_POLL_EVENTS)
|
||||
let count = selectInto(s, timeout, result)
|
||||
result.setLen(count)
|
||||
|
||||
template isEmpty*[T](s: Selector[T]): bool =
|
||||
(s.count == 0)
|
||||
|
||||
proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} =
|
||||
return s.fds[fd.int].ident != InvalidIdent
|
||||
|
||||
proc getData*[T](s: Selector[T], fd: SocketHandle|int): var T =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
if fdi in s:
|
||||
result = s.fds[fdi].data
|
||||
|
||||
proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool =
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
if fdi in s:
|
||||
s.fds[fdi].data = data
|
||||
result = true
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body: untyped) =
|
||||
mixin checkFd
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
if fdi in s:
|
||||
var value = addr(s.getData(fdi))
|
||||
body
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1,
|
||||
body2: untyped) =
|
||||
mixin checkFd
|
||||
let fdi = int(fd)
|
||||
s.checkFd(fdi)
|
||||
if fdi in s:
|
||||
var value = addr(s.getData(fdi))
|
||||
body1
|
||||
else:
|
||||
body2
|
||||
|
||||
|
||||
proc getFd*[T](s: Selector[T]): int =
|
||||
return -1
|
vendor/nim-chronos/chronos/ioselects/ioselectors_select.nim (vendored, new file, 465 lines)
@ -0,0 +1,465 @@
|
||||
#
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2016 Eugene Kabanov
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# This module implements Posix and Windows select().
|
||||
|
||||
import times, nativesockets
|
||||
|
||||
when defined(windows):
|
||||
import winlean
|
||||
when defined(gcc):
|
||||
{.passl: "-lws2_32".}
|
||||
elif defined(vcc):
|
||||
{.passl: "ws2_32.lib".}
|
||||
const platformHeaders = """#include <winsock2.h>
|
||||
#include <windows.h>"""
|
||||
const EAGAIN = WSAEWOULDBLOCK
|
||||
else:
|
||||
const platformHeaders = """#include <sys/select.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>"""
|
||||
type
|
||||
Fdset {.importc: "fd_set", header: platformHeaders, pure, final.} = object
|
||||
var
|
||||
FD_SETSIZE {.importc: "FD_SETSIZE", header: platformHeaders.}: cint
|
||||
|
||||
proc IOFD_SET(fd: SocketHandle, fdset: ptr Fdset)
|
||||
{.cdecl, importc: "FD_SET", header: platformHeaders, inline.}
|
||||
proc IOFD_CLR(fd: SocketHandle, fdset: ptr Fdset)
|
||||
{.cdecl, importc: "FD_CLR", header: platformHeaders, inline.}
|
||||
proc IOFD_ZERO(fdset: ptr Fdset)
|
||||
{.cdecl, importc: "FD_ZERO", header: platformHeaders, inline.}
|
||||
|
||||
when defined(windows):
|
||||
proc IOFD_ISSET(fd: SocketHandle, fdset: ptr Fdset): cint
|
||||
{.stdcall, importc: "FD_ISSET", header: platformHeaders, inline.}
|
||||
proc ioselect(nfds: cint, readFds, writeFds, exceptFds: ptr Fdset,
|
||||
timeout: ptr Timeval): cint
|
||||
{.stdcall, importc: "select", header: platformHeaders.}
|
||||
else:
|
||||
proc IOFD_ISSET(fd: SocketHandle, fdset: ptr Fdset): cint
|
||||
{.cdecl, importc: "FD_ISSET", header: platformHeaders, inline.}
|
||||
proc ioselect(nfds: cint, readFds, writeFds, exceptFds: ptr Fdset,
|
||||
timeout: ptr Timeval): cint
|
||||
{.cdecl, importc: "select", header: platformHeaders.}
|
||||
|
||||
when hasThreadSupport:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
rSet: FdSet
|
||||
wSet: FdSet
|
||||
eSet: FdSet
|
||||
maxFD: int
|
||||
fds: ptr SharedArray[SelectorKey[T]]
|
||||
count: int
|
||||
lock: Lock
|
||||
Selector*[T] = ptr SelectorImpl[T]
|
||||
else:
|
||||
type
|
||||
SelectorImpl[T] = object
|
||||
rSet: FdSet
|
||||
wSet: FdSet
|
||||
eSet: FdSet
|
||||
maxFD: int
|
||||
fds: seq[SelectorKey[T]]
|
||||
count: int
|
||||
Selector*[T] = ref SelectorImpl[T]
|
||||
|
||||
type
|
||||
SelectEventImpl = object
|
||||
rsock: SocketHandle
|
||||
wsock: SocketHandle
|
||||
SelectEvent* = ptr SelectEventImpl
|
||||
|
||||
when hasThreadSupport:
|
||||
template withSelectLock[T](s: Selector[T], body: untyped) =
|
||||
acquire(s.lock)
|
||||
{.locks: [s.lock].}:
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
release(s.lock)
|
||||
else:
|
||||
template withSelectLock[T](s: Selector[T], body: untyped) =
|
||||
body
|
||||
|
||||
proc newSelector*[T](): Selector[T] =
|
||||
when hasThreadSupport:
|
||||
result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T])))
|
||||
result.fds = allocSharedArray[SelectorKey[T]](FD_SETSIZE)
|
||||
initLock result.lock
|
||||
else:
|
||||
result = Selector[T]()
|
||||
result.fds = newSeq[SelectorKey[T]](FD_SETSIZE)
|
||||
|
||||
for i in 0 ..< FD_SETSIZE:
|
||||
result.fds[i].ident = InvalidIdent
|
||||
|
||||
IOFD_ZERO(addr result.rSet)
|
||||
IOFD_ZERO(addr result.wSet)
|
||||
IOFD_ZERO(addr result.eSet)
|
||||
|
||||
proc close*[T](s: Selector[T]) =
|
||||
when hasThreadSupport:
|
||||
deallocSharedArray(s.fds)
|
||||
deallocShared(cast[pointer](s))
|
||||
|
||||
when defined(windows):
|
||||
proc newSelectEvent*(): SelectEvent =
|
||||
var ssock = createNativeSocket()
|
||||
var wsock = createNativeSocket()
|
||||
var rsock: SocketHandle = INVALID_SOCKET
|
||||
var saddr = Sockaddr_in()
|
||||
|
||||
saddr.sin_family = winlean.AF_INET
|
||||
saddr.sin_port = 0
|
||||
saddr.sin_addr.s_addr = INADDR_ANY
|
||||
if bindAddr(ssock, cast[ptr SockAddr](addr(saddr)),
|
||||
sizeof(saddr).SockLen) < 0'i32:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
if winlean.listen(ssock, 1) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
var namelen = sizeof(saddr).SockLen
|
||||
if getsockname(ssock, cast[ptr SockAddr](addr(saddr)),
|
||||
addr(namelen)) != 0'i32:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
saddr.sin_addr.s_addr = 0x0100007F
|
||||
if winlean.connect(wsock, cast[ptr SockAddr](addr(saddr)),
|
||||
sizeof(saddr).SockLen) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
namelen = sizeof(saddr).SockLen
|
||||
rsock = winlean.accept(ssock, cast[ptr SockAddr](addr(saddr)),
|
||||
cast[ptr SockLen](addr(namelen)))
|
||||
if rsock == SocketHandle(-1):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
if winlean.closesocket(ssock) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
var mode = clong(1)
|
||||
if ioctlsocket(rsock, FIONBIO, addr(mode)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
mode = clong(1)
|
||||
if ioctlsocket(wsock, FIONBIO, addr(mode)) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
|
||||
result.rsock = rsock
|
||||
result.wsock = wsock
|
||||
|
||||
proc trigger*(ev: SelectEvent) =
|
||||
var data: uint64 = 1
|
||||
if winlean.send(ev.wsock, cast[pointer](addr data),
|
||||
cint(sizeof(uint64)), 0) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc close*(ev: SelectEvent) =
|
||||
let res1 = winlean.closesocket(ev.rsock)
|
||||
let res2 = winlean.closesocket(ev.wsock)
|
||||
deallocShared(cast[pointer](ev))
|
||||
if res1 != 0 or res2 != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
else:
|
||||
proc newSelectEvent*(): SelectEvent =
|
||||
var fds: array[2, cint]
|
||||
if posix.pipe(fds) != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
setNonBlocking(fds[0])
|
||||
setNonBlocking(fds[1])
|
||||
result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
|
||||
result.rsock = SocketHandle(fds[0])
|
||||
result.wsock = SocketHandle(fds[1])
|
||||
|
||||
proc trigger*(ev: SelectEvent) =
|
||||
var data: uint64 = 1
|
||||
if posix.write(cint(ev.wsock), addr data, sizeof(uint64)) != sizeof(uint64):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc close*(ev: SelectEvent) =
|
||||
let res1 = posix.close(cint(ev.rsock))
|
||||
let res2 = posix.close(cint(ev.wsock))
|
||||
deallocShared(cast[pointer](ev))
|
||||
if res1 != 0 or res2 != 0:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
proc setSelectKey[T](s: Selector[T], fd: SocketHandle, events: set[Event],
|
||||
data: T) =
|
||||
var i = 0
|
||||
let fdi = int(fd)
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == InvalidIdent:
|
||||
var pkey = addr(s.fds[i])
|
||||
pkey.ident = fdi
|
||||
pkey.events = events
|
||||
pkey.data = data
|
||||
break
|
||||
inc(i)
|
||||
if i >= FD_SETSIZE:
|
||||
raiseIOSelectorsError("Maximum number of descriptors is exhausted!")
|
||||
|
||||
proc getKey[T](s: Selector[T], fd: SocketHandle): ptr SelectorKey[T] =
|
||||
var i = 0
|
||||
let fdi = int(fd)
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
result = addr(s.fds[i])
|
||||
break
|
||||
inc(i)
|
||||
doAssert(i < FD_SETSIZE,
|
||||
"Descriptor [" & $int(fd) & "] is not registered in the queue!")
|
||||
|
||||
proc delKey[T](s: Selector[T], fd: SocketHandle) =
|
||||
var empty: T
|
||||
var i = 0
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == fd.int:
|
||||
s.fds[i].ident = InvalidIdent
|
||||
s.fds[i].events = {}
|
||||
s.fds[i].data = empty
|
||||
break
|
||||
inc(i)
|
||||
doAssert(i < FD_SETSIZE,
|
||||
"Descriptor [" & $int(fd) & "] is not registered in the queue!")
|
||||
|
||||
proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) =
|
||||
when not defined(windows):
|
||||
let fdi = int(fd)
|
||||
s.withSelectLock():
|
||||
s.setSelectKey(fd, events, data)
|
||||
when not defined(windows):
|
||||
if fdi > s.maxFD: s.maxFD = fdi
|
||||
if Event.Read in events:
|
||||
IOFD_SET(fd, addr s.rSet)
|
||||
inc(s.count)
|
||||
if Event.Write in events:
|
||||
IOFD_SET(fd, addr s.wSet)
|
||||
IOFD_SET(fd, addr s.eSet)
|
||||
inc(s.count)
|
||||
|
||||
proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) =
|
||||
when not defined(windows):
|
||||
let fdi = int(ev.rsock)
|
||||
s.withSelectLock():
|
||||
s.setSelectKey(ev.rsock, {Event.User}, data)
|
||||
when not defined(windows):
|
||||
if fdi > s.maxFD: s.maxFD = fdi
|
||||
IOFD_SET(ev.rsock, addr s.rSet)
|
||||
inc(s.count)
|
||||
|
||||
proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event]) =
|
||||
let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode,
|
||||
Event.User, Event.Oneshot, Event.Error}
|
||||
s.withSelectLock():
|
||||
var pkey = s.getKey(fd)
|
||||
doAssert(pkey.events * maskEvents == {})
|
||||
if pkey.events != events:
|
||||
if (Event.Read in pkey.events) and (Event.Read notin events):
|
||||
IOFD_CLR(fd, addr s.rSet)
|
||||
dec(s.count)
|
||||
if (Event.Write in pkey.events) and (Event.Write notin events):
|
||||
IOFD_CLR(fd, addr s.wSet)
|
||||
IOFD_CLR(fd, addr s.eSet)
|
||||
dec(s.count)
|
||||
if (Event.Read notin pkey.events) and (Event.Read in events):
|
||||
IOFD_SET(fd, addr s.rSet)
|
||||
inc(s.count)
|
||||
if (Event.Write notin pkey.events) and (Event.Write in events):
|
||||
IOFD_SET(fd, addr s.wSet)
|
||||
IOFD_SET(fd, addr s.eSet)
|
||||
inc(s.count)
|
||||
pkey.events = events
|
||||
|
||||
proc unregister*[T](s: Selector[T], fd: SocketHandle|int) =
|
||||
s.withSelectLock():
|
||||
let fd = fd.SocketHandle
|
||||
var pkey = s.getKey(fd)
|
||||
if Event.Read in pkey.events or Event.User in pkey.events:
|
||||
IOFD_CLR(fd, addr s.rSet)
|
||||
dec(s.count)
|
||||
if Event.Write in pkey.events:
|
||||
IOFD_CLR(fd, addr s.wSet)
|
||||
IOFD_CLR(fd, addr s.eSet)
|
||||
dec(s.count)
|
||||
s.delKey(fd)
|
||||
|
||||
proc unregister*[T](s: Selector[T], ev: SelectEvent) =
|
||||
let fd = ev.rsock
|
||||
s.withSelectLock():
|
||||
var pkey = s.getKey(fd)
|
||||
IOFD_CLR(fd, addr s.rSet)
|
||||
dec(s.count)
|
||||
s.delKey(fd)
|
||||
|
||||
proc selectInto*[T](s: Selector[T], timeout: int,
|
||||
results: var openarray[ReadyKey]): int =
|
||||
var tv = Timeval()
|
||||
var ptv = addr tv
|
||||
var rset, wset, eset: FdSet
|
||||
|
||||
verifySelectParams(timeout)
|
||||
|
||||
if timeout != -1:
|
||||
when defined(genode):
|
||||
tv.tv_sec = Time(timeout div 1_000)
|
||||
else:
|
||||
tv.tv_sec = timeout.int32 div 1_000
|
||||
tv.tv_usec = (timeout.int32 %% 1_000) * 1_000
|
||||
else:
|
||||
ptv = nil
|
||||
|
||||
s.withSelectLock():
|
||||
rset = s.rSet
|
||||
wset = s.wSet
|
||||
eset = s.eSet
|
||||
|
||||
var count = ioselect(cint(s.maxFD) + 1, addr(rset), addr(wset),
|
||||
addr(eset), ptv)
|
||||
if count < 0:
|
||||
result = 0
|
||||
when defined(windows):
|
||||
raiseIOSelectorsError(osLastError())
|
||||
else:
|
||||
let err = osLastError()
|
||||
if cint(err) != EINTR:
|
||||
raiseIOSelectorsError(err)
|
||||
elif count == 0:
|
||||
result = 0
|
||||
else:
|
||||
var rindex = 0
|
||||
var i = 0
|
||||
var k = 0
|
||||
|
||||
while (i < FD_SETSIZE) and (k < count):
|
||||
if s.fds[i].ident != InvalidIdent:
|
||||
var flag = false
|
||||
var pkey = addr(s.fds[i])
|
||||
var rkey = ReadyKey(fd: int(pkey.ident), events: {})
|
||||
let fd = SocketHandle(pkey.ident)
|
||||
if IOFD_ISSET(fd, addr rset) != 0:
|
||||
if Event.User in pkey.events:
|
||||
var data: uint64 = 0
|
||||
if recv(fd, cast[pointer](addr(data)),
|
||||
sizeof(uint64).cint, 0) != sizeof(uint64):
|
||||
let err = osLastError()
|
||||
if cint(err) != EAGAIN:
|
||||
raiseIOSelectorsError(err)
|
||||
else:
|
||||
inc(i)
|
||||
inc(k)
|
||||
continue
|
||||
else:
|
||||
flag = true
|
||||
rkey.events = {Event.User}
|
||||
else:
|
||||
flag = true
|
||||
rkey.events = {Event.Read}
|
||||
if IOFD_ISSET(fd, addr wset) != 0:
|
||||
rkey.events.incl(Event.Write)
|
||||
if IOFD_ISSET(fd, addr eset) != 0:
|
||||
rkey.events.incl(Event.Error)
|
||||
flag = true
|
||||
if flag:
|
||||
results[rindex] = rkey
|
||||
inc(rindex)
|
||||
inc(k)
|
||||
inc(i)
|
||||
result = rindex
|
||||
|
||||
proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] =
|
||||
result = newSeq[ReadyKey](FD_SETSIZE)
|
||||
var count = selectInto(s, timeout, result)
|
||||
result.setLen(count)
|
||||
|
||||
proc flush*[T](s: Selector[T]) = discard
|
||||
|
||||
template isEmpty*[T](s: Selector[T]): bool =
|
||||
(s.count == 0)
|
||||
|
||||
proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} =
|
||||
s.withSelectLock():
|
||||
result = false
|
||||
|
||||
let fdi = int(fd)
|
||||
for i in 0..<FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
return true
|
||||
|
||||
when hasThreadSupport:
|
||||
template withSelectLock[T](s: Selector[T], body: untyped) =
|
||||
acquire(s.lock)
|
||||
{.locks: [s.lock].}:
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
release(s.lock)
|
||||
else:
|
||||
template withSelectLock[T](s: Selector[T], body: untyped) =
|
||||
body
|
||||
|
||||
proc getData*[T](s: Selector[T], fd: SocketHandle|int): var T =
|
||||
s.withSelectLock():
|
||||
let fdi = int(fd)
|
||||
for i in 0..<FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
return s.fds[i].data
|
||||
|
||||
proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool =
|
||||
s.withSelectLock():
|
||||
let fdi = int(fd)
|
||||
var i = 0
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
var pkey = addr(s.fds[i])
|
||||
pkey.data = data
|
||||
result = true
|
||||
break
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body: untyped) =
|
||||
mixin withSelectLock
|
||||
s.withSelectLock():
|
||||
var value: ptr T
|
||||
let fdi = int(fd)
|
||||
var i = 0
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
value = addr(s.fds[i].data)
|
||||
break
|
||||
inc(i)
|
||||
if i != FD_SETSIZE:
|
||||
body
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body1, body2: untyped) =
|
||||
mixin withSelectLock
|
||||
s.withSelectLock():
|
||||
block:
|
||||
var value: ptr T
|
||||
let fdi = int(fd)
|
||||
var i = 0
|
||||
while i < FD_SETSIZE:
|
||||
if s.fds[i].ident == fdi:
|
||||
value = addr(s.fds[i].data)
|
||||
break
|
||||
inc(i)
|
||||
if i != FD_SETSIZE:
|
||||
body1
|
||||
else:
|
||||
body2
|
||||
|
||||
|
||||
proc getFd*[T](s: Selector[T]): int =
|
||||
return -1
|
vendor/nim-chronos/chronos/selectors2.nim (vendored, new file, 360 lines)
@ -0,0 +1,360 @@
|
||||
#
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2016 Eugene Kabanov
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## This module allows high-level and efficient I/O multiplexing.
|
||||
##
|
||||
## Supported OS primitives: ``epoll``, ``kqueue``, ``poll`` and
|
||||
## Windows ``select``.
|
||||
##
|
||||
## To use threadsafe version of this module, it needs to be compiled
|
||||
## with both ``-d:threadsafe`` and ``--threads:on`` options.
|
||||
##
|
||||
## Supported features: files, sockets, pipes, timers, processes, signals
|
||||
## and user events.
|
||||
##
|
||||
## Fully supported OS: MacOSX, FreeBSD, OpenBSD, NetBSD, Linux (except
|
||||
## for Android).
|
||||
##
|
||||
## Partially supported OS: Windows (only sockets and user events),
|
||||
## Solaris (files, sockets, handles and user events).
|
||||
## Android (files, sockets, handles and user events).
|
||||
##
|
||||
## TODO: ``/dev/poll``, ``event ports`` and filesystem events.
|
||||
|
||||
# Based on std/selectors, but with stricter exception handling and effect
|
||||
# support - changes could potentially be backported to nim but are not
|
||||
# backwards-compatible.
|
||||
|
||||
import os, nativesockets
|
||||
|
||||
const hasThreadSupport = compileOption("threads") and defined(threadsafe)
|
||||
|
||||
const ioselSupportedPlatform* = defined(macosx) or defined(freebsd) or
|
||||
defined(netbsd) or defined(openbsd) or
|
||||
defined(dragonfly) or
|
||||
(defined(linux) and not defined(android))
|
||||
## This constant is used to determine whether the destination platform is
|
||||
## fully supported by ``ioselectors`` module.
|
||||
|
||||
const bsdPlatform = defined(macosx) or defined(freebsd) or
|
||||
defined(netbsd) or defined(openbsd) or
|
||||
defined(dragonfly)
|
||||
|
||||
when defined(nimdoc):
|
||||
type
|
||||
Selector*[T] = ref object
|
||||
## An object which holds descriptors to be checked for read/write status
|
||||
|
||||
Event* {.pure.} = enum
|
||||
## An enum which hold event types
|
||||
Read, ## Descriptor is available for read
|
||||
Write, ## Descriptor is available for write
|
||||
Timer, ## Timer descriptor is completed
|
||||
Signal, ## Signal is raised
|
||||
Process, ## Process is finished
|
||||
Vnode, ## BSD specific file change
|
||||
User, ## User event is raised
|
||||
Error, ## Error occurred while waiting for descriptor
|
||||
VnodeWrite, ## NOTE_WRITE (BSD specific, write to file occurred)
|
||||
VnodeDelete, ## NOTE_DELETE (BSD specific, unlink of file occurred)
|
||||
VnodeExtend, ## NOTE_EXTEND (BSD specific, file extended)
|
||||
VnodeAttrib, ## NOTE_ATTRIB (BSD specific, file attributes changed)
|
||||
VnodeLink, ## NOTE_LINK (BSD specific, file link count changed)
|
||||
VnodeRename, ## NOTE_RENAME (BSD specific, file renamed)
|
||||
VnodeRevoke ## NOTE_REVOKE (BSD specific, file revoke occurred)
|
||||
|
||||
ReadyKey* = object
|
||||
## An object which holds result for descriptor
|
||||
fd* : int ## file/socket descriptor
|
||||
events*: set[Event] ## set of events
|
||||
errorCode*: OSErrorCode ## additional error code information for
|
||||
## Error events
|
||||
|
||||
SelectEvent* = object
|
||||
## An object which holds user defined event
|
||||
|
||||
proc newSelector*[T](): Selector[T] =
|
||||
## Creates a new selector
|
||||
|
||||
proc close*[T](s: Selector[T]) =
|
||||
## Closes the selector.
|
||||
|
||||
proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) =
|
||||
## Registers file/socket descriptor ``fd`` to selector ``s``
|
||||
## with events set in ``events``. The ``data`` is application-defined
|
||||
## data, which will be passed when an event is triggered.
|
||||
|
||||
proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event]) =
|
||||
## Update file/socket descriptor ``fd``, registered in selector
|
||||
## ``s`` with new events set ``event``.
|
||||
|
||||
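As a rough illustration of the registerHandle/updateHandle pair documented above, a sketch follows; the import path and the plain TCP socket are assumptions, not part of this diff.

import std/nativesockets
import ./selectors2                # assumed import path for the vendored module

var sel = newSelector[int]()
let sock = createNativeSocket()    # plain TCP socket, made non-blocking below
sock.setBlocking(false)

sel.registerHandle(sock, {Event.Read}, 0)
# later, once there is data queued to send, also watch for writability:
sel.updateHandle(sock, {Event.Read, Event.Write})

sel.unregister(sock)
sel.close()
sock.close()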
proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool,
|
||||
data: T): int {.discardable.} =
|
||||
## Registers timer notification with ``timeout`` (in milliseconds)
|
||||
## to selector ``s``.
|
||||
##
|
||||
## If ``oneshot`` is ``true``, timer will be notified only once.
|
||||
##
|
||||
## Set ``oneshot`` to ``false`` if you want periodic notifications.
|
||||
##
|
||||
## The ``data`` is application-defined data, which will be passed, when
|
||||
## the timer is triggered.
|
||||
##
|
||||
## Returns the file descriptor for the registered timer.
|
||||
|
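A minimal one-shot timer sketch under the behaviour documented above; the import path is an assumption.

import ./selectors2                # assumed import path for the vendored module

var sel = newSelector[string]()
let timerFd = sel.registerTimer(50, oneshot = true, "tick")

for key in sel.select(1000):       # wait up to one second
  if Event.Timer in key.events:
    echo "timer ", key.fd, " fired with data: ", sel.getData(key.fd)

sel.unregister(timerFd)
sel.close()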
||||
proc registerSignal*[T](s: Selector[T], signal: int,
|
||||
data: T): int {.discardable.} =
|
||||
## Registers Unix signal notification with ``signal`` to selector
|
||||
## ``s``.
|
||||
##
|
||||
## The ``data`` is application-defined data, which will be
|
||||
## passed when signal raises.
|
||||
##
|
||||
## Returns the file descriptor for the registered signal.
|
||||
##
|
||||
## **Note:** This function is not supported on ``Windows``.
|
||||
|
||||
proc registerProcess*[T](s: Selector[T], pid: int,
|
||||
data: T): int {.discardable.} =
|
||||
## Registers a process id (pid) notification (when process has
|
||||
## exited) in selector ``s``.
|
||||
##
|
||||
## The ``data`` is application-defined data, which will be passed when
|
||||
## process with ``pid`` has exited.
|
||||
##
|
||||
## Returns the file descriptor for the registered process monitor.
|
||||
|
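A hedged, POSIX-only sketch of registerSignal (registerProcess is used the same way with a pid); the import path and the choice of SIGUSR1 are assumptions.

import std/posix
import ./selectors2                # assumed import path for the vendored module

var sel = newSelector[int]()
let sigFd = sel.registerSignal(int(SIGUSR1), 0)

discard posix.kill(posix.getpid(), SIGUSR1)   # deliver the signal to ourselves

for key in sel.select(1000):
  if Event.Signal in key.events:
    echo "got signal on descriptor ", key.fd

sel.unregister(sigFd)
sel.close()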
||||
proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) =
|
||||
## Registers selector event ``ev`` in selector ``s``.
|
||||
##
|
||||
## The ``data`` is application-defined data, which will be passed when
|
||||
## ``ev`` happens.
|
||||
|
||||
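A small sketch of a user event, the usual way to wake a selector from another thread; the import path is an assumption.

import ./selectors2                # assumed import path for the vendored module

var sel = newSelector[int]()
let ev = newSelectEvent()
sel.registerEvent(ev, 0)

ev.trigger()                       # may equally be called from another thread

for key in sel.select(1000):
  if Event.User in key.events:
    echo "user event fired"

sel.unregister(ev)
sel.close()
ev.close()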
proc registerVnode*[T](s: Selector[T], fd: cint, events: set[Event],
|
||||
data: T) =
|
||||
## Registers selector BSD/MacOSX specific vnode events for file
|
||||
## descriptor ``fd`` and events ``events``.
|
||||
## ``data`` application-defined data, which to be passed, when
|
||||
## vnode event happens.
|
||||
##
|
||||
## **Note:** This function is supported only by BSD and MacOSX.
|
||||
|
||||
proc newSelectEvent*(): SelectEvent =
|
||||
## Creates a new user-defined event.
|
||||
|
||||
proc trigger*(ev: SelectEvent) =
|
||||
## Trigger event ``ev``.
|
||||
|
||||
proc close*(ev: SelectEvent) =
|
||||
## Closes user-defined event ``ev``.
|
||||
|
||||
proc unregister*[T](s: Selector[T], ev: SelectEvent) =
|
||||
## Unregisters user-defined event ``ev`` from selector ``s``.
|
||||
|
||||
proc unregister*[T](s: Selector[T], fd: int|SocketHandle|cint) =
|
||||
## Unregisters file/socket descriptor ``fd`` from selector ``s``.
|
||||
|
||||
proc selectInto*[T](s: Selector[T], timeout: int,
|
||||
results: var openarray[ReadyKey]): int =
|
||||
## Waits for events registered in selector ``s``.
|
||||
##
|
||||
## The ``timeout`` argument specifies the maximum number of milliseconds
|
||||
## the function will be blocked for if no events are ready. Specifying a
|
||||
## timeout of ``-1`` causes the function to block indefinitely.
|
||||
## All available events will be stored in ``results`` array.
|
||||
##
|
||||
## Returns number of triggered events.
|
||||
|
||||
proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] =
|
||||
## Waits for events registered in selector ``s``.
|
||||
##
|
||||
## The ``timeout`` argument specifies the maximum number of milliseconds
|
||||
## the function will be blocked for if no events are ready. Specifying a
|
||||
## timeout of ``-1`` causes the function to block indefinitely.
|
||||
##
|
||||
## Returns a list of triggered events.
|
||||
|
||||
proc getData*[T](s: Selector[T], fd: SocketHandle|int): var T =
|
||||
## Retrieves application-defined ``data`` associated with descriptor ``fd``.
|
||||
## If specified descriptor ``fd`` is not registered, empty/default value
|
||||
## will be returned.
|
||||
|
||||
proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: var T): bool =
|
||||
## Associate application-defined ``data`` with descriptor ``fd``.
|
||||
##
|
||||
## Returns ``true``, if data was successfully updated, ``false`` otherwise.
|
||||
|
||||
template isEmpty*[T](s: Selector[T]): bool = # TODO: Why is this a template?
|
||||
## Returns ``true``, if there are no registered events or descriptors
|
||||
## in selector.
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body: untyped) =
|
||||
## Retrieves the application-data assigned with descriptor ``fd``
|
||||
## to ``value``. This ``value`` can be modified in the scope of
|
||||
## the ``withData`` call.
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## s.withData(fd, value) do:
|
||||
## # block is executed only if ``fd`` registered in selector ``s``
|
||||
## value.uid = 1000
|
||||
##
|
||||
|
||||
template withData*[T](s: Selector[T], fd: SocketHandle|int, value,
|
||||
body1, body2: untyped) =
|
||||
## Retrieves the application-data assigned with descriptor ``fd``
|
||||
## to ``value``. This ``value`` can be modified in the scope of
|
||||
## the ``withData`` call.
|
||||
##
|
||||
## .. code-block:: nim
|
||||
##
|
||||
## s.withData(fd, value) do:
|
||||
## # block is executed only if ``fd`` registered in selector ``s``.
|
||||
## value.uid = 1000
|
||||
## do:
|
||||
## # block is executed if ``fd`` not registered in selector ``s``.
|
||||
## raise
|
||||
##
|
||||
|
||||
proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} =
|
||||
## Determines whether selector contains a file descriptor.
|
||||
|
||||
proc getFd*[T](s: Selector[T]): int =
|
||||
## Retrieves the underlying selector's file descriptor.
|
||||
##
|
||||
## For *poll* and *select* selectors ``-1`` is returned.
|
||||
|
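Putting the pieces above together, a minimal end-to-end sketch; it is POSIX-only and pipe-based so the result is deterministic, and the import path is an assumption.

import std/posix
import ./selectors2                # assumed import path for the vendored module

var fds: array[2, cint]
doAssert posix.pipe(fds) == 0

var sel = newSelector[string]()
sel.registerHandle(int(fds[0]), {Event.Read}, "pipe read end")

var ch = 'x'
discard posix.write(fds[1], addr ch, 1)        # make the read end readable

var ready = newSeq[ReadyKey](8)
let n = sel.selectInto(100, ready)             # block at most 100 ms
for i in 0 ..< n:
  echo "fd ", ready[i].fd, " ready with ", ready[i].events

sel.unregister(int(fds[0]))
sel.close()
discard posix.close(fds[0])
discard posix.close(fds[1])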
||||
else:
|
||||
import strutils
|
||||
when hasThreadSupport:
|
||||
import locks
|
||||
|
||||
type
|
||||
SharedArray[T] = UncheckedArray[T]
|
||||
|
||||
proc allocSharedArray[T](nsize: int): ptr SharedArray[T] =
|
||||
result = cast[ptr SharedArray[T]](allocShared0(sizeof(T) * nsize))
|
||||
|
||||
proc reallocSharedArray[T](sa: ptr SharedArray[T], nsize: int): ptr SharedArray[T] =
|
||||
result = cast[ptr SharedArray[T]](reallocShared(sa, sizeof(T) * nsize))
|
||||
|
||||
proc deallocSharedArray[T](sa: ptr SharedArray[T]) =
|
||||
deallocShared(cast[pointer](sa))
|
||||
type
|
||||
Event* {.pure.} = enum
|
||||
Read, Write, Timer, Signal, Process, Vnode, User, Error, Oneshot,
|
||||
Finished, VnodeWrite, VnodeDelete, VnodeExtend, VnodeAttrib, VnodeLink,
|
||||
VnodeRename, VnodeRevoke
|
||||
|
||||
type
|
||||
IOSelectorsException* = object of CatchableError
|
||||
|
||||
ReadyKey* = object
|
||||
fd* : int
|
||||
events*: set[Event]
|
||||
errorCode*: OSErrorCode
|
||||
|
||||
SelectorKey[T] = object
|
||||
ident: int
|
||||
events: set[Event]
|
||||
param: int
|
||||
data: T
|
||||
|
||||
const
|
||||
InvalidIdent = -1
|
||||
|
||||
proc raiseIOSelectorsError[T](message: T) =
|
||||
var msg = ""
|
||||
when T is string:
|
||||
msg.add(message)
|
||||
elif T is OSErrorCode:
|
||||
msg.add(osErrorMsg(message) & " (code: " & $int(message) & ")")
|
||||
else:
|
||||
msg.add("Internal Error\n")
|
||||
var err = newException(IOSelectorsException, msg)
|
||||
raise err
|
||||
|
||||
proc setNonBlocking(fd: cint) {.inline.} =
|
||||
setBlocking(fd.SocketHandle, false)
|
||||
|
||||
when not defined(windows):
|
||||
import posix
|
||||
|
||||
template setKey(s, pident, pevents, pparam, pdata: untyped) =
|
||||
var skey = addr(s.fds[pident])
|
||||
skey.ident = pident
|
||||
skey.events = pevents
|
||||
skey.param = pparam
|
||||
skey.data = data
|
||||
|
||||
when ioselSupportedPlatform:
|
||||
template blockSignals(newmask: var Sigset, oldmask: var Sigset) =
|
||||
when hasThreadSupport:
|
||||
if posix.pthread_sigmask(SIG_BLOCK, newmask, oldmask) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
else:
|
||||
if posix.sigprocmask(SIG_BLOCK, newmask, oldmask) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
template unblockSignals(newmask: var Sigset, oldmask: var Sigset) =
|
||||
when hasThreadSupport:
|
||||
if posix.pthread_sigmask(SIG_UNBLOCK, newmask, oldmask) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
else:
|
||||
if posix.sigprocmask(SIG_UNBLOCK, newmask, oldmask) == -1:
|
||||
raiseIOSelectorsError(osLastError())
|
||||
|
||||
template clearKey[T](key: ptr SelectorKey[T]) =
|
||||
var empty: T
|
||||
key.ident = InvalidIdent
|
||||
key.events = {}
|
||||
key.data = empty
|
||||
|
||||
proc verifySelectParams(timeout: int) =
|
||||
# Timeout of -1 means: wait forever
|
||||
# Anything higher is the time to wait in milliseconds.
|
||||
doAssert(timeout >= -1, "Cannot select with a negative value, got " & $timeout)
|
||||
|
||||
when defined(linux):
|
||||
include ./ioselects/ioselectors_epoll
|
||||
elif bsdPlatform:
|
||||
include ./ioselects/ioselectors_kqueue
|
||||
elif defined(windows):
|
||||
include ./ioselects/ioselectors_select
|
||||
elif defined(solaris):
|
||||
include ./ioselects/ioselectors_poll # need to replace it with event ports
|
||||
elif defined(genode):
|
||||
include ./ioselects/ioselectors_select # TODO: use the native VFS layer
|
||||
elif defined(nintendoswitch):
|
||||
include ./ioselects/ioselectors_select
|
||||
else:
|
||||
include ./ioselects/ioselectors_poll
|
||||
|
||||
proc register*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event], data: T) {.deprecated: "use registerHandle instead".} =
|
||||
## **Deprecated since v0.18.0:** Use ``registerHandle`` instead.
|
||||
s.registerHandle(fd, events, data)
|
||||
|
||||
proc setEvent*(ev: SelectEvent) {.deprecated: "use trigger instead",
|
||||
raises: [Defect, IOSelectorsException].} =
|
||||
## Trigger event ``ev``.
|
||||
##
|
||||
## **Deprecated since v0.18.0:** Use ``trigger`` instead.
|
||||
ev.trigger()
|
||||
|
||||
proc update*[T](s: Selector[T], fd: int | SocketHandle,
|
||||
events: set[Event]) {.deprecated: "use updateHandle instead".} =
|
||||
## Update file/socket descriptor ``fd``, registered in selector
|
||||
## ``s`` with new events set ``event``.
|
||||
##
|
||||
## **Deprecated since v0.18.0:** Use ``updateHandle`` instead.
|
||||
s.updateHandle(fd, events)
|
vendor/nim-chronos/chronos/sendfile.nim (vendored, 2 lines changed)
@ -9,6 +9,8 @@
|
||||
|
||||
## This module provides cross-platform wrapper for ``sendfile()`` syscall.
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
when defined(nimdoc):
|
||||
proc sendfile*(outfd, infd: int, offset: int, count: var int): int =
|
||||
## Copies data between file descriptor ``infd`` and ``outfd``. Because this
|
||||
|
vendor/nim-chronos/chronos/srcloc.nim (vendored, 2 lines changed)
@ -1,3 +1,5 @@
|
||||
{.push raises: [].}
|
||||
|
||||
type
|
||||
SrcLoc* = object
|
||||
procedure*: cstring
|
||||
|
@ -6,6 +6,9 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import ../asyncloop, ../asyncsync
|
||||
import ../transports/common, ../transports/stream
|
||||
export asyncsync, stream, common
|
||||
@ -58,9 +61,9 @@ type
|
||||
Finished, ## Stream was properly finished
|
||||
Closed ## Stream was closed
|
||||
|
||||
StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {.gcsafe.}
|
||||
StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {.gcsafe, raises: [Defect].}
|
||||
## Main read loop for read streams.
|
||||
StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {.gcsafe.}
|
||||
StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {.gcsafe, raises: [Defect].}
|
||||
## Main write loop for write streams.
|
||||
|
||||
AsyncStreamReader* = ref object of RootRef
|
||||
@ -202,16 +205,20 @@ proc newAsyncStreamUseClosedError*(): ref AsyncStreamUseClosedError {.
|
||||
noinline.} =
|
||||
newException(AsyncStreamUseClosedError, "Stream is already closed")
|
||||
|
||||
proc raiseAsyncStreamUseClosedError*() {.noinline, noreturn.} =
|
||||
proc raiseAsyncStreamUseClosedError*() {.
|
||||
noinline, noreturn, raises: [Defect, AsyncStreamUseClosedError].} =
|
||||
raise newAsyncStreamUseClosedError()
|
||||
|
||||
proc raiseAsyncStreamLimitError*() {.noinline, noreturn.} =
|
||||
proc raiseAsyncStreamLimitError*() {.
|
||||
noinline, noreturn, raises: [Defect, AsyncStreamLimitError].} =
|
||||
raise newAsyncStreamLimitError()
|
||||
|
||||
proc raiseAsyncStreamIncompleteError*() {.noinline, noreturn.} =
|
||||
proc raiseAsyncStreamIncompleteError*() {.
|
||||
noinline, noreturn, raises: [Defect, AsyncStreamIncompleteError].} =
|
||||
raise newAsyncStreamIncompleteError()
|
||||
|
||||
proc raiseAsyncStreamIncorrectDefect*(m: string) {.noinline, noreturn.} =
|
||||
proc raiseAsyncStreamIncorrectDefect*(m: string) {.
|
||||
noinline, noreturn, raises: [Defect].} =
|
||||
raise newException(AsyncStreamIncorrectDefect, m)
|
||||
|
||||
proc raiseEmptyMessageDefect*() {.noinline, noreturn.} =
|
||||
@ -248,8 +255,8 @@ proc running*(rw: AsyncStreamRW): bool {.inline.} =
|
||||
## Returns ``true`` is reading/writing stream is still pending.
|
||||
(rw.state == AsyncStreamState.Running)
|
||||
|
||||
proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {.gcsafe.}
|
||||
proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {.gcsafe.}
|
||||
proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {.gcsafe, raises: [Defect].}
|
||||
proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {.gcsafe, raises: [Defect].}
|
||||
|
||||
proc getAsyncStreamReaderTracker(): AsyncStreamTracker {.inline.} =
|
||||
var res = cast[AsyncStreamTracker](getTracker(AsyncStreamReaderTrackerName))
|
||||
@ -873,7 +880,7 @@ proc close*(rw: AsyncStreamRW) =
|
||||
|
||||
rw.state = AsyncStreamState.Closed
|
||||
|
||||
proc continuation(udata: pointer) =
|
||||
proc continuation(udata: pointer) {.raises: [Defect].} =
|
||||
if not isNil(rw.udata):
|
||||
GC_unref(cast[ref int](rw.udata))
|
||||
if not(rw.future.finished()):
|
||||
|
vendor/nim-chronos/chronos/timer.nim (vendored, 2 lines changed)
@ -24,6 +24,8 @@
|
||||
## You can specify which timer you want to use ``-d:asyncTimer=<system/mono>``.
|
||||
const asyncTimer* {.strdefine.} = "mono"
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
when defined(windows):
|
||||
when asyncTimer == "system":
|
||||
from winlean import getSystemTimeAsFileTime, FILETIME
|
||||
|
vendor/nim-chronos/chronos/transport.nim (vendored, 4 lines changed)
@ -6,8 +6,8 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import transports/[datagram, stream, common, ipnet, osnet]
|
||||
import streams/[asyncstream, chunkstream]
|
||||
import ./transports/[datagram, stream, common, ipnet, osnet]
|
||||
import ./streams/[asyncstream, chunkstream]
|
||||
|
||||
export datagram, common, stream, ipnet, osnet
|
||||
export asyncstream, chunkstream
|
||||
|
vendor/nim-chronos/chronos/transports/common.nim (vendored, 44 lines changed)
@ -6,7 +6,10 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import os, strutils, nativesockets, net
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import std/[os, strutils, nativesockets, net]
|
||||
import ../asyncloop
|
||||
export net
|
||||
|
||||
@ -183,9 +186,10 @@ proc `$`*(address: TransportAddress): string =
|
||||
else:
|
||||
result = ""
|
||||
else:
|
||||
raise newException(TransportAddressError, "Unknown address family!")
|
||||
result = "Unknown address family: " & $address.family
|
||||
|
||||
proc initTAddress*(address: string): TransportAddress =
|
||||
proc initTAddress*(address: string): TransportAddress {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Parses string representation of ``address``. ``address`` can be IPv4, IPv6
|
||||
## or Unix domain address.
|
||||
##
|
||||
@ -230,7 +234,8 @@ proc initTAddress*(address: string): TransportAddress =
|
||||
else:
|
||||
result = TransportAddress(family: AddressFamily.Unix)
|
||||
|
||||
proc initTAddress*(address: string, port: Port): TransportAddress =
|
||||
proc initTAddress*(address: string, port: Port): TransportAddress {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Initialize ``TransportAddress`` with IP (IPv4 or IPv6) address ``address``
|
||||
## and port number ``port``.
|
||||
try:
|
||||
@ -246,7 +251,8 @@ proc initTAddress*(address: string, port: Port): TransportAddress =
|
||||
except CatchableError as exc:
|
||||
raise newException(TransportAddressError, exc.msg)
|
||||
|
||||
proc initTAddress*(address: string, port: int): TransportAddress {.inline.} =
|
||||
proc initTAddress*(address: string, port: int): TransportAddress {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Initialize ``TransportAddress`` with IP (IPv4 or IPv6) address ``address``
|
||||
## and port number ``port``.
|
||||
if port < 0 or port >= 65536:
|
||||
@ -267,7 +273,8 @@ proc initTAddress*(address: IpAddress, port: Port): TransportAddress =
|
||||
|
||||
proc getAddrInfo(address: string, port: Port, domain: Domain,
|
||||
sockType: SockType = SockType.SOCK_STREAM,
|
||||
protocol: Protocol = Protocol.IPPROTO_TCP): ptr AddrInfo =
|
||||
protocol: Protocol = Protocol.IPPROTO_TCP): ptr AddrInfo {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## We have this one copy of ``getAddrInfo()`` because of AI_V4MAPPED in
|
||||
## ``net.nim:getAddrInfo()``, which is not cross-platform.
|
||||
var hints: AddrInfo
|
||||
@ -346,7 +353,7 @@ proc toSAddr*(address: TransportAddress, sa: var Sockaddr_storage,
|
||||
else:
|
||||
discard
|
||||
|
||||
proc address*(ta: TransportAddress): IpAddress =
|
||||
proc address*(ta: TransportAddress): IpAddress {.raises: [Defect, ValueError].} =
|
||||
## Converts ``TransportAddress`` to ``net.IpAddress`` object.
|
||||
##
|
||||
## Note its impossible to convert ``TransportAddress`` of ``Unix`` family,
|
||||
@ -361,7 +368,8 @@ proc address*(ta: TransportAddress): IpAddress =
|
||||
raise newException(ValueError, "IpAddress supports only IPv4/IPv6!")
|
||||
|
||||
proc resolveTAddress*(address: string,
|
||||
family = AddressFamily.IPv4): seq[TransportAddress] =
|
||||
family = AddressFamily.IPv4): seq[TransportAddress] {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Resolve string representation of ``address``.
|
||||
##
|
||||
## Supported formats are:
|
||||
@ -412,7 +420,8 @@ proc resolveTAddress*(address: string,
|
||||
freeAddrInfo(aiList)
|
||||
|
||||
proc resolveTAddress*(address: string, port: Port,
|
||||
family = AddressFamily.IPv4): seq[TransportAddress] =
|
||||
family = AddressFamily.IPv4): seq[TransportAddress] {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Resolve string representation of ``address``.
|
||||
##
|
||||
## ``address`` could be dot IPv4/IPv6 address or hostname.
|
||||
@ -439,7 +448,7 @@ proc resolveTAddress*(address: string, port: Port,
|
||||
|
||||
proc resolveTAddress*(address: string,
|
||||
family: IpAddressFamily): seq[TransportAddress] {.
|
||||
deprecated.} =
|
||||
deprecated, raises: [Defect, TransportAddressError].} =
|
||||
if family == IpAddressFamily.IPv4:
|
||||
result = resolveTAddress(address, AddressFamily.IPv4)
|
||||
elif family == IpAddressFamily.IPv6:
|
||||
@ -447,22 +456,24 @@ proc resolveTAddress*(address: string,
|
||||
|
||||
proc resolveTAddress*(address: string, port: Port,
|
||||
family: IpAddressFamily): seq[TransportAddress] {.
|
||||
deprecated.} =
|
||||
deprecated, raises: [Defect, TransportAddressError].} =
|
||||
if family == IpAddressFamily.IPv4:
|
||||
result = resolveTAddress(address, port, AddressFamily.IPv4)
|
||||
elif family == IpAddressFamily.IPv6:
|
||||
result = resolveTAddress(address, port, AddressFamily.IPv6)
|
||||
|
||||
proc windowsAnyAddressFix*(a: TransportAddress): TransportAddress {.inline.} =
|
||||
proc windowsAnyAddressFix*(a: TransportAddress): TransportAddress =
|
||||
## BSD Sockets on *nix systems are able to perform connections to
|
||||
## `0.0.0.0` or `::0` which are equal to `127.0.0.1` or `::1`.
|
||||
when defined(windows):
|
||||
if (a.family == AddressFamily.IPv4 and
|
||||
a.address_v4 == AnyAddress.address_v4):
|
||||
result = initTAddress("127.0.0.1", a.port)
|
||||
result = try: initTAddress("127.0.0.1", a.port)
|
||||
except TransportAddressError as exc: raiseAssert exc.msg
|
||||
elif (a.family == AddressFamily.IPv6 and
|
||||
a.address_v6 == AnyAddress6.address_v6):
|
||||
result = initTAddress("::1", a.port)
|
||||
result = try: initTAddress("::1", a.port)
|
||||
except TransportAddressError as exc: raiseAssert exc.msg
|
||||
else:
|
||||
result = a
|
||||
else:
|
||||
@ -484,7 +495,7 @@ template checkWriteEof*(t: untyped, future: untyped) =
|
||||
"Transport connection is already dropped!"))
|
||||
return future
|
||||
|
||||
template getError*(t: untyped): ref Exception =
|
||||
template getError*(t: untyped): ref CatchableError =
|
||||
var err = (t).error
|
||||
(t).error = nil
|
||||
err
|
||||
@ -507,7 +518,8 @@ template getTransportOsError*(err: OSErrorCode): ref TransportOsError =
|
||||
template getTransportOsError*(err: cint): ref TransportOsError =
|
||||
getTransportOsError(OSErrorCode(err))
|
||||
|
||||
proc raiseTransportOsError*(err: OSErrorCode) =
|
||||
proc raiseTransportOsError*(err: OSErrorCode) {.
|
||||
raises: [Defect, TransportOsError].} =
|
||||
## Raises transport specific OS error.
|
||||
raise getTransportOsError(err)
|
||||
|
||||
|
@ -7,9 +7,11 @@
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
|
||||
import net, nativesockets, os, deques
|
||||
import ../asyncloop, ../handles
|
||||
import common
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import std/[net, nativesockets, os, deques]
|
||||
import ".."/[selectors2, asyncloop, handles]
|
||||
import ./common
|
||||
|
||||
when defined(windows):
|
||||
import winlean
|
||||
@ -33,7 +35,7 @@ type
|
||||
writer: Future[void] # Writer vector completion Future
|
||||
|
||||
DatagramCallback* = proc(transp: DatagramTransport,
|
||||
remote: TransportAddress): Future[void] {.gcsafe.}
|
||||
remote: TransportAddress): Future[void] {.gcsafe, raises: [Defect].}
|
||||
|
||||
DatagramTransport* = ref object of RootRef
|
||||
fd*: AsyncFD # File descriptor
|
||||
@ -41,7 +43,7 @@ type
|
||||
flags: set[ServerFlags] # Flags
|
||||
buffer: seq[byte] # Reading buffer
|
||||
buflen: int # Reading buffer effective size
|
||||
error: ref Exception # Current error
|
||||
error: ref CatchableError # Current error
|
||||
queue: Deque[GramVector] # Writer queue
|
||||
local: TransportAddress # Local address
|
||||
remote: TransportAddress # Remote address
|
||||
@ -66,7 +68,8 @@ type
|
||||
const
|
||||
DgramTransportTrackerName = "datagram.transport"
|
||||
|
||||
proc remoteAddress*(transp: DatagramTransport): TransportAddress =
|
||||
proc remoteAddress*(transp: DatagramTransport): TransportAddress {.
|
||||
raises: [Defect, TransportOsError].} =
|
||||
## Returns ``transp`` remote socket address.
|
||||
if transp.remote.family == AddressFamily.None:
|
||||
var saddr: Sockaddr_storage
|
||||
@ -77,7 +80,8 @@ proc remoteAddress*(transp: DatagramTransport): TransportAddress =
|
||||
fromSAddr(addr saddr, slen, transp.remote)
|
||||
result = transp.remote
|
||||
|
||||
proc localAddress*(transp: DatagramTransport): TransportAddress =
|
||||
proc localAddress*(transp: DatagramTransport): TransportAddress {.
|
||||
raises: [Defect, TransportOsError].} =
|
||||
## Returns ``transp`` local socket address.
|
||||
if transp.local.family == AddressFamily.None:
|
||||
var saddr: Sockaddr_storage
|
||||
@ -92,7 +96,7 @@ template setReadError(t, e: untyped) =
|
||||
(t).state.incl(ReadError)
|
||||
(t).error = getTransportOsError(e)
|
||||
|
||||
proc setupDgramTransportTracker(): DgramTransportTracker {.gcsafe.}
|
||||
proc setupDgramTransportTracker(): DgramTransportTracker {.gcsafe, raises: [Defect].}
|
||||
|
||||
proc getDgramTransportTracker(): DgramTransportTracker {.inline.} =
|
||||
result = cast[DgramTransportTracker](getTracker(DgramTransportTrackerName))
|
||||
@ -286,7 +290,8 @@ when defined(windows):
|
||||
udata: pointer,
|
||||
child: DatagramTransport,
|
||||
bufferSize: int,
|
||||
ttl: int): DatagramTransport =
|
||||
ttl: int): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var localSock: AsyncFD
|
||||
doAssert(remote.family == local.family)
|
||||
doAssert(not isNil(cbproc))
|
||||
@ -300,6 +305,7 @@ when defined(windows):
|
||||
if sock == asyncInvalidSocket:
|
||||
localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM,
|
||||
Protocol.IPPROTO_UDP)
|
||||
|
||||
if localSock == asyncInvalidSocket:
|
||||
raiseTransportOsError(osLastError())
|
||||
else:
|
||||
@ -397,7 +403,7 @@ when defined(windows):
|
||||
else:
|
||||
# Linux/BSD/MacOS part
|
||||
|
||||
proc readDatagramLoop(udata: pointer) =
|
||||
proc readDatagramLoop(udata: pointer) {.raises: Defect.}=
|
||||
var raddr: TransportAddress
|
||||
doAssert(not isNil(udata))
|
||||
var cdata = cast[ptr CompletionData](udata)
|
||||
@ -466,15 +472,30 @@ else:
|
||||
break
|
||||
else:
|
||||
transp.state.incl(WritePaused)
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
|
||||
proc resumeWrite(transp: DatagramTransport) {.inline.} =
|
||||
transp.state.excl(WritePaused)
|
||||
try:
|
||||
addWriter(transp.fd, writeDatagramLoop, cast[pointer](transp))
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "addWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "addWriter"
|
||||
|
||||
proc resumeRead(transp: DatagramTransport) {.inline.} =
|
||||
transp.state.excl(ReadPaused)
|
||||
try:
|
||||
addReader(transp.fd, readDatagramLoop, cast[pointer](transp))
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
|
||||
proc newDatagramTransportCommon(cbproc: DatagramCallback,
|
||||
remote: TransportAddress,
|
||||
@ -482,9 +503,10 @@ else:
|
||||
sock: AsyncFD,
|
||||
flags: set[ServerFlags],
|
||||
udata: pointer,
|
||||
child: DatagramTransport = nil,
|
||||
child: DatagramTransport,
|
||||
bufferSize: int,
|
||||
ttl: int): DatagramTransport =
|
||||
ttl: int): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var localSock: AsyncFD
|
||||
doAssert(remote.family == local.family)
|
||||
doAssert(not isNil(cbproc))
|
||||
@ -580,7 +602,7 @@ else:
|
||||
|
||||
proc close*(transp: DatagramTransport) =
|
||||
## Closes and frees resources of transport ``transp``.
|
||||
proc continuation(udata: pointer) =
|
||||
proc continuation(udata: pointer) {.raises: Defect.} =
|
||||
if not(transp.future.finished()):
|
||||
# Stop tracking transport
|
||||
untrackDgram(transp)
|
||||
@ -612,7 +634,8 @@ proc newDatagramTransport*(cbproc: DatagramCallback,
|
||||
child: DatagramTransport = nil,
|
||||
bufSize: int = DefaultDatagramBufferSize,
|
||||
ttl: int = 0
|
||||
): DatagramTransport =
|
||||
): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Create new UDP datagram transport (IPv4).
|
||||
##
|
||||
## ``cbproc`` - callback which will be called, when new datagram received.
|
||||
@ -637,7 +660,8 @@ proc newDatagramTransport*[T](cbproc: DatagramCallback,
|
||||
child: DatagramTransport = nil,
|
||||
bufSize: int = DefaultDatagramBufferSize,
|
||||
ttl: int = 0
|
||||
): DatagramTransport =
|
||||
): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var fflags = flags + {GCUserData}
|
||||
GC_ref(udata)
|
||||
result = newDatagramTransportCommon(cbproc, remote, local, sock,
|
||||
@ -653,7 +677,8 @@ proc newDatagramTransport6*(cbproc: DatagramCallback,
|
||||
child: DatagramTransport = nil,
|
||||
bufSize: int = DefaultDatagramBufferSize,
|
||||
ttl: int = 0
|
||||
): DatagramTransport =
|
||||
): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Create new UDP datagram transport (IPv6).
|
||||
##
|
||||
## ``cbproc`` - callback which will be called, when new datagram received.
|
||||
@ -678,7 +703,8 @@ proc newDatagramTransport6*[T](cbproc: DatagramCallback,
|
||||
child: DatagramTransport = nil,
|
||||
bufSize: int = DefaultDatagramBufferSize,
|
||||
ttl: int = 0
|
||||
): DatagramTransport =
|
||||
): DatagramTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var fflags = flags + {GCUserData}
|
||||
GC_ref(udata)
|
||||
result = newDatagramTransportCommon(cbproc, remote, local, sock,
|
||||
@ -815,7 +841,7 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress,
|
||||
return retFuture
|
||||
|
||||
proc peekMessage*(transp: DatagramTransport, msg: var seq[byte],
|
||||
msglen: var int) =
|
||||
msglen: var int) {.raises: [Defect, CatchableError].} =
|
||||
## Get access to internal message buffer and length of incoming datagram.
|
||||
if ReadError in transp.state:
|
||||
transp.state.excl(ReadError)
|
||||
@ -823,7 +849,8 @@ proc peekMessage*(transp: DatagramTransport, msg: var seq[byte],
|
||||
shallowCopy(msg, transp.buffer)
|
||||
msglen = transp.buflen
|
||||
|
||||
proc getMessage*(transp: DatagramTransport): seq[byte] =
|
||||
proc getMessage*(transp: DatagramTransport): seq[byte] {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Copy data from internal message buffer and return result.
|
||||
if ReadError in transp.state:
|
||||
transp.state.excl(ReadError)
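With the changes above, DatagramCallback now requires {.gcsafe, raises: [Defect].} and the newDatagramTransport constructors advertise raises: [Defect, CatchableError]. A minimal sketch of user code written against that contract (illustrative only, not part of this commit; the address is arbitrary and the callback relies on errors being captured in the returned Future rather than raised):

proc onDatagram(transp: DatagramTransport,
                remote: TransportAddress): Future[void] {.async.} =
  # getMessage() copies the received datagram out of the transport's internal
  # buffer; per the hunk above it may raise CatchableError, which the async
  # transformation routes into the returned Future.
  let msg = transp.getMessage()
  echo "received ", len(msg), " bytes from ", $remote

let transp = newDatagramTransport(onDatagram,
                                  local = initTAddress("127.0.0.1:9000"))
runForever()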
20
vendor/nim-chronos/chronos/transports/ipnet.nim
vendored
20
vendor/nim-chronos/chronos/transports/ipnet.nim
vendored
@ -8,8 +8,11 @@
# MIT license (LICENSE-MIT)

## This module implements various IP network utility procedures.
import stew/endians2, strutils
import common

{.push raises: [Defect].}

import stew/endians2, std/strutils
import ./common
export common

type
@ -325,9 +328,9 @@ proc `$`*(mask: IpMask, include0x = false): string =
|
||||
else:
|
||||
result.add(chr(ord('A') + (c - 10)))
|
||||
else:
|
||||
raise newException(ValueError, "Invalid mask")
|
||||
return "Unknown mask family: " & $host.family
|
||||
|
||||
proc ip*(mask: IpMask): string =
|
||||
proc ip*(mask: IpMask): string {.raises: [Defect, ValueError].} =
|
||||
## Returns IP address text representation of IP mask ``mask``.
|
||||
if mask.family == AddressFamily.IPv4:
|
||||
var ip = IpAddress(family: IpAddressFamily.IPv4)
|
||||
@ -363,7 +366,8 @@ proc init*(t: typedesc[IpNet], host: TransportAddress,
|
||||
result.mask = mask
|
||||
result.host = host
|
||||
|
||||
proc init*(t: typedesc[IpNet], network: string): IpNet =
|
||||
proc init*(t: typedesc[IpNet], network: string): IpNet {.
|
||||
raises: [Defect, TransportAddressError].} =
|
||||
## Initialize IP Network from string representation in format
|
||||
## <address>/<prefix length> or <address>/<netmask address>.
|
||||
var parts = network.rsplit("/", maxsplit = 1)
|
||||
@ -549,7 +553,10 @@ proc `$`*(net: IpNet): string =
|
||||
result.add("/")
|
||||
let prefix = net.mask.prefix()
|
||||
if prefix == -1:
|
||||
try:
|
||||
result.add(net.mask.ip())
|
||||
except ValueError as exc:
|
||||
result.add(exc.msg)
|
||||
else:
|
||||
result.add($prefix)
|
||||
elif net.host.family == AddressFamily.IPv6:
|
||||
@ -559,7 +566,10 @@ proc `$`*(net: IpNet): string =
|
||||
result.add("/")
|
||||
let prefix = net.mask.prefix()
|
||||
if prefix == -1:
|
||||
try:
|
||||
result.add(net.mask.ip())
|
||||
except ValueError as exc:
|
||||
result.add(exc.msg)
|
||||
else:
|
||||
result.add($prefix)
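Since IpNet.init(string) above now declares raises: [Defect, TransportAddressError], callers are expected to handle a parse failure explicitly. A small usage sketch (the network string is an arbitrary example):

var net: IpNet
try:
  net = IpNet.init("192.168.0.0/24")
except TransportAddressError as exc:
  echo "invalid network: ", exc.msg
  quit(1)
echo $net   # the $ overload above renders the <address>/<prefix length> form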
20
vendor/nim-chronos/chronos/transports/osnet.nim
vendored
20
vendor/nim-chronos/chronos/transports/osnet.nim
vendored
@ -9,9 +9,12 @@

## This module implements cross-platform network interfaces list.
## Currently supported OSes are Windows, Linux, MacOS, BSD(not tested).
import algorithm

{.push raises: [Defect].}

import std/algorithm
from strutils import toHex
import ipnet
import ./ipnet
export ipnet

const
@ -19,7 +22,7 @@ const
|
||||
|
||||
type
|
||||
InterfaceType* = enum
|
||||
IfError = 0, # This is workaround element for ProoveInit warnings.
|
||||
IfError = 0, # This is workaround element for ProveInit warnings.
|
||||
IfOther = 1,
|
||||
IfRegular1822 = 2,
|
||||
IfHdh1822 = 3,
|
||||
@ -316,21 +319,22 @@ proc `$`*(iface: NetworkInterface): string =
|
||||
res.add("inet6 ")
|
||||
res.add($item)
|
||||
res.add(" netmask ")
|
||||
res.add($(item.netmask().address()))
|
||||
res.add(try: $(item.netmask().address()) except ValueError as exc: exc.msg)
|
||||
res.add(" brd ")
|
||||
res.add($(item.broadcast().address()))
|
||||
res.add(
|
||||
try: $(item.broadcast().address()) except ValueError as exc: exc.msg)
|
||||
res
|
||||
|
||||
proc `$`*(route: Route): string =
|
||||
var res = $route.dest.address()
|
||||
var res = try: $route.dest.address() except ValueError as exc: exc.msg
|
||||
res.add(" via ")
|
||||
if route.gateway.family != AddressFamily.None:
|
||||
res.add("gateway ")
|
||||
res.add($route.gateway.address())
|
||||
res.add(try: $route.gateway.address() except ValueError as exc: exc.msg)
|
||||
else:
|
||||
res.add("link")
|
||||
res.add(" src ")
|
||||
res.add($route.source.address())
|
||||
res.add(try: $route.source.address() except ValueError as exc: exc.msg)
|
||||
res
|
||||
|
||||
proc cmp*(a, b: NetworkInterface): int =
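The rewritten $ procs above show a pattern this commit applies throughout the transport code: a call that can raise is folded into an inline try expression so the surrounding proc stays within {.push raises: [Defect].}. Condensed into a standalone sketch (describe is a hypothetical name; address() raises ValueError for non-IP address families):

proc describe(ta: TransportAddress): string =
  # Turn a possible ValueError into printable text instead of propagating it,
  # so describe() itself stays free of CatchableError.
  result = try: $ta.address()
           except ValueError as exc: exc.msg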
274
vendor/nim-chronos/chronos/transports/stream.nim
vendored
274
vendor/nim-chronos/chronos/transports/stream.nim
vendored
@ -6,11 +6,12 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import net, nativesockets, os, deques
import ../asyncloop, ../handles
import common

{.deadCodeElim: on.}
{.push raises: [Defect].}

import std/[net, nativesockets, os, deques]
import ".."/[asyncloop, handles, selectors2]
import common

when defined(windows):
import winlean
@ -62,7 +63,7 @@ type
|
||||
|
||||
ReadMessagePredicate* = proc (data: openarray[byte]): tuple[consumed: int,
|
||||
done: bool] {.
|
||||
gcsafe, raises: [].}
|
||||
gcsafe, raises: [Defect].}
|
||||
|
||||
const
|
||||
StreamTransportTrackerName = "stream.transport"
|
||||
@ -78,7 +79,7 @@ when defined(windows):
|
||||
reader: Future[void] # Current reader Future
|
||||
buffer: seq[byte] # Reading buffer
|
||||
offset: int # Reading buffer offset
|
||||
error: ref Exception # Current error
|
||||
error: ref CatchableError # Current error
|
||||
queue: Deque[StreamVector] # Writer queue
|
||||
future: Future[void] # Stream life future
|
||||
# Windows specific part
|
||||
@ -105,7 +106,7 @@ else:
|
||||
reader: Future[void] # Current reader Future
|
||||
buffer: seq[byte] # Reading buffer
|
||||
offset: int # Reading buffer offset
|
||||
error: ref Exception # Current error
|
||||
error: ref CatchableError # Current error
|
||||
queue: Deque[StreamVector] # Writer queue
|
||||
future: Future[void] # Stream life future
|
||||
case kind*: TransportKind
|
||||
@ -120,13 +121,13 @@ else:
|
||||
|
||||
type
|
||||
StreamCallback* = proc(server: StreamServer,
|
||||
client: StreamTransport): Future[void] {.gcsafe.}
|
||||
client: StreamTransport): Future[void] {.gcsafe, raises: [Defect].}
|
||||
## New remote client connection callback
|
||||
## ``server`` - StreamServer object.
|
||||
## ``client`` - accepted client transport.
|
||||
|
||||
TransportInitCallback* = proc(server: StreamServer,
|
||||
fd: AsyncFD): StreamTransport {.gcsafe.}
|
||||
fd: AsyncFD): StreamTransport {.gcsafe, raises: [Defect].}
|
||||
## Custom transport initialization procedure, which can allocate inherited
|
||||
## StreamTransport object.
|
||||
|
||||
@ -137,7 +138,8 @@ type
|
||||
init*: TransportInitCallback # callback which will be called before
|
||||
# transport for new client
|
||||
|
||||
proc remoteAddress*(transp: StreamTransport): TransportAddress =
|
||||
proc remoteAddress*(transp: StreamTransport): TransportAddress {.
|
||||
raises: [Defect, TransportError].} =
|
||||
## Returns ``transp`` remote socket address.
|
||||
if transp.kind != TransportKind.Socket:
|
||||
raise newException(TransportError, "Socket required!")
|
||||
@ -150,7 +152,8 @@ proc remoteAddress*(transp: StreamTransport): TransportAddress =
|
||||
fromSAddr(addr saddr, slen, transp.remote)
|
||||
result = transp.remote
|
||||
|
||||
proc localAddress*(transp: StreamTransport): TransportAddress =
|
||||
proc localAddress*(transp: StreamTransport): TransportAddress {.
|
||||
raises: [Defect, TransportError].} =
|
||||
## Returns ``transp`` local socket address.
|
||||
if transp.kind != TransportKind.Socket:
|
||||
raise newException(TransportError, "Socket required!")
|
||||
@ -196,8 +199,8 @@ template shiftVectorFile(v, o: untyped) =
|
||||
(v).buf = cast[pointer](cast[uint]((v).buf) - cast[uint](o))
|
||||
(v).offset += cast[uint]((o))
|
||||
|
||||
proc setupStreamTransportTracker(): StreamTransportTracker {.gcsafe.}
|
||||
proc setupStreamServerTracker(): StreamServerTracker {.gcsafe.}
|
||||
proc setupStreamTransportTracker(): StreamTransportTracker {.gcsafe, raises: [Defect].}
|
||||
proc setupStreamServerTracker(): StreamServerTracker {.gcsafe, raises: [Defect].}
|
||||
|
||||
proc getStreamTransportTracker(): StreamTransportTracker {.inline.} =
|
||||
result = cast[StreamTransportTracker](getTracker(StreamTransportTrackerName))
|
||||
@ -267,7 +270,7 @@ proc completePendingWriteQueue(queue: var Deque[StreamVector],
|
||||
vector.writer.complete(v)
|
||||
|
||||
proc failPendingWriteQueue(queue: var Deque[StreamVector],
|
||||
error: ref Exception) {.inline.} =
|
||||
error: ref CatchableError) {.inline.} =
|
||||
while len(queue) > 0:
|
||||
var vector = queue.popFirst()
|
||||
if not(vector.writer.finished()):
|
||||
@ -704,8 +707,11 @@ when defined(windows):
|
||||
|
||||
toSAddr(raddress, saddr, slen)
|
||||
proto = Protocol.IPPROTO_TCP
|
||||
sock = createAsyncSocket(raddress.getDomain(), SockType.SOCK_STREAM,
|
||||
sock = try: createAsyncSocket(raddress.getDomain(), SockType.SOCK_STREAM,
|
||||
proto)
|
||||
except CatchableError as exc:
|
||||
retFuture.fail(exc)
|
||||
return retFuture
|
||||
if sock == asyncInvalidSocket:
|
||||
retFuture.fail(getTransportOsError(osLastError()))
|
||||
return retFuture
|
||||
@ -760,7 +766,8 @@ when defined(windows):
|
||||
elif address.family == AddressFamily.Unix:
|
||||
## Unix domain socket emulation with Windows Named Pipes.
|
||||
var pipeHandle = INVALID_HANDLE_VALUE
|
||||
proc pipeContinuation(udata: pointer) {.gcsafe.} =
|
||||
var pipeContinuation: proc (udata: pointer) {.gcsafe, raises: [Defect].}
|
||||
pipeContinuation = proc (udata: pointer) =
|
||||
# Continue only if `retFuture` is not cancelled.
|
||||
if not(retFuture.finished()):
|
||||
var pipeSuffix = $cast[cstring](unsafeAddr address.address_un[0])
|
||||
@ -777,9 +784,17 @@ when defined(windows):
|
||||
else:
|
||||
retFuture.fail(getTransportOsError(err))
|
||||
else:
|
||||
try:
|
||||
register(AsyncFD(pipeHandle))
|
||||
let transp = newStreamPipeTransport(AsyncFD(pipeHandle),
|
||||
except CatchableError as exc:
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
|
||||
let transp = try: newStreamPipeTransport(AsyncFD(pipeHandle),
|
||||
bufferSize, child)
|
||||
except CatchableError as exc:
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
# Start tracking transport
|
||||
trackStream(transp)
|
||||
retFuture.complete(transp)
|
||||
@ -787,7 +802,8 @@ when defined(windows):
|
||||
|
||||
return retFuture
|
||||
|
||||
proc createAcceptPipe(server: StreamServer) =
|
||||
proc createAcceptPipe(server: StreamServer) {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
let pipeSuffix = $cast[cstring](addr server.local.address_un)
|
||||
let pipeName = newWideCString(r"\\.\pipe\" & pipeSuffix[1 .. ^1])
|
||||
var openMode = PIPE_ACCESS_DUPLEX or FILE_FLAG_OVERLAPPED
|
||||
@ -840,7 +856,7 @@ when defined(windows):
|
||||
# We should not raise defects in this loop.
|
||||
discard disconnectNamedPipe(Handle(server.sock))
|
||||
discard closeHandle(HANDLE(server.sock))
|
||||
raiseTransportOsError(osLastError())
|
||||
raiseAssert osErrorMsg(osLastError())
|
||||
else:
|
||||
# Server close happens in callback, and we are not started new
|
||||
# connectNamedPipe session.
|
||||
@ -864,10 +880,12 @@ when defined(windows):
|
||||
DWORD(server.bufferSize),
|
||||
DWORD(0), nil)
|
||||
if pipeHandle == INVALID_HANDLE_VALUE:
|
||||
raiseTransportOsError(osLastError())
|
||||
raiseAssert osErrorMsg(osLastError())
|
||||
server.sock = AsyncFD(pipeHandle)
|
||||
server.aovl.data.fd = AsyncFD(pipeHandle)
|
||||
register(server.sock)
|
||||
try: register(server.sock)
|
||||
except CatchableError as exc:
|
||||
raiseAsDefect exc, "register"
|
||||
let res = connectNamedPipe(pipeHandle,
|
||||
cast[POVERLAPPED](addr server.aovl))
|
||||
if res == 0:
|
||||
@ -880,7 +898,7 @@ when defined(windows):
|
||||
elif int32(err) == ERROR_PIPE_CONNECTED:
|
||||
discard
|
||||
else:
|
||||
raiseTransportOsError(err)
|
||||
raiseAssert osErrorMsg(err)
|
||||
break
|
||||
else:
|
||||
# Server close happens in callback, and we are not started new
|
||||
@ -905,7 +923,7 @@ when defined(windows):
|
||||
SockLen(sizeof(SocketHandle))) != 0'i32:
|
||||
let err = OSErrorCode(wsaGetLastError())
|
||||
server.asock.closeSocket()
|
||||
raiseTransportOsError(err)
|
||||
raiseAssert osErrorMsg(err)
|
||||
else:
|
||||
var ntransp: StreamTransport
|
||||
if not isNil(server.init):
|
||||
@ -930,7 +948,7 @@ when defined(windows):
|
||||
break
|
||||
else:
|
||||
server.asock.closeSocket()
|
||||
raiseTransportOsError(ovl.data.errCode)
|
||||
raiseAssert $(ovl.data.errCode)
|
||||
else:
|
||||
# Server close happens in callback, and we are not started new
|
||||
# AcceptEx session.
|
||||
@ -941,10 +959,12 @@ when defined(windows):
|
||||
## Initiation
|
||||
if server.status notin {ServerStatus.Stopped, ServerStatus.Closed}:
|
||||
server.apending = true
|
||||
server.asock = createAsyncSocket(server.domain, SockType.SOCK_STREAM,
|
||||
# TODO No way to report back errors!
|
||||
server.asock = try: createAsyncSocket(server.domain, SockType.SOCK_STREAM,
|
||||
Protocol.IPPROTO_TCP)
|
||||
except CatchableError as exc: raiseAsDefect exc, "createAsyncSocket"
|
||||
if server.asock == asyncInvalidSocket:
|
||||
raiseTransportOsError(OSErrorCode(wsaGetLastError()))
|
||||
raiseAssert osErrorMsg(OSErrorCode(wsaGetLastError()))
|
||||
|
||||
var dwBytesReceived = DWORD(0)
|
||||
let dwReceiveDataLength = DWORD(0)
|
||||
@ -965,7 +985,7 @@ when defined(windows):
|
||||
elif int32(err) == ERROR_IO_PENDING:
|
||||
discard
|
||||
else:
|
||||
raiseTransportOsError(err)
|
||||
raiseAssert osErrorMsg(err)
|
||||
break
|
||||
else:
|
||||
# Server close happens in callback, and we are not started new
|
||||
@ -1071,8 +1091,13 @@ when defined(windows):
|
||||
ntransp = newStreamPipeTransport(server.sock, server.bufferSize,
|
||||
nil, flags)
|
||||
# Start tracking transport
|
||||
trackStream(ntransp)
|
||||
try:
|
||||
server.createAcceptPipe()
|
||||
except CatchableError as exc:
|
||||
closeHandle(server.sock)
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
trackStream(ntransp)
|
||||
retFuture.complete(ntransp)
|
||||
|
||||
elif int32(ovl.data.errCode) in {ERROR_OPERATION_ABORTED,
|
||||
@ -1082,8 +1107,14 @@ when defined(windows):
|
||||
server.clean()
|
||||
else:
|
||||
let sock = server.sock
|
||||
try:
|
||||
server.createAcceptPipe()
|
||||
except CatchableError as exc:
|
||||
closeHandle(sock)
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
closeHandle(sock)
|
||||
|
||||
retFuture.fail(getTransportOsError(ovl.data.errCode))
|
||||
|
||||
proc cancellationPipe(udata: pointer) {.gcsafe.} =
|
||||
@ -1092,8 +1123,12 @@ when defined(windows):
|
||||
if server.local.family in {AddressFamily.IPv4, AddressFamily.IPv6}:
|
||||
# TCP Sockets part
|
||||
var loop = getThreadDispatcher()
|
||||
server.asock = createAsyncSocket(server.domain, SockType.SOCK_STREAM,
|
||||
server.asock = try: createAsyncSocket(server.domain, SockType.SOCK_STREAM,
|
||||
Protocol.IPPROTO_TCP)
|
||||
except CatchableError as exc:
|
||||
retFuture.fail(exc)
|
||||
return retFuture
|
||||
|
||||
if server.asock == asyncInvalidSocket:
|
||||
let err = osLastError()
|
||||
if int32(err) == ERROR_TOO_MANY_OPEN_FILES:
|
||||
@ -1173,7 +1208,8 @@ else:
|
||||
result = (err == OSErrorCode(ECONNRESET)) or
|
||||
(err == OSErrorCode(EPIPE))
|
||||
|
||||
proc writeStreamLoop(udata: pointer) {.gcsafe.} =
|
||||
proc writeStreamLoop(udata: pointer) =
|
||||
# TODO fix Defect raises - they "shouldn't" happen
|
||||
var cdata = cast[ptr CompletionData](udata)
|
||||
var transp = cast[StreamTransport](cdata.udata)
|
||||
let fd = SocketHandle(cdata.fd)
|
||||
@ -1206,7 +1242,13 @@ else:
|
||||
if int(err) == EINTR:
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
|
||||
if isConnResetError(err):
|
||||
# Soft error happens which indicates that remote peer got
|
||||
# disconnected, complete all pending writes in queue with 0.
|
||||
@ -1239,7 +1281,13 @@ else:
|
||||
if int(err) == EINTR:
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
|
||||
if isConnResetError(err):
|
||||
# Soft error happens which indicates that remote peer got
|
||||
# disconnected, complete all pending writes in queue with 0.
|
||||
@ -1270,7 +1318,13 @@ else:
|
||||
if int(err) == EINTR:
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
|
||||
if isConnResetError(err):
|
||||
# Soft error happens which indicates that remote peer got
|
||||
# disconnected, complete all pending writes in queue with 0.
|
||||
@ -1303,7 +1357,12 @@ else:
|
||||
if int(err) == EINTR:
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
if isConnResetError(err):
|
||||
# Soft error happens which indicates that remote peer got
|
||||
# disconnected, complete all pending writes in queue with 0.
|
||||
@ -1320,9 +1379,15 @@ else:
|
||||
break
|
||||
else:
|
||||
transp.state.incl(WritePaused)
|
||||
try:
|
||||
transp.fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeWriter"
|
||||
|
||||
proc readStreamLoop(udata: pointer) {.gcsafe.} =
|
||||
proc readStreamLoop(udata: pointer) =
|
||||
# TODO fix Defect raises - they "shouldn't" happen
|
||||
var cdata = cast[ptr CompletionData](udata)
|
||||
var transp = cast[StreamTransport](cdata.udata)
|
||||
let fd = SocketHandle(cdata.fd)
|
||||
@ -1345,19 +1410,39 @@ else:
|
||||
continue
|
||||
elif int(err) in {ECONNRESET}:
|
||||
transp.state.incl({ReadEof, ReadPaused})
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
else:
|
||||
transp.state.incl(ReadPaused)
|
||||
transp.setReadError(err)
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
elif res == 0:
|
||||
transp.state.incl({ReadEof, ReadPaused})
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
else:
|
||||
transp.offset += res
|
||||
if transp.offset == len(transp.buffer):
|
||||
transp.state.incl(ReadPaused)
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
transp.completeReader()
|
||||
break
|
||||
elif transp.kind == TransportKind.Pipe:
|
||||
@ -1371,15 +1456,30 @@ else:
|
||||
else:
|
||||
transp.state.incl(ReadPaused)
|
||||
transp.setReadError(err)
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
elif res == 0:
|
||||
transp.state.incl({ReadEof, ReadPaused})
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
else:
|
||||
transp.offset += res
|
||||
if transp.offset == len(transp.buffer):
|
||||
transp.state.incl(ReadPaused)
|
||||
try:
|
||||
cdata.fd.removeReader()
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
transp.completeReader()
|
||||
break
|
||||
|
||||
@ -1424,7 +1524,6 @@ else:
|
||||
var
|
||||
saddr: Sockaddr_storage
|
||||
slen: SockLen
|
||||
sock: AsyncFD
|
||||
proto: Protocol
|
||||
var retFuture = newFuture[StreamTransport]("stream.transport.connect")
|
||||
address.toSAddr(saddr, slen)
|
||||
@ -1433,8 +1532,13 @@ else:
|
||||
# `Protocol` enum is missing `0` value, so we making here cast, until
|
||||
# `Protocol` enum will not support IPPROTO_IP == 0.
|
||||
proto = cast[Protocol](0)
|
||||
sock = createAsyncSocket(address.getDomain(), SockType.SOCK_STREAM,
|
||||
|
||||
let sock = try: createAsyncSocket(address.getDomain(), SockType.SOCK_STREAM,
|
||||
proto)
|
||||
except CatchableError as exc:
|
||||
retFuture.fail(exc)
|
||||
return retFuture
|
||||
|
||||
if sock == asyncInvalidSocket:
|
||||
let err = osLastError()
|
||||
if int(err) == EMFILE:
|
||||
@ -1443,12 +1547,20 @@ else:
|
||||
retFuture.fail(getTransportOsError(err))
|
||||
return retFuture
|
||||
|
||||
proc continuation(udata: pointer) {.gcsafe.} =
|
||||
proc continuation(udata: pointer) =
|
||||
if not(retFuture.finished()):
|
||||
var data = cast[ptr CompletionData](udata)
|
||||
var err = 0
|
||||
let fd = data.fd
|
||||
try:
|
||||
fd.removeWriter()
|
||||
except IOSelectorsException as exc:
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
except ValueError as exc:
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
|
||||
if not fd.getSocketError(err):
|
||||
closeSocket(fd)
|
||||
retFuture.fail(getTransportOsError(osLastError()))
|
||||
@ -1462,7 +1574,7 @@ else:
|
||||
trackStream(transp)
|
||||
retFuture.complete(transp)
|
||||
|
||||
proc cancel(udata: pointer) {.gcsafe.} =
|
||||
proc cancel(udata: pointer) =
|
||||
closeSocket(sock)
|
||||
|
||||
while true:
|
||||
@ -1483,11 +1595,18 @@ else:
|
||||
#
|
||||
# http://www.madore.org/~david/computers/connect-intr.html
|
||||
if int(err) == EINPROGRESS or int(err) == EINTR:
|
||||
try:
|
||||
sock.addWriter(continuation)
|
||||
except CatchableError as exc:
|
||||
closeSocket(sock)
|
||||
retFuture.fail(exc)
|
||||
return retFuture
|
||||
|
||||
retFuture.cancelCallback = cancel
|
||||
break
|
||||
else:
|
||||
sock.closeSocket()
|
||||
|
||||
retFuture.fail(getTransportOsError(err))
|
||||
break
|
||||
return retFuture
|
||||
@ -1504,7 +1623,9 @@ else:
|
||||
let res = posix.accept(SocketHandle(server.sock),
|
||||
cast[ptr SockAddr](addr saddr), addr slen)
|
||||
if int(res) > 0:
|
||||
let sock = wrapAsyncSocket(res)
|
||||
let sock = try: wrapAsyncSocket(res)
|
||||
except CatchableError as exc:
|
||||
raiseAsDefect exc, "wrapAsyncSocket"
|
||||
if sock != asyncInvalidSocket:
|
||||
var ntransp: StreamTransport
|
||||
if not isNil(server.init):
|
||||
@ -1526,23 +1647,37 @@ else:
|
||||
break
|
||||
else:
|
||||
## Critical unrecoverable error
|
||||
raiseTransportOsError(err)
|
||||
raiseAssert $err
|
||||
|
||||
proc resumeAccept(server: StreamServer) =
|
||||
proc resumeAccept(server: StreamServer) {.
|
||||
raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
addReader(server.sock, acceptLoop, cast[pointer](server))
|
||||
|
||||
proc pauseAccept(server: StreamServer) =
|
||||
proc pauseAccept(server: StreamServer) {.
|
||||
raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
removeReader(server.sock)
|
||||
|
||||
proc resumeRead(transp: StreamTransport) {.inline.} =
|
||||
if ReadPaused in transp.state:
|
||||
transp.state.excl(ReadPaused)
|
||||
# TODO reset flag on exception??
|
||||
try:
|
||||
addReader(transp.fd, readStreamLoop, cast[pointer](transp))
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
|
||||
proc resumeWrite(transp: StreamTransport) {.inline.} =
|
||||
if WritePaused in transp.state:
|
||||
transp.state.excl(WritePaused)
|
||||
# TODO reset flag on exception??
|
||||
try:
|
||||
addWriter(transp.fd, writeStreamLoop, cast[pointer](transp))
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "addWriter"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "addWriter"
|
||||
|
||||
proc accept*(server: StreamServer): Future[StreamTransport] =
|
||||
var retFuture = newFuture[StreamTransport]("stream.server.accept")
|
||||
@ -1565,7 +1700,12 @@ else:
|
||||
let res = posix.accept(SocketHandle(server.sock),
|
||||
cast[ptr SockAddr](addr saddr), addr slen)
|
||||
if int(res) > 0:
|
||||
let sock = wrapAsyncSocket(res)
|
||||
let sock = try: wrapAsyncSocket(res)
|
||||
except CatchableError as exc:
|
||||
close(res)
|
||||
retFuture.fail(exc)
|
||||
return
|
||||
|
||||
if sock != asyncInvalidSocket:
|
||||
var ntransp: StreamTransport
|
||||
if not isNil(server.init):
|
||||
@ -1592,23 +1732,41 @@ else:
|
||||
else:
|
||||
retFuture.fail(getTransportOsError(err))
|
||||
break
|
||||
try:
|
||||
removeReader(server.sock)
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
|
||||
proc cancellation(udata: pointer) {.gcsafe.} =
|
||||
proc cancellation(udata: pointer) =
|
||||
try:
|
||||
removeReader(server.sock)
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "removeReader"
|
||||
|
||||
try:
|
||||
addReader(server.sock, continuation, nil)
|
||||
except IOSelectorsException as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
except ValueError as exc:
|
||||
raiseAsDefect exc, "addReader"
|
||||
|
||||
retFuture.cancelCallback = cancellation
|
||||
return retFuture
|
||||
|
||||
proc start*(server: StreamServer) =
|
||||
proc start*(server: StreamServer) {.
|
||||
raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Starts ``server``.
|
||||
doAssert(not(isNil(server.function)))
|
||||
if server.status == ServerStatus.Starting:
|
||||
server.resumeAccept()
|
||||
server.status = ServerStatus.Running
|
||||
|
||||
proc stop*(server: StreamServer) =
|
||||
proc stop*(server: StreamServer) {.
|
||||
raises: [Defect, IOSelectorsException, ValueError].} =
|
||||
## Stops ``server``.
|
||||
if server.status == ServerStatus.Running:
|
||||
server.pauseAccept()
|
||||
@ -1620,10 +1778,10 @@ proc join*(server: StreamServer): Future[void] =
|
||||
## Waits until ``server`` is not closed.
|
||||
var retFuture = newFuture[void]("stream.transport.server.join")
|
||||
|
||||
proc continuation(udata: pointer) {.gcsafe.} =
|
||||
proc continuation(udata: pointer) =
|
||||
retFuture.complete()
|
||||
|
||||
proc cancel(udata: pointer) {.gcsafe.} =
|
||||
proc cancel(udata: pointer) =
|
||||
server.loopFuture.removeCallback(continuation, cast[pointer](retFuture))
|
||||
|
||||
if not(server.loopFuture.finished()):
|
||||
@ -1638,7 +1796,7 @@ proc close*(server: StreamServer) =
|
||||
##
|
||||
## Please note that release of resources is not completed immediately, to be
|
||||
## sure all resources got released please use ``await server.join()``.
|
||||
proc continuation(udata: pointer) {.gcsafe.} =
|
||||
proc continuation(udata: pointer) =
|
||||
# Stop tracking server
|
||||
if not(server.loopFuture.finished()):
|
||||
server.clean()
|
||||
@ -1680,7 +1838,8 @@ proc createStreamServer*(host: TransportAddress,
|
||||
bufferSize: int = DefaultStreamBufferSize,
|
||||
child: StreamServer = nil,
|
||||
init: TransportInitCallback = nil,
|
||||
udata: pointer = nil): StreamServer =
|
||||
udata: pointer = nil): StreamServer {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Create new TCP stream server.
|
||||
##
|
||||
## ``host`` - address to which server will be bound.
|
||||
@ -1707,6 +1866,7 @@ proc createStreamServer*(host: TransportAddress,
|
||||
serverSocket = createAsyncSocket(host.getDomain(),
|
||||
SockType.SOCK_STREAM,
|
||||
Protocol.IPPROTO_TCP)
|
||||
|
||||
if serverSocket == asyncInvalidSocket:
|
||||
raiseTransportOsError(osLastError())
|
||||
else:
|
||||
@ -1770,6 +1930,7 @@ proc createStreamServer*(host: TransportAddress,
|
||||
if not setSocketBlocking(SocketHandle(sock), false):
|
||||
raiseTransportOsError(osLastError())
|
||||
register(sock)
|
||||
|
||||
serverSocket = sock
|
||||
|
||||
if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}:
|
||||
@ -1869,7 +2030,8 @@ proc createStreamServer*(host: TransportAddress,
|
||||
bufferSize: int = DefaultStreamBufferSize,
|
||||
child: StreamServer = nil,
|
||||
init: TransportInitCallback = nil,
|
||||
udata: pointer = nil): StreamServer =
|
||||
udata: pointer = nil): StreamServer {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
result = createStreamServer(host, nil, flags, sock, backlog, bufferSize,
|
||||
child, init, cast[pointer](udata))
|
||||
|
||||
@ -1881,7 +2043,8 @@ proc createStreamServer*[T](host: TransportAddress,
|
||||
backlog: int = 100,
|
||||
bufferSize: int = DefaultStreamBufferSize,
|
||||
child: StreamServer = nil,
|
||||
init: TransportInitCallback = nil): StreamServer =
|
||||
init: TransportInitCallback = nil): StreamServer {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var fflags = flags + {GCUserData}
|
||||
GC_ref(udata)
|
||||
result = createStreamServer(host, cbproc, fflags, sock, backlog, bufferSize,
|
||||
@ -1894,7 +2057,8 @@ proc createStreamServer*[T](host: TransportAddress,
|
||||
backlog: int = 100,
|
||||
bufferSize: int = DefaultStreamBufferSize,
|
||||
child: StreamServer = nil,
|
||||
init: TransportInitCallback = nil): StreamServer =
|
||||
init: TransportInitCallback = nil): StreamServer {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
var fflags = flags + {GCUserData}
|
||||
GC_ref(udata)
|
||||
result = createStreamServer(host, nil, fflags, sock, backlog, bufferSize,
|
||||
@ -1959,10 +2123,12 @@ proc writeFile*(transp: StreamTransport, handle: int,
|
||||
##
|
||||
## You can specify starting ``offset`` in opened file and number of bytes
|
||||
## to transfer from file to transport via ``size``.
|
||||
var retFuture = newFuture[int]("stream.transport.writeFile")
|
||||
when defined(windows):
|
||||
if transp.kind != TransportKind.Socket:
|
||||
raise newException(TransportNoSupport, "writeFile() is not supported!")
|
||||
var retFuture = newFuture[int]("stream.transport.writeFile")
|
||||
retFuture.fail(newException(
|
||||
TransportNoSupport, "writeFile() is not supported!"))
|
||||
return retFuture
|
||||
transp.checkClosed(retFuture)
|
||||
transp.checkWriteEof(retFuture)
|
||||
var vector = StreamVector(kind: DataFile, writer: retFuture,
|
||||
@ -2295,11 +2461,13 @@ proc closed*(transp: StreamTransport): bool {.inline.} =
|
||||
result = ({ReadClosed, WriteClosed} * transp.state != {})
|
||||
|
||||
proc fromPipe*(fd: AsyncFD, child: StreamTransport = nil,
|
||||
bufferSize = DefaultStreamBufferSize): StreamTransport =
|
||||
bufferSize = DefaultStreamBufferSize): StreamTransport {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
## Create new transport object using pipe's file descriptor.
|
||||
##
|
||||
## ``bufferSize`` is size of internal buffer for transport.
|
||||
register(fd)
|
||||
|
||||
result = newStreamPipeTransport(fd, bufferSize, child)
|
||||
# Start tracking transport
|
||||
trackStream(result)
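Most of the churn in this file is the same three-line pattern: a selector call that can fail with IOSelectorsException or ValueError is wrapped and re-raised as a Defect via raiseAsDefect, because the event-loop callbacks are now declared to raise nothing else. A hypothetical helper (not part of this commit) condensing those call sites:

template toDefect(name: string, body: untyped) =
  # Selector registration failures at these call sites indicate an internal
  # logic error, so they are converted into Defects rather than propagated.
  try:
    body
  except IOSelectorsException as exc:
    raiseAsDefect exc, name
  except ValueError as exc:
    raiseAsDefect exc, name

# Usage, equivalent to the expanded blocks above (transp being the
# surrounding StreamTransport):
toDefect "removeWriter":
  transp.fd.removeWriter()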
2
vendor/nim-chronos/tests/testaddress.nim
vendored
2
vendor/nim-chronos/tests/testaddress.nim
vendored
@ -5,7 +5,7 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import unittest
import unittest2
import ../chronos

when defined(nimHasUsed): {.used.}
2
vendor/nim-chronos/tests/testall.nim
vendored
2
vendor/nim-chronos/tests/testall.nim
vendored
@ -7,5 +7,5 @@
# MIT license (LICENSE-MIT)
import testmacro, testsync, testsoon, testtime, testfut, testsignal,
testaddress, testdatagram, teststream, testserver, testbugs, testnet,
testasyncstream, testhttpserver
testasyncstream, testhttpserver, testshttpserver
import testutils
2
vendor/nim-chronos/tests/testasyncstream.nim
vendored
2
vendor/nim-chronos/tests/testasyncstream.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
import ../chronos/streams/[tlsstream, chunkstream, boundstream]
|
||||
|
||||
|
2
vendor/nim-chronos/tests/testbugs.nim
vendored
2
vendor/nim-chronos/tests/testbugs.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
3
vendor/nim-chronos/tests/testdatagram.nim
vendored
3
vendor/nim-chronos/tests/testdatagram.nim
vendored
@ -5,7 +5,8 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import strutils, net, unittest
import std/[strutils, net]
import unittest2
import ../chronos

when defined(nimHasUsed): {.used.}
2
vendor/nim-chronos/tests/testfut.nim
vendored
2
vendor/nim-chronos/tests/testfut.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
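Every test module in this vendored update swaps the stdlib unittest import for unittest2, as in the hunk above; the suite/test/check surface stays source-compatible. A minimal sketch of a chronos test written against unittest2 (illustrative, not one of the vendored tests):

import unittest2
import ../chronos

suite "Example suite":
  test "sleepAsync completes":
    proc run(): Future[bool] {.async.} =
      await sleepAsync(10.milliseconds)
      return true
    check waitFor(run()) == true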
169
vendor/nim-chronos/tests/testhttpserver.nim
vendored
169
vendor/nim-chronos/tests/testhttpserver.nim
vendored
@ -5,69 +5,12 @@
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import std/[strutils, unittest, algorithm, strutils]
import ../chronos, ../chronos/apps
import std/[strutils, algorithm, strutils]
import unittest2
import ../chronos, ../chronos/apps/http/httpserver
import stew/base10

# To create self-signed certificate and key you can use openssl
|
||||
# openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \
|
||||
# -keyout example-com.key.pem -days 3650 -out example-com.cert.pem
|
||||
const HttpsSelfSignedRsaKey = """
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCn7tXGLKMIMzOG
|
||||
tVzUixax1/ftlSLcpEAkZMORuiCCnYjtIJhGZdzRFZC8fBlfAJZpLIAOfX2L2f1J
|
||||
ZuwpwDkOIvNqKMBrl5Mvkl5azPT0rtnjuwrcqN5NFtbmZPKFYvbjex2aXGqjl5MW
|
||||
nQIs/ZA++DVEXmaN9oDxcZsvRMDKfrGQf9iLeoVL47Gx9KpqNqD/JLIn4LpieumV
|
||||
yYidm6ukTOqHRvrWm36y6VvKW4TE97THacULmkeahtTf8zDJbbh4EO+gifgwgJ2W
|
||||
BUS0+5hMcWu8111mXmanlOVlcoW8fH8RmPjL1eK1Z3j3SVHEf7oWZtIVW5gGA0jQ
|
||||
nfA4K51RAgMBAAECggEANZ7/R13tWKrwouy6DWuz/WlWUtgx333atUQvZhKmWs5u
|
||||
cDjeJmxUC7b1FhoSB9GqNT7uTLIpKkSaqZthgRtNnIPwcU890Zz+dEwqMJgNByvl
|
||||
it+oYjjRco/+YmaNQaYN6yjelPE5Y678WlYb4b29Fz4t0/zIhj/VgEKkKH2tiXpS
|
||||
TIicoM7pSOscEUfaW3yp5bS5QwNU6/AaF1wws0feBACd19ZkcdPvr52jopbhxlXw
|
||||
h3XTV/vXIJd5zWGp0h/Jbd4xcD4MVo2GjfkeORKY6SjDaNzt8OGtePcKnnbUVu8b
|
||||
2XlDxukhDQXqJ3g0sHz47mhvo4JeIM+FgymRm+3QmQKBgQDTawrEA3Zy9WvucaC7
|
||||
Zah02oE9nuvpF12lZ7WJh7+tZ/1ss+Fm7YspEKaUiEk7nn1CAVFtem4X4YCXTBiC
|
||||
Oqq/o+ipv1yTur0ae6m4pwLm5wcMWBh3H5zjfQTfrClNN8yjWv8u3/sq8KesHPnT
|
||||
R92/sMAptAChPgTzQphWbxFiYwKBgQDLWFaBqXfZYVnTyUvKX8GorS6jGWc6Eh4l
|
||||
lAFA+2EBWDICrUxsDPoZjEXrWCixdqLhyehaI3KEFIx2bcPv6X2c7yx3IG5lA/Gx
|
||||
TZiKlY74c6jOTstkdLW9RJbg1VUHUVZMf/Owt802YmEfUI5S5v7jFmKW6VG+io+K
|
||||
+5KYeHD1uwKBgQDMf53KPA82422jFwYCPjLT1QduM2q97HwIomhWv5gIg63+l4BP
|
||||
rzYMYq6+vZUYthUy41OAMgyLzPQ1ZMXQMi83b7R9fTxvKRIBq9xfYCzObGnE5vHD
|
||||
SDDZWvR75muM5Yxr9nkfPkgVIPMO6Hg+hiVYZf96V0LEtNjU9HWmJYkLQQKBgQCQ
|
||||
ULGUdGHKtXy7AjH3/t3CiKaAupa4cANVSCVbqQy/l4hmvfdu+AbH+vXkgTzgNgKD
|
||||
nHh7AI1Vj//gTSayLlQn/Nbh9PJkXtg5rYiFUn+VdQBo6yMOuIYDPZqXFtCx0Nge
|
||||
kvCwisHpxwiG4PUhgS+Em259DDonsM8PJFx2OYRx4QKBgEQpGhg71Oi9MhPJshN7
|
||||
dYTowaMS5eLTk2264ARaY+hAIV7fgvUa+5bgTVaWL+Cfs33hi4sMRqlEwsmfds2T
|
||||
cnQiJ4cU20Euldfwa5FLnk6LaWdOyzYt/ICBJnKFRwfCUbS4Bu5rtMEM+3t0wxnJ
|
||||
IgaD04WhoL9EX0Qo3DC1+0kG
|
||||
-----END PRIVATE KEY-----
|
||||
"""
|
||||
|
||||
# This SSL certificate will expire 13 October 2030.
|
||||
const HttpsSelfSignedRsaCert = """
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDnzCCAoegAwIBAgIUUdcusjDd3XQi3FPM8urdFG3qI+8wDQYJKoZIhvcNAQEL
|
||||
BQAwXzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQz
|
||||
ODA4MB4XDTIwMTAxMjIxNDUwMVoXDTMwMTAxMDIxNDUwMVowXzELMAkGA1UEBhMC
|
||||
QVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdp
|
||||
dHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQzODA4MIIBIjANBgkqhkiG
|
||||
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+7VxiyjCDMzhrVc1IsWsdf37ZUi3KRAJGTD
|
||||
kboggp2I7SCYRmXc0RWQvHwZXwCWaSyADn19i9n9SWbsKcA5DiLzaijAa5eTL5Je
|
||||
Wsz09K7Z47sK3KjeTRbW5mTyhWL243sdmlxqo5eTFp0CLP2QPvg1RF5mjfaA8XGb
|
||||
L0TAyn6xkH/Yi3qFS+OxsfSqajag/ySyJ+C6YnrplcmInZurpEzqh0b61pt+sulb
|
||||
yluExPe0x2nFC5pHmobU3/MwyW24eBDvoIn4MICdlgVEtPuYTHFrvNddZl5mp5Tl
|
||||
ZXKFvHx/EZj4y9XitWd490lRxH+6FmbSFVuYBgNI0J3wOCudUQIDAQABo1MwUTAd
|
||||
BgNVHQ4EFgQUBKha84woY5WkFxKw7qx1cONg1H8wHwYDVR0jBBgwFoAUBKha84wo
|
||||
Y5WkFxKw7qx1cONg1H8wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
|
||||
AQEAHZMYt9Ry+Xj3vTbzpGFQzYQVTJlfJWSN6eWNOivRFQE5io9kOBEe5noa8aLo
|
||||
dLkw6ztxRP2QRJmlhGCO9/HwS17ckrkgZp3EC2LFnzxcBmoZu+owfxOT1KqpO52O
|
||||
IKOl8eVohi1pEicE4dtTJVcpI7VCMovnXUhzx1Ci4Vibns4a6H+BQa19a1JSpifN
|
||||
tO8U5jkjJ8Jprs/VPFhJj2O3di53oDHaYSE5eOrm2ZO14KFHSk9cGcOGmcYkUv8B
|
||||
nV5vnGadH5Lvfxb/BCpuONabeRdOxMt9u9yQ89vNpxFtRdZDCpGKZBCfmUP+5m3m
|
||||
N8r5CwGcIX/XPC3lKazzbZ8baA==
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
||||
suite "HTTP server testing suite":
|
||||
type
|
||||
@ -89,34 +32,6 @@ suite "HTTP server testing suite":
|
||||
if not(isNil(transp)):
|
||||
await closeWait(transp)
|
||||
|
||||
proc httpsClient(address: TransportAddress,
|
||||
data: string, flags = {NoVerifyHost, NoVerifyServerName}
|
||||
): Future[string] {.async.} =
|
||||
var
|
||||
transp: StreamTransport
|
||||
tlsstream: TlsAsyncStream
|
||||
reader: AsyncStreamReader
|
||||
writer: AsyncStreamWriter
|
||||
|
||||
try:
|
||||
transp = await connect(address)
|
||||
reader = newAsyncStreamReader(transp)
|
||||
writer = newAsyncStreamWriter(transp)
|
||||
tlsstream = newTLSClientAsyncStream(reader, writer, "", flags = flags)
|
||||
if len(data) > 0:
|
||||
await tlsstream.writer.write(data)
|
||||
var rres = await tlsstream.reader.read()
|
||||
return bytesToString(rres)
|
||||
except CatchableError:
|
||||
return "EXCEPTION"
|
||||
finally:
|
||||
if not(isNil(tlsstream)):
|
||||
await allFutures(tlsstream.reader.closeWait(),
|
||||
tlsstream.writer.closeWait())
|
||||
if not(isNil(reader)):
|
||||
await allFutures(reader.closeWait(), writer.closeWait(),
|
||||
transp.closeWait())
|
||||
|
||||
proc testTooBigBodyChunked(address: TransportAddress,
|
||||
operation: TooBigTest): Future[bool] {.async.} =
|
||||
var serverRes = false
|
||||
@ -606,82 +521,6 @@ suite "HTTP server testing suite":
|
||||
|
||||
check waitFor(testPostMultipart2(initTAddress("127.0.0.1:30080"))) == true
|
||||
|
||||
test "HTTPS server (successful handshake) test":
|
||||
proc testHTTPS(address: TransportAddress): Future[bool] {.async.} =
|
||||
var serverRes = false
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
serverRes = true
|
||||
return await request.respond(Http200, "TEST_OK:" & $request.meth,
|
||||
HttpTable.init())
|
||||
else:
|
||||
serverRes = false
|
||||
return dumbResponse()
|
||||
|
||||
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||
let serverFlags = {Secure}
|
||||
let secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||
let secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||
let res = HttpServerRef.new(address, process,
|
||||
socketFlags = socketFlags,
|
||||
serverFlags = serverFlags,
|
||||
tlsPrivateKey = secureKey,
|
||||
tlsCertificate = secureCert)
|
||||
if res.isErr():
|
||||
return false
|
||||
|
||||
let server = res.get()
|
||||
server.start()
|
||||
let message = "GET / HTTP/1.0\r\nHost: https://127.0.0.1:80\r\n\r\n"
|
||||
let data = await httpsClient(address, message)
|
||||
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return serverRes and (data.find("TEST_OK:GET") >= 0)
|
||||
|
||||
check waitFor(testHTTPS(initTAddress("127.0.0.1:30080"))) == true
|
||||
|
||||
test "HTTPS server (failed handshake) test":
|
||||
proc testHTTPS2(address: TransportAddress): Future[bool] {.async.} =
|
||||
var serverRes = false
|
||||
var testFut = newFuture[void]()
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
serverRes = false
|
||||
return await request.respond(Http200, "TEST_OK:" & $request.meth,
|
||||
HttpTable.init())
|
||||
else:
|
||||
serverRes = true
|
||||
testFut.complete()
|
||||
return dumbResponse()
|
||||
|
||||
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||
let serverFlags = {Secure}
|
||||
let secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||
let secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||
let res = HttpServerRef.new(address, process,
|
||||
socketFlags = socketFlags,
|
||||
serverFlags = serverFlags,
|
||||
tlsPrivateKey = secureKey,
|
||||
tlsCertificate = secureCert)
|
||||
if res.isErr():
|
||||
return false
|
||||
|
||||
let server = res.get()
|
||||
server.start()
|
||||
let message = "GET / HTTP/1.0\r\nHost: https://127.0.0.1:80\r\n\r\n"
|
||||
let data = await httpsClient(address, message, {NoVerifyServerName})
|
||||
await testFut
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return serverRes and data == "EXCEPTION"
|
||||
|
||||
check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true
|
||||
|
||||
test "drop() connections test":
|
||||
const ClientsCount = 10
|
||||
|
||||
|
2
vendor/nim-chronos/tests/testmacro.nim
vendored
2
vendor/nim-chronos/tests/testmacro.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
2
vendor/nim-chronos/tests/testnet.nim
vendored
2
vendor/nim-chronos/tests/testnet.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos/transports/[osnet, ipnet]
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
2
vendor/nim-chronos/tests/testserver.nim
vendored
2
vendor/nim-chronos/tests/testserver.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
180
vendor/nim-chronos/tests/testshttpserver.nim
vendored
Normal file
180
vendor/nim-chronos/tests/testshttpserver.nim
vendored
Normal file
@ -0,0 +1,180 @@
# Chronos Test Suite
# (c) Copyright 2021-Present
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import std/[strutils, strutils]
import unittest2
import ../chronos, ../chronos/apps/http/shttpserver
import stew/base10

when defined(nimHasUsed): {.used.}

# To create self-signed certificate and key you can use openssl
|
||||
# openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \
|
||||
# -keyout example-com.key.pem -days 3650 -out example-com.cert.pem
|
||||
const HttpsSelfSignedRsaKey = """
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCn7tXGLKMIMzOG
|
||||
tVzUixax1/ftlSLcpEAkZMORuiCCnYjtIJhGZdzRFZC8fBlfAJZpLIAOfX2L2f1J
|
||||
ZuwpwDkOIvNqKMBrl5Mvkl5azPT0rtnjuwrcqN5NFtbmZPKFYvbjex2aXGqjl5MW
|
||||
nQIs/ZA++DVEXmaN9oDxcZsvRMDKfrGQf9iLeoVL47Gx9KpqNqD/JLIn4LpieumV
|
||||
yYidm6ukTOqHRvrWm36y6VvKW4TE97THacULmkeahtTf8zDJbbh4EO+gifgwgJ2W
|
||||
BUS0+5hMcWu8111mXmanlOVlcoW8fH8RmPjL1eK1Z3j3SVHEf7oWZtIVW5gGA0jQ
|
||||
nfA4K51RAgMBAAECggEANZ7/R13tWKrwouy6DWuz/WlWUtgx333atUQvZhKmWs5u
|
||||
cDjeJmxUC7b1FhoSB9GqNT7uTLIpKkSaqZthgRtNnIPwcU890Zz+dEwqMJgNByvl
|
||||
it+oYjjRco/+YmaNQaYN6yjelPE5Y678WlYb4b29Fz4t0/zIhj/VgEKkKH2tiXpS
|
||||
TIicoM7pSOscEUfaW3yp5bS5QwNU6/AaF1wws0feBACd19ZkcdPvr52jopbhxlXw
|
||||
h3XTV/vXIJd5zWGp0h/Jbd4xcD4MVo2GjfkeORKY6SjDaNzt8OGtePcKnnbUVu8b
|
||||
2XlDxukhDQXqJ3g0sHz47mhvo4JeIM+FgymRm+3QmQKBgQDTawrEA3Zy9WvucaC7
|
||||
Zah02oE9nuvpF12lZ7WJh7+tZ/1ss+Fm7YspEKaUiEk7nn1CAVFtem4X4YCXTBiC
|
||||
Oqq/o+ipv1yTur0ae6m4pwLm5wcMWBh3H5zjfQTfrClNN8yjWv8u3/sq8KesHPnT
|
||||
R92/sMAptAChPgTzQphWbxFiYwKBgQDLWFaBqXfZYVnTyUvKX8GorS6jGWc6Eh4l
|
||||
lAFA+2EBWDICrUxsDPoZjEXrWCixdqLhyehaI3KEFIx2bcPv6X2c7yx3IG5lA/Gx
|
||||
TZiKlY74c6jOTstkdLW9RJbg1VUHUVZMf/Owt802YmEfUI5S5v7jFmKW6VG+io+K
|
||||
+5KYeHD1uwKBgQDMf53KPA82422jFwYCPjLT1QduM2q97HwIomhWv5gIg63+l4BP
|
||||
rzYMYq6+vZUYthUy41OAMgyLzPQ1ZMXQMi83b7R9fTxvKRIBq9xfYCzObGnE5vHD
|
||||
SDDZWvR75muM5Yxr9nkfPkgVIPMO6Hg+hiVYZf96V0LEtNjU9HWmJYkLQQKBgQCQ
|
||||
ULGUdGHKtXy7AjH3/t3CiKaAupa4cANVSCVbqQy/l4hmvfdu+AbH+vXkgTzgNgKD
|
||||
nHh7AI1Vj//gTSayLlQn/Nbh9PJkXtg5rYiFUn+VdQBo6yMOuIYDPZqXFtCx0Nge
|
||||
kvCwisHpxwiG4PUhgS+Em259DDonsM8PJFx2OYRx4QKBgEQpGhg71Oi9MhPJshN7
|
||||
dYTowaMS5eLTk2264ARaY+hAIV7fgvUa+5bgTVaWL+Cfs33hi4sMRqlEwsmfds2T
|
||||
cnQiJ4cU20Euldfwa5FLnk6LaWdOyzYt/ICBJnKFRwfCUbS4Bu5rtMEM+3t0wxnJ
|
||||
IgaD04WhoL9EX0Qo3DC1+0kG
|
||||
-----END PRIVATE KEY-----
|
||||
"""
|
||||
|
||||
# This SSL certificate will expire 13 October 2030.
|
||||
const HttpsSelfSignedRsaCert = """
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDnzCCAoegAwIBAgIUUdcusjDd3XQi3FPM8urdFG3qI+8wDQYJKoZIhvcNAQEL
|
||||
BQAwXzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQz
|
||||
ODA4MB4XDTIwMTAxMjIxNDUwMVoXDTMwMTAxMDIxNDUwMVowXzELMAkGA1UEBhMC
|
||||
QVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdp
|
||||
dHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQzODA4MIIBIjANBgkqhkiG
|
||||
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+7VxiyjCDMzhrVc1IsWsdf37ZUi3KRAJGTD
|
||||
kboggp2I7SCYRmXc0RWQvHwZXwCWaSyADn19i9n9SWbsKcA5DiLzaijAa5eTL5Je
|
||||
Wsz09K7Z47sK3KjeTRbW5mTyhWL243sdmlxqo5eTFp0CLP2QPvg1RF5mjfaA8XGb
|
||||
L0TAyn6xkH/Yi3qFS+OxsfSqajag/ySyJ+C6YnrplcmInZurpEzqh0b61pt+sulb
|
||||
yluExPe0x2nFC5pHmobU3/MwyW24eBDvoIn4MICdlgVEtPuYTHFrvNddZl5mp5Tl
|
||||
ZXKFvHx/EZj4y9XitWd490lRxH+6FmbSFVuYBgNI0J3wOCudUQIDAQABo1MwUTAd
|
||||
BgNVHQ4EFgQUBKha84woY5WkFxKw7qx1cONg1H8wHwYDVR0jBBgwFoAUBKha84wo
|
||||
Y5WkFxKw7qx1cONg1H8wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
|
||||
AQEAHZMYt9Ry+Xj3vTbzpGFQzYQVTJlfJWSN6eWNOivRFQE5io9kOBEe5noa8aLo
|
||||
dLkw6ztxRP2QRJmlhGCO9/HwS17ckrkgZp3EC2LFnzxcBmoZu+owfxOT1KqpO52O
|
||||
IKOl8eVohi1pEicE4dtTJVcpI7VCMovnXUhzx1Ci4Vibns4a6H+BQa19a1JSpifN
|
||||
tO8U5jkjJ8Jprs/VPFhJj2O3di53oDHaYSE5eOrm2ZO14KFHSk9cGcOGmcYkUv8B
|
||||
nV5vnGadH5Lvfxb/BCpuONabeRdOxMt9u9yQ89vNpxFtRdZDCpGKZBCfmUP+5m3m
|
||||
N8r5CwGcIX/XPC3lKazzbZ8baA==
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
|
||||
|
||||
suite "Secure HTTP server testing suite":
|
||||
|
||||
proc httpsClient(address: TransportAddress,
|
||||
data: string, flags = {NoVerifyHost, NoVerifyServerName}
|
||||
): Future[string] {.async.} =
|
||||
var
|
||||
transp: StreamTransport
|
||||
tlsstream: TlsAsyncStream
|
||||
reader: AsyncStreamReader
|
||||
writer: AsyncStreamWriter
|
||||
|
||||
try:
|
||||
transp = await connect(address)
|
||||
reader = newAsyncStreamReader(transp)
|
||||
writer = newAsyncStreamWriter(transp)
|
||||
tlsstream = newTLSClientAsyncStream(reader, writer, "", flags = flags)
|
||||
if len(data) > 0:
|
||||
await tlsstream.writer.write(data)
|
||||
var rres = await tlsstream.reader.read()
|
||||
return bytesToString(rres)
|
||||
except CatchableError:
|
||||
return "EXCEPTION"
|
||||
finally:
|
||||
if not(isNil(tlsstream)):
|
||||
await allFutures(tlsstream.reader.closeWait(),
|
||||
tlsstream.writer.closeWait())
|
||||
if not(isNil(reader)):
|
||||
await allFutures(reader.closeWait(), writer.closeWait(),
|
||||
transp.closeWait())
|
||||
|
||||
test "HTTPS server (successful handshake) test":
|
||||
proc testHTTPS(address: TransportAddress): Future[bool] {.async.} =
|
||||
var serverRes = false
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
serverRes = true
|
||||
return await request.respond(Http200, "TEST_OK:" & $request.meth,
|
||||
HttpTable.init())
|
||||
else:
|
||||
serverRes = false
|
||||
return dumbResponse()
|
||||
|
||||
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||
let serverFlags = {Secure}
|
||||
let secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||
let secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||
let res = SecureHttpServerRef.new(address, process,
|
||||
socketFlags = socketFlags,
|
||||
serverFlags = serverFlags,
|
||||
tlsPrivateKey = secureKey,
|
||||
tlsCertificate = secureCert)
|
||||
if res.isErr():
|
||||
return false
|
||||
|
||||
let server = res.get()
|
||||
server.start()
|
||||
let message = "GET / HTTP/1.0\r\nHost: https://127.0.0.1:80\r\n\r\n"
|
||||
let data = await httpsClient(address, message)
|
||||
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return serverRes and (data.find("TEST_OK:GET") >= 0)
|
||||
|
||||
check waitFor(testHTTPS(initTAddress("127.0.0.1:30080"))) == true
|
||||
|
||||
test "HTTPS server (failed handshake) test":
|
||||
proc testHTTPS2(address: TransportAddress): Future[bool] {.async.} =
|
||||
var serverRes = false
|
||||
var testFut = newFuture[void]()
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
serverRes = false
|
||||
return await request.respond(Http200, "TEST_OK:" & $request.meth,
|
||||
HttpTable.init())
|
||||
else:
|
||||
serverRes = true
|
||||
testFut.complete()
|
||||
return dumbResponse()
|
||||
|
||||
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||
let serverFlags = {Secure}
|
||||
let secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||
let secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||
let res = SecureHttpServerRef.new(address, process,
|
||||
socketFlags = socketFlags,
|
||||
serverFlags = serverFlags,
|
||||
tlsPrivateKey = secureKey,
|
||||
tlsCertificate = secureCert)
|
||||
if res.isErr():
|
||||
return false
|
||||
|
||||
let server = res.get()
|
||||
server.start()
|
||||
let message = "GET / HTTP/1.0\r\nHost: https://127.0.0.1:80\r\n\r\n"
|
||||
let data = await httpsClient(address, message, {NoVerifyServerName})
|
||||
await testFut
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return serverRes and data == "EXCEPTION"
|
||||
|
||||
check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true
|
8
vendor/nim-chronos/tests/testsignal.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
@ -20,13 +20,19 @@ suite "Signal handling test suite":
|
||||
proc signalProc(udata: pointer) =
|
||||
var cdata = cast[ptr CompletionData](udata)
|
||||
signalCounter = cast[int](cdata.udata)
|
||||
try:
|
||||
removeSignal(int(cdata.fd))
|
||||
except Exception as exc:
|
||||
raiseAssert exc.msg
|
||||
|
||||
proc asyncProc() {.async.} =
|
||||
await sleepAsync(500.milliseconds)
|
||||
|
||||
proc test(signal, value: int): bool =
|
||||
try:
|
||||
discard addSignal(signal, signalProc, cast[pointer](value))
|
||||
except Exception as exc:
|
||||
raiseAssert exc.msg
|
||||
var fut = asyncProc()
|
||||
discard posix.kill(posix.getpid(), cint(signal))
|
||||
waitFor(fut)
|
||||
|
6
vendor/nim-chronos/tests/testsoon.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
@ -46,8 +46,10 @@ suite "callSoon() tests suite":
|
||||
await sleepAsync(100.milliseconds)
|
||||
timeoutsTest1 += 1
|
||||
|
||||
proc callbackProc(udata: pointer) {.gcsafe.} =
|
||||
var callbackproc: proc(udata: pointer) {.gcsafe, raises: [Defect].}
|
||||
callbackproc = proc (udata: pointer) {.gcsafe, raises: [Defect].} =
|
||||
timeoutsTest2 += 1
|
||||
{.gcsafe.}:
|
||||
callSoon(callbackProc)
|
||||
|
||||
proc test2(timers, callbacks: var int) =
|
||||
|
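The declare-then-assign rewrite above exists because the callback re-schedules itself: declaring the variable first gives the closure an explicit proc type carrying the {.gcsafe, raises: [Defect].} pragmas that chronos' CallbackFunc now expects. A standalone sketch of the same pattern (not part of this diff, using only the chronos calls that appear above):

import chronos

var ticks = 0

# Declare the callback variable with the fully annotated proc type first,
# then assign the body, so the body can re-schedule itself by name.
var tick: proc(udata: pointer) {.gcsafe, raises: [Defect].}
tick = proc(udata: pointer) {.gcsafe, raises: [Defect].} =
  ticks += 1
  if ticks < 3:
    {.gcsafe.}:              # touching the global closure var is not gcsafe by itself
      callSoon(tick)         # re-arm for a later dispatcher iteration

callSoon(tick)
waitFor sleepAsync(10.milliseconds)   # let the dispatcher drain the callSoon queue
doAssert ticks > 0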
5
vendor/nim-chronos/tests/teststream.nim
vendored
@ -5,7 +5,8 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import strutils, unittest, os
|
||||
import std/[strutils, os]
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
@ -866,7 +867,7 @@ suite "Stream Transport test suite":
|
||||
var
|
||||
valueLen = 0'u32
|
||||
res: seq[byte]
|
||||
error: ref Exception
|
||||
error: ref CatchableError
|
||||
|
||||
proc predicate(data: openarray[byte]): tuple[consumed: int, done: bool] =
|
||||
if len(data) == 0:
|
||||
|
2
vendor/nim-chronos/tests/testsync.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
3
vendor/nim-chronos/tests/testtime.nim
vendored
@ -5,7 +5,8 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import os, unittest
|
||||
import std/os
|
||||
import unittest2
|
||||
import ../chronos, ../chronos/timer
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
2
vendor/nim-chronos/tests/testutils.nim
vendored
@ -5,7 +5,7 @@
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import unittest
|
||||
import unittest2
|
||||
import ../chronos
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
@ -5,10 +5,10 @@ import
|
||||
export
|
||||
serialization, reader, writer
|
||||
|
||||
serializationFormat Envvar,
|
||||
Reader = EnvvarReader,
|
||||
Writer = EnvvarWriter,
|
||||
PreferedOutput = void
|
||||
serializationFormat Envvar
|
||||
|
||||
Envvar.setReader EnvvarReader
|
||||
Envvar.setWriter EnvvarWriter, PreferredOutput = void
|
||||
|
||||
template supports*(_: type Envvar, T: type): bool =
|
||||
# The Envvar format should support every type
|
||||
|
@ -5,10 +5,10 @@ import
|
||||
export
|
||||
serialization, reader, writer, types
|
||||
|
||||
serializationFormat Winreg,
|
||||
Reader = WinregReader,
|
||||
Writer = WinregWriter,
|
||||
PreferedOutput = void
|
||||
serializationFormat Winreg
|
||||
|
||||
Winreg.setReader WinregReader
|
||||
Winreg.setWriter WinregWriter, PreferredOutput = void
|
||||
|
||||
template supports*(_: type Winreg, T: type): bool =
|
||||
# The Winreg format should support every type
|
||||
|
@ -428,7 +428,7 @@ proc receive*(d: Protocol, a: Address, packet: openArray[byte]) {.gcsafe,
|
||||
# CatchableErrors, in fact, we really don't, but hey, they might, considering we
|
||||
# can't enforce it.
|
||||
proc processClient(transp: DatagramTransport, raddr: TransportAddress):
|
||||
Future[void] {.async, gcsafe, raises: [Exception, Defect].} =
|
||||
Future[void] {.async, gcsafe.} =
|
||||
let proto = getUserData[Protocol](transp)
|
||||
|
||||
# TODO: should we use `peekMessage()` to avoid allocation?
|
||||
|
7
vendor/nim-eth/eth/p2p/private/p2p_types.nim
vendored
@ -139,13 +139,14 @@ type
|
||||
MessageHandlerDecorator* = proc(msgId: int, n: NimNode): NimNode
|
||||
ThunkProc* = proc(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.}
|
||||
MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
|
||||
RequestResolver* = proc(msg: pointer, future: FutureBase) {.gcsafe.}
|
||||
RequestResolver* = proc(msg: pointer, future: FutureBase)
|
||||
{.gcsafe, raises:[Defect].}
|
||||
NextMsgResolver* = proc(msgData: Rlp, future: FutureBase) {.gcsafe.}
|
||||
PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
|
||||
NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
|
||||
HandshakeStep* = proc(peer: Peer): Future[void] {.gcsafe.}
|
||||
DisconnectionHandler* = proc(peer: Peer,
|
||||
reason: DisconnectionReason): Future[void] {.gcsafe.}
|
||||
DisconnectionHandler* = proc(peer: Peer, reason: DisconnectionReason):
|
||||
Future[void] {.gcsafe.}
|
||||
|
||||
ConnectionState* = enum
|
||||
None,
|
||||
|
12
vendor/nim-eth/eth/p2p/rlpx.nim
vendored
@ -60,7 +60,8 @@ chronicles.formatIt(Peer): $(it.remote)
|
||||
|
||||
include p2p_backends_helpers
|
||||
|
||||
proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} =
|
||||
proc requestResolver[MsgType](msg: pointer, future: FutureBase)
|
||||
{.gcsafe, raises:[Defect].} =
|
||||
var f = Future[Option[MsgType]](future)
|
||||
if not f.finished:
|
||||
if msg != nil:
|
||||
@ -72,10 +73,13 @@ proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} =
|
||||
# here. The only reasonable explanation is that the request should
|
||||
# have timed out.
|
||||
if msg != nil:
|
||||
try:
|
||||
if f.read.isSome:
|
||||
doAssert false, "trying to resolve a request twice"
|
||||
else:
|
||||
doAssert false, "trying to resolve a timed out request with a value"
|
||||
except CatchableError as e:
|
||||
debug "Exception in requestResolver()", exc = e.name, err = e.msg
|
||||
else:
|
||||
try:
|
||||
if not f.read.isSome:
|
||||
@ -88,9 +92,8 @@ proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} =
|
||||
trace "TransportOsError during request", err = e.msg
|
||||
except TransportError:
|
||||
trace "Transport got closed during request"
|
||||
except Exception as e:
|
||||
except CatchableError as e:
|
||||
debug "Exception in requestResolver()", exc = e.name, err = e.msg
|
||||
raise e
|
||||
|
||||
proc linkSendFailureToReqFuture[S, R](sendFut: Future[S], resFut: Future[R]) =
|
||||
sendFut.addCallback() do (arg: pointer):
|
||||
@ -360,7 +363,8 @@ proc registerRequest(peer: Peer,
|
||||
|
||||
doAssert(not peer.dispatcher.isNil)
|
||||
let requestResolver = peer.dispatcher.messages[responseMsgId].requestResolver
|
||||
proc timeoutExpired(udata: pointer) = requestResolver(nil, responseFuture)
|
||||
proc timeoutExpired(udata: pointer) {.gcsafe, raises:[Defect].} =
|
||||
requestResolver(nil, responseFuture)
|
||||
|
||||
addTimer(timeoutAt, timeoutExpired, nil)
|
||||
|
||||
|
@ -110,7 +110,7 @@ type
|
||||
## XXX: really big messages can cause excessive mem usage when using msg \
|
||||
## count
|
||||
|
||||
FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, closure.}
|
||||
FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, raises: [Defect].}
|
||||
|
||||
Filter* = object
|
||||
src*: Option[PublicKey]
|
||||
|
51
vendor/nim-http-utils/httputils.nim
vendored
@ -1146,3 +1146,54 @@ proc checkHeaderValue*(value: string): bool =
|
||||
if (ch == CR) or (ch == LF):
|
||||
result = false
|
||||
break
|
||||
|
||||
proc toInt*(code: HttpCode): int =
|
||||
## Returns ``code`` as integer value.
|
||||
case code
|
||||
of Http100: 100
|
||||
of Http101: 101
|
||||
of Http200: 200
|
||||
of Http201: 201
|
||||
of Http202: 202
|
||||
of Http203: 203
|
||||
of Http204: 204
|
||||
of Http205: 205
|
||||
of Http206: 206
|
||||
of Http300: 300
|
||||
of Http301: 301
|
||||
of Http302: 302
|
||||
of Http303: 303
|
||||
of Http304: 304
|
||||
of Http305: 305
|
||||
of Http307: 307
|
||||
of Http400: 400
|
||||
of Http401: 401
|
||||
of Http403: 403
|
||||
of Http404: 404
|
||||
of Http405: 405
|
||||
of Http406: 406
|
||||
of Http407: 407
|
||||
of Http408: 408
|
||||
of Http409: 409
|
||||
of Http410: 410
|
||||
of Http411: 411
|
||||
of Http412: 412
|
||||
of Http413: 413
|
||||
of Http414: 414
|
||||
of Http415: 415
|
||||
of Http416: 416
|
||||
of Http417: 417
|
||||
of Http418: 418
|
||||
of Http421: 421
|
||||
of Http422: 422
|
||||
of Http426: 426
|
||||
of Http428: 428
|
||||
of Http429: 429
|
||||
of Http431: 431
|
||||
of Http451: 451
|
||||
of Http500: 500
|
||||
of Http501: 501
|
||||
of Http502: 502
|
||||
of Http503: 503
|
||||
of Http504: 504
|
||||
of Http505: 505
|
||||
|
@ -1,16 +1,6 @@
|
||||
import
|
||||
serialization, json_serialization/[reader, writer]
|
||||
serialization, json_serialization/[format, reader, writer]
|
||||
|
||||
export
|
||||
serialization, reader, writer
|
||||
|
||||
serializationFormat Json,
|
||||
Reader = JsonReader,
|
||||
Writer = JsonWriter,
|
||||
PreferedOutput = string,
|
||||
mimeType = "application/json"
|
||||
|
||||
template supports*(_: type Json, T: type): bool =
|
||||
# The JSON format should support every type
|
||||
true
|
||||
serialization, format, reader, writer
|
||||
|
||||
|
10
vendor/nim-json-serialization/json_serialization/format.nim
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
import
|
||||
serialization/formats
|
||||
|
||||
serializationFormat Json,
|
||||
mimeType = "application/json"
|
||||
|
||||
template supports*(_: type Json, T: type): bool =
|
||||
# The JSON format should support every type
|
||||
true
|
||||
|
@ -1,17 +1,17 @@
|
||||
{.experimental: "notnil".}
|
||||
|
||||
import
|
||||
tables, strutils, typetraits, macros, strformat,
|
||||
faststreams/inputs, serialization/[object_serialization, errors],
|
||||
types, lexer
|
||||
std/[tables, strutils, typetraits, macros, strformat],
|
||||
faststreams/inputs, serialization/[formats, object_serialization, errors],
|
||||
format, types, lexer
|
||||
|
||||
from json import JsonNode, JsonNodeKind
|
||||
|
||||
export
|
||||
types, errors
|
||||
format, types, errors
|
||||
|
||||
type
|
||||
JsonReader* = object
|
||||
JsonReader*[Flavor = DefaultFlavor] = object
|
||||
lexer*: JsonLexer
|
||||
allowUnknownFields: bool
|
||||
|
||||
@ -50,6 +50,8 @@ type
|
||||
isNegative: bool
|
||||
absIntVal: uint64
|
||||
|
||||
Json.setReader JsonReader
|
||||
|
||||
func valueStr(err: ref IntOverflowError): string =
|
||||
if err.isNegative:
|
||||
result.add '-'
|
||||
@ -176,7 +178,10 @@ proc parseJsonNode(r: var JsonReader): JsonNode =
|
||||
r.lexer.next()
|
||||
if r.lexer.tok != tkCurlyRi:
|
||||
while r.lexer.tok == tkString:
|
||||
try:
|
||||
r.readJsonNodeField(result.fields.mgetOrPut(r.lexer.strVal, nil))
|
||||
except KeyError:
|
||||
raiseAssert "mgetOrPut should never raise a KeyError"
|
||||
if r.lexer.tok == tkComma:
|
||||
r.lexer.next()
|
||||
else:
|
||||
@ -368,6 +373,7 @@ template isCharArray(v: auto): bool = false
|
||||
proc readValue*[T](r: var JsonReader, value: var T)
|
||||
{.raises: [SerializationError, IOError, Defect].} =
|
||||
mixin readValue
|
||||
type ReaderType = type r
|
||||
|
||||
let tok {.used.} = r.lexer.tok
|
||||
|
||||
@ -495,7 +501,7 @@ proc readValue*[T](r: var JsonReader, value: var T)
|
||||
r.skipToken tkCurlyLe
|
||||
|
||||
when T.totalSerializedFields > 0:
|
||||
let fields = T.fieldReadersTable(JsonReader)
|
||||
let fields = T.fieldReadersTable(ReaderType)
|
||||
var expectedFieldPos = 0
|
||||
while r.lexer.tok == tkString:
|
||||
when T is tuple:
|
||||
|
@ -1,7 +1,10 @@
|
||||
import
|
||||
typetraits,
|
||||
std/typetraits,
|
||||
faststreams/[outputs, textio], serialization, json,
|
||||
types
|
||||
format, types
|
||||
|
||||
export
|
||||
format, JsonString, DefaultFlavor
|
||||
|
||||
type
|
||||
JsonWriterState = enum
|
||||
@ -9,23 +12,23 @@ type
|
||||
RecordStarted
|
||||
AfterField
|
||||
|
||||
JsonWriter* = object
|
||||
JsonWriter*[Flavor = DefaultFlavor] = object
|
||||
stream*: OutputStream
|
||||
hasTypeAnnotations: bool
|
||||
hasPrettyOutput*: bool # read-only
|
||||
nestingLevel*: int # read-only
|
||||
state: JsonWriterState
|
||||
|
||||
export
|
||||
JsonString
|
||||
Json.setWriter JsonWriter,
|
||||
PreferredOutput = string
|
||||
|
||||
proc init*(T: type JsonWriter, stream: OutputStream,
|
||||
pretty = false, typeAnnotations = false): T =
|
||||
result.stream = stream
|
||||
result.hasPrettyOutput = pretty
|
||||
result.hasTypeAnnotations = typeAnnotations
|
||||
result.nestingLevel = if pretty: 0 else: -1
|
||||
result.state = RecordExpected
|
||||
proc init*(W: type JsonWriter, stream: OutputStream,
|
||||
pretty = false, typeAnnotations = false): W =
|
||||
W(stream: stream,
|
||||
hasPrettyOutput: pretty,
|
||||
hasTypeAnnotations: typeAnnotations,
|
||||
nestingLevel: if pretty: 0 else: -1,
|
||||
state: RecordExpected)
|
||||
|
||||
proc beginRecord*(w: var JsonWriter, T: type)
|
||||
proc beginRecord*(w: var JsonWriter)
|
||||
@ -228,7 +231,7 @@ proc toJson*(v: auto, pretty = false, typeAnnotations = false): string =
|
||||
mixin writeValue
|
||||
|
||||
var s = memoryOutput()
|
||||
var w = JsonWriter.init(s, pretty, typeAnnotations)
|
||||
var w = JsonWriter[DefaultFlavor].init(s, pretty, typeAnnotations)
|
||||
w.writeValue v
|
||||
return s.getOutput(string)
|
||||
|
||||
|
41
vendor/nim-json-serialization/tests/test_json_flavor.nim
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
import
|
||||
strutils,
|
||||
serialization,
|
||||
../json_serialization
|
||||
|
||||
Json.createFlavor StringyJson
|
||||
|
||||
proc writeValue*(w: var JsonWriter[StringyJson], val: SomeInteger) =
|
||||
writeValue(w, $val)
|
||||
|
||||
proc readValue*(r: var JsonReader[StringyJson], v: var SomeSignedInt) =
|
||||
try:
|
||||
v = type(v) parseBiggestInt readValue(r, string)
|
||||
except ValueError as err:
|
||||
r.raiseUnexpectedValue("A signed integer encoded as string")
|
||||
|
||||
proc readValue*(r: var JsonReader[StringyJson], v: var SomeUnsignedInt) =
|
||||
try:
|
||||
v = type(v) parseBiggestUInt readValue(r, string)
|
||||
except ValueError as err:
|
||||
r.raiseUnexpectedValue("An unsigned integer encoded as string")
|
||||
|
||||
type
|
||||
Container = object
|
||||
name: string
|
||||
x: int
|
||||
y: uint64
|
||||
list: seq[int64]
|
||||
|
||||
let c = Container(name: "c", x: -10, y: 20, list: @[1'i64, 2, 25])
|
||||
let encoded = StringyJson.encode(c)
|
||||
echo "Encoded: ", encoded
|
||||
|
||||
let decoded = try:
|
||||
StringyJson.decode(encoded, Container)
|
||||
except SerializationError as err:
|
||||
echo err.formatMsg("<encoded>")
|
||||
quit 1
|
||||
|
||||
echo "Decoded: ", decoded
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
# libtool - Provide generalized library-building support services.
|
||||
# Generated automatically by config.status (libbacktrace) version-unused
|
||||
# Libtool was configured on host fv-az231-108:
|
||||
# Libtool was configured on host fv-az193-526:
|
||||
# NOTE: Changes made to this file will be lost: look at ltmain.sh.
|
||||
#
|
||||
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
|
||||
|
3
vendor/nim-libp2p/.github/workflows/ci.yml
vendored
@ -207,6 +207,7 @@ jobs:
|
||||
nimble test
|
||||
|
||||
bumpNBC-stable:
|
||||
if: github.ref == 'refs/heads/master'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@ -221,6 +222,7 @@ jobs:
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: status-im/nimbus-eth2
|
||||
ref: unstable
|
||||
path: nbc
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
@ -246,6 +248,7 @@ jobs:
|
||||
title: nim-libp2p auto bump
|
||||
|
||||
bumpNBC-unstable:
|
||||
if: github.ref == 'refs/heads/unstable'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
@ -41,7 +41,7 @@ proc isUpgraded*(s: Connection): bool =
|
||||
if not isNil(s.upgraded):
|
||||
return s.upgraded.finished
|
||||
|
||||
proc upgrade*(s: Connection, failed: ref Exception = nil) =
|
||||
proc upgrade*(s: Connection, failed: ref CatchableError = nil) =
|
||||
if not isNil(s.upgraded):
|
||||
if not isNil(failed):
|
||||
s.upgraded.fail(failed)
|
||||
|
2
vendor/nim-libp2p/libp2p/switch.nim
vendored
@ -85,8 +85,6 @@ proc removePeerEventHandler*(s: Switch,
|
||||
kind: PeerEventKind) =
|
||||
s.connManager.removePeerEventHandler(handler, kind)
|
||||
|
||||
proc disconnect*(s: Switch, peerId: PeerID) {.async, gcsafe.}
|
||||
|
||||
proc isConnected*(s: Switch, peerId: PeerID): bool =
|
||||
## returns true if the peer has one or more
|
||||
## associated connections (sockets)
|
||||
|
@ -35,7 +35,7 @@ type
|
||||
opened*: uint64
|
||||
closed*: uint64
|
||||
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe.}
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [Defect].}
|
||||
|
||||
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
|
||||
result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
|
||||
|
6
vendor/nim-serialization/README.md
vendored
@ -33,11 +33,11 @@ serializationFormat Json, # This is the name of the for
|
||||
Reader = JsonReader, # The associated Reader type.
|
||||
Writer = JsonWriter, # The associated Writer type.
|
||||
|
||||
PreferedOutput = string, # APIs such as `Json.encode` will return this type.
|
||||
PreferredOutput = string, # APIs such as `Json.encode` will return this type.
|
||||
# The type must support the following operations:
|
||||
# proc initWithCapacity(_: type T, n: int)
|
||||
# proc add(v: var T, bytes: openarray[byte])
|
||||
# By default, the PreferedOutput is `seq[byte]`.
|
||||
# By default, the PreferredOutput is `seq[byte]`.
|
||||
|
||||
mimeType = "application/json", # Mime type associated with the format (Optional).
|
||||
fileExt = "json" # File extension associated with the format (Optional).
|
||||
@ -48,7 +48,7 @@ serializationFormat Json, # This is the name of the for
|
||||
Most of the time, you'll be using the following high-level APIs when encoding
|
||||
and decoding values:
|
||||
|
||||
#### `Format.encode(value: auto, params: varargs): Format.PreferedOutput`
|
||||
#### `Format.encode(value: auto, params: varargs): Format.PreferredOutput`
|
||||
|
||||
Encodes a value in the specified format returning the preferred output type
|
||||
for the format (usually `string` or `seq[byte]`). All extra params will be
|
||||
|
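For context, a minimal sketch of that high-level API as exposed by the vendored json_serialization (not part of this diff; Point is a hypothetical record type used only for illustration):

import json_serialization

type
  Point = object
    x, y: int

let encoded = Json.encode(Point(x: 1, y: 2))   # Json's PreferredOutput is string
let decoded = Json.decode(encoded, Point)
doAssert decoded.x == 1 and decoded.y == 2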
69
vendor/nim-serialization/serialization.nim
vendored
@ -1,31 +1,13 @@
|
||||
import
|
||||
typetraits,
|
||||
stew/shims/macros, faststreams,
|
||||
serialization/[object_serialization, errors]
|
||||
serialization/[object_serialization, errors, formats]
|
||||
|
||||
export
|
||||
faststreams, object_serialization, errors
|
||||
|
||||
template serializationFormatImpl(Name: untyped,
|
||||
Reader, Writer, PreferedOutput: distinct type,
|
||||
mimeTypeName: static string = "") {.dirty.} =
|
||||
# This indirection is required in order to be able to generate the
|
||||
# `mimeType` accessor template. Without the indirection, the template
|
||||
# mechanism of Nim will try to expand the `mimeType` param in the position
|
||||
# of the `mimeType` template name which will result in error.
|
||||
type Name* = object
|
||||
template ReaderType*(T: type Name): type = Reader
|
||||
template WriterType*(T: type Name): type = Writer
|
||||
template PreferedOutputType*(T: type Name): type = PreferedOutput
|
||||
template mimeType*(T: type Name): string = mimeTypeName
|
||||
|
||||
template serializationFormat*(Name: untyped,
|
||||
Reader, Writer, PreferedOutput: distinct type,
|
||||
mimeType: static string = "") =
|
||||
serializationFormatImpl(Name, Reader, Writer, PreferedOutput, mimeType)
|
||||
faststreams, object_serialization, errors, formats
|
||||
|
||||
template encode*(Format: type, value: auto, params: varargs[untyped]): auto =
|
||||
mixin init, WriterType, writeValue, PreferedOutputType
|
||||
mixin init, Writer, writeValue, PreferredOutputType
|
||||
{.noSideEffect.}:
|
||||
# We assume that there is no side-effects here, because we are
|
||||
# using a `memoryOutput`. The computed side-effects are coming
|
||||
@ -33,9 +15,10 @@ template encode*(Format: type, value: auto, params: varargs[untyped]): auto =
|
||||
# faststreams may be writing to a file or a network device.
|
||||
try:
|
||||
var s = memoryOutput()
|
||||
var writer = unpackArgs(init, [WriterType(Format), s, params])
|
||||
type WriterType = Writer(Format)
|
||||
var writer = unpackArgs(init, [WriterType, s, params])
|
||||
writeValue writer, value
|
||||
s.getOutput PreferedOutputType(Format)
|
||||
s.getOutput PreferredOutputType(Format)
|
||||
except IOError:
|
||||
raise (ref Defect)() # a memoryOutput cannot have an IOError
|
||||
|
||||
@ -52,7 +35,7 @@ template decode*(Format: distinct type,
|
||||
params: varargs[untyped]): auto =
|
||||
# TODO, this is duplicated only due to a Nim bug:
|
||||
# If `input` was `string|openarray[byte]`, it won't match `seq[byte]`
|
||||
mixin init, ReaderType
|
||||
mixin init, Reader
|
||||
{.noSideEffect.}:
|
||||
# We assume that there are no side-effects here, because we are
|
||||
# using a `memoryInput`. The computed side-effects are coming
|
||||
@ -60,7 +43,8 @@ template decode*(Format: distinct type,
|
||||
# faststreams may be reading from a file or a network device.
|
||||
try:
|
||||
var stream = unsafeMemoryInput(input)
|
||||
var reader = unpackArgs(init, [ReaderType(Format), stream, params])
|
||||
type ReaderType = Reader(Format)
|
||||
var reader = unpackArgs(init, [ReaderType, stream, params])
|
||||
reader.readValue(RecordType)
|
||||
except IOError:
|
||||
raise (ref Defect)() # memory inputs cannot raise an IOError
|
||||
@ -71,7 +55,7 @@ template decode*(Format: distinct type,
|
||||
params: varargs[untyped]): auto =
|
||||
# TODO, this is duplicated only due to a Nim bug:
|
||||
# If `input` was `string|openarray[byte]`, it won't match `seq[byte]`
|
||||
mixin init, ReaderType
|
||||
mixin init, Reader
|
||||
{.noSideEffect.}:
|
||||
# We assume that there are no side-effects here, because we are
|
||||
# using a `memoryInput`. The computed side-effects are coming
|
||||
@ -79,7 +63,8 @@ template decode*(Format: distinct type,
|
||||
# faststreams may be reading from a file or a network device.
|
||||
try:
|
||||
var stream = unsafeMemoryInput(input)
|
||||
var reader = unpackArgs(init, [ReaderType(Format), stream, params])
|
||||
type ReaderType = Reader(Format)
|
||||
var reader = unpackArgs(init, [ReaderType, stream, params])
|
||||
reader.readValue(RecordType)
|
||||
except IOError:
|
||||
raise (ref Defect)() # memory inputs cannot raise an IOError
|
||||
@ -88,11 +73,12 @@ template loadFile*(Format: distinct type,
|
||||
filename: string,
|
||||
RecordType: distinct type,
|
||||
params: varargs[untyped]): auto =
|
||||
mixin init, ReaderType, readValue
|
||||
mixin init, Reader, readValue
|
||||
|
||||
var stream = memFileInput(filename)
|
||||
try:
|
||||
var reader = unpackArgs(init, [ReaderType(Format), stream, params])
|
||||
type ReaderType = Reader(Format)
|
||||
var reader = unpackArgs(init, [ReaderType, stream, params])
|
||||
reader.readValue(RecordType)
|
||||
finally:
|
||||
close stream
|
||||
@ -104,11 +90,12 @@ template loadFile*[RecordType](Format: type,
|
||||
record = loadFile(Format, filename, RecordType, params)
|
||||
|
||||
template saveFile*(Format: type, filename: string, value: auto, params: varargs[untyped]) =
|
||||
mixin init, WriterType, writeValue
|
||||
mixin init, Writer, writeValue
|
||||
|
||||
var stream = fileOutput(filename)
|
||||
try:
|
||||
var writer = unpackArgs(init, [WriterType(Format), stream, params])
|
||||
type WriterType = Writer(Format)
|
||||
var writer = unpackArgs(init, [WriterType, stream, params])
|
||||
writer.writeValue(value)
|
||||
finally:
|
||||
close stream
|
||||
@ -137,16 +124,16 @@ template borrowSerialization*(Alias: distinct type,
|
||||
|
||||
template serializesAsBase*(SerializedType: distinct type,
|
||||
Format: distinct type) =
|
||||
mixin ReaderType, WriterType
|
||||
mixin Reader, Writer
|
||||
|
||||
type Reader = ReaderType(Format)
|
||||
type Writer = WriterType(Format)
|
||||
type ReaderType = Reader(Format)
|
||||
type WriterType = Writer(Format)
|
||||
|
||||
template writeValue*(writer: var Writer, value: SerializedType) =
|
||||
template writeValue*(writer: var WriterType, value: SerializedType) =
|
||||
mixin writeValue
|
||||
writeValue(writer, distinctBase value)
|
||||
|
||||
template readValue*(reader: var Reader, value: var SerializedType) =
|
||||
template readValue*(reader: var ReaderType, value: var SerializedType) =
|
||||
mixin readValue
|
||||
value = SerializedType reader.readValue(distinctBase SerializedType)
|
||||
|
||||
@ -160,15 +147,17 @@ template readValue*(stream: InputStream,
|
||||
Format: type,
|
||||
ValueType: type,
|
||||
params: varargs[untyped]): untyped =
|
||||
mixin ReaderType, init, readValue
|
||||
var reader = unpackArgs(init, [ReaderType(Format), stream, params])
|
||||
mixin Reader, init, readValue
|
||||
type ReaderType = Reader(Format)
|
||||
var reader = unpackArgs(init, [ReaderType, stream, params])
|
||||
readValue reader, ValueType
|
||||
|
||||
template writeValue*(stream: OutputStream,
|
||||
Format: type,
|
||||
value: auto,
|
||||
params: varargs[untyped]) =
|
||||
mixin WriterType, init, writeValue
|
||||
var writer = unpackArgs(init, [WriterType(Format), stream])
|
||||
mixin Writer, init, writeValue
|
||||
type WriterType = Writer(Format)
|
||||
var writer = unpackArgs(init, [WriterType, stream, params])
|
||||
writeValue writer, value
|
||||
|
||||
|
40
vendor/nim-serialization/serialization/formats.nim
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
import
|
||||
std/typetraits
|
||||
|
||||
template serializationFormatImpl(Name: untyped,
|
||||
mimeTypeName: static string = "") {.dirty.} =
|
||||
# This indirection is required in order to be able to generate the
|
||||
# `mimeType` accessor template. Without the indirection, the template
|
||||
# mechanism of Nim will try to expand the `mimeType` param in the position
|
||||
# of the `mimeType` template name which will result in error.
|
||||
type Name* = object
|
||||
template mimeType*(T: type Name): string = mimeTypeName
|
||||
|
||||
template serializationFormat*(Name: untyped, mimeType: static string = "") =
|
||||
serializationFormatImpl(Name, mimeType)
|
||||
|
||||
template setReader*(Format, FormatReader: distinct type) =
|
||||
when arity(FormatReader) > 1:
|
||||
template ReaderType*(T: type Format, F: distinct type = DefaultFlavor): type = FormatReader[F]
|
||||
template Reader*(T: type Format, F: distinct type = DefaultFlavor): type = FormatReader[F]
|
||||
else:
|
||||
template ReaderType*(T: type Format): type = FormatReader
|
||||
template Reader*(T: type Format): type = FormatReader
|
||||
|
||||
template setWriter*(Format, FormatWriter, PreferredOutput: distinct type) =
|
||||
when arity(FormatWriter) > 1:
|
||||
template WriterType*(T: type Format, F: distinct type = DefaultFlavor): type = FormatWriter[F]
|
||||
template Writer*(T: type Format, F: distinct type = DefaultFlavor): type = FormatWriter[F]
|
||||
else:
|
||||
template WriterType*(T: type Format): type = FormatWriter
|
||||
template Writer*(T: type Format): type = FormatWriter
|
||||
|
||||
template PreferredOutputType*(T: type Format): type = PreferredOutput
|
||||
|
||||
template createFlavor*(ModifiedFormat, FlavorName: untyped) =
|
||||
type FlavorName* = object
|
||||
template Reader*(T: type FlavorName): type = Reader(ModifiedFormat, FlavorName)
|
||||
template Writer*(T: type FlavorName): type = Writer(ModifiedFormat, FlavorName)
|
||||
template PreferredOutputType*(T: type FlavorName): type = PreferredOutputType(ModifiedFormat)
|
||||
template mimeType*(T: type FlavorName): string = mimeType(ModifiedFormat)
|
||||
|
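With this split, a format is declared first and its reader/writer are attached afterwards; createFlavor then derives a new format name that reuses the same generic reader/writer. A hedged sketch against the real Json format (not part of this diff; CompactJson is a hypothetical flavor name, see tests/test_json_flavor.nim in this diff for the full pattern):

import serialization, json_serialization

# Derive a flavor of Json: it shares JsonReader/JsonWriter (generic over the
# flavor) but can be given its own readValue/writeValue overloads later.
Json.createFlavor CompactJson

echo CompactJson.encode(@[1, 2, 3])   # -> [1,2,3]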
@ -3,6 +3,7 @@ import
|
||||
errors
|
||||
|
||||
type
|
||||
DefaultFlavor* = object
|
||||
FieldTag*[RecordType; fieldName: static string; FieldType] = distinct void
|
||||
|
||||
let
|
||||
@ -190,12 +191,12 @@ template writeFieldIMPL*[Writer](writer: var Writer,
|
||||
mixin writeValue
|
||||
writer.writeValue(fieldVal)
|
||||
|
||||
proc makeFieldReadersTable(RecordType, Reader: distinct type):
|
||||
seq[FieldReader[RecordType, Reader]] =
|
||||
proc makeFieldReadersTable(RecordType, ReaderType: distinct type):
|
||||
seq[FieldReader[RecordType, ReaderType]] =
|
||||
mixin enumAllSerializedFields, readFieldIMPL, handleReadException
|
||||
|
||||
enumAllSerializedFields(RecordType):
|
||||
proc readField(obj: var RecordType, reader: var Reader)
|
||||
proc readField(obj: var RecordType, reader: var ReaderType)
|
||||
{.gcsafe, nimcall, raises: [SerializationError, Defect].} =
|
||||
when RecordType is tuple:
|
||||
const i = fieldName.parseInt
|
||||
@ -221,17 +222,17 @@ proc makeFieldReadersTable(RecordType, Reader: distinct type):
|
||||
|
||||
result.add((fieldName, readField))
|
||||
|
||||
proc fieldReadersTable*(RecordType, Reader: distinct type):
|
||||
ptr seq[FieldReader[RecordType, Reader]] =
|
||||
proc fieldReadersTable*(RecordType, ReaderType: distinct type):
|
||||
ptr seq[FieldReader[RecordType, ReaderType]] =
|
||||
mixin readValue
|
||||
|
||||
# careful: https://github.com/nim-lang/Nim/issues/17085
|
||||
# TODO why is this even here? one could just return the function pointer
|
||||
# to the field reader directly instead of going through this seq etc
|
||||
var tbl {.threadvar.}: ref seq[FieldReader[RecordType, Reader]]
|
||||
var tbl {.threadvar.}: ref seq[FieldReader[RecordType, ReaderType]]
|
||||
if tbl == nil:
|
||||
tbl = new typeof(tbl)
|
||||
tbl[] = makeFieldReadersTable(RecordType, Reader)
|
||||
tbl[] = makeFieldReadersTable(RecordType, ReaderType)
|
||||
return addr(tbl[])
|
||||
|
||||
proc findFieldReader*(fieldsTable: FieldReadersTable,
|
||||
@ -341,16 +342,16 @@ proc genCustomSerializationForField(Format, field,
|
||||
|
||||
if readBody != nil:
|
||||
result.add quote do:
|
||||
type Reader = ReaderType(`Format`)
|
||||
type ReaderType = Reader(`Format`)
|
||||
proc readFieldIMPL*(F: type FieldTag[`RecordType`, `fieldName`, auto],
|
||||
`readerSym`: var Reader): `FieldType`
|
||||
`readerSym`: var ReaderType): `FieldType`
|
||||
{.raises: [IOError, SerializationError, Defect].} =
|
||||
`readBody`
|
||||
|
||||
if writeBody != nil:
|
||||
result.add quote do:
|
||||
type Writer = WriterType(`Format`)
|
||||
proc writeFieldIMPL*(`writerSym`: var Writer,
|
||||
type WriterType = Writer(`Format`)
|
||||
proc writeFieldIMPL*(`writerSym`: var WriterType,
|
||||
F: type FieldTag[`RecordType`, `fieldName`, auto],
|
||||
`valueSym`: auto,
|
||||
`holderSym`: `RecordType`)
|
||||
@ -363,15 +364,15 @@ proc genCustomSerializationForType(Format, typ: NimNode,
|
||||
|
||||
if readBody != nil:
|
||||
result.add quote do:
|
||||
type Reader = ReaderType(`Format`)
|
||||
proc readValue*(`readerSym`: var Reader, T: type `typ`): `typ`
|
||||
type ReaderType = Reader(`Format`)
|
||||
proc readValue*(`readerSym`: var ReaderType, T: type `typ`): `typ`
|
||||
{.raises: [IOError, SerializationError, Defect].} =
|
||||
`readBody`
|
||||
|
||||
if writeBody != nil:
|
||||
result.add quote do:
|
||||
type Writer = WriterType(`Format`)
|
||||
proc writeValue*(`writerSym`: var Writer, `valueSym`: `typ`)
|
||||
type WriterType = Writer(`Format`)
|
||||
proc writeValue*(`writerSym`: var WriterType, `valueSym`: `typ`)
|
||||
{.raises: [IOError, SerializationError, Defect].} =
|
||||
`writeBody`
|
||||
|
||||
|
@ -317,14 +317,14 @@ proc executeRoundtripTests*(Format: type) =
|
||||
roundtrip namedT
|
||||
|
||||
proc executeReaderWriterTests*(Format: type) =
|
||||
mixin init, ReaderType, WriterType
|
||||
mixin init, Reader, Writer
|
||||
|
||||
type
|
||||
Reader = ReaderType Format
|
||||
ReaderType = Reader Format
|
||||
|
||||
suite(typetraits.name(Format) & " read/write tests"):
|
||||
test "Low-level field reader test":
|
||||
let barFields = fieldReadersTable(Bar, Reader)
|
||||
let barFields = fieldReadersTable(Bar, ReaderType)
|
||||
var idx = 0
|
||||
|
||||
var fieldReader = findFieldReader(barFields[], "b", idx)
|
||||
@ -336,7 +336,7 @@ proc executeReaderWriterTests*(Format: type) =
|
||||
|
||||
var bytes = Format.encode("test")
|
||||
var stream = unsafeMemoryInput(bytes)
|
||||
var reader = Reader.init(stream)
|
||||
var reader = ReaderType.init(stream)
|
||||
|
||||
var bar: Bar
|
||||
fieldReader(bar, reader)
|
||||
@ -345,7 +345,7 @@ proc executeReaderWriterTests*(Format: type) =
|
||||
|
||||
test "Ignored fields should not be included in the field readers table":
|
||||
var pos = 0
|
||||
let bazFields = fieldReadersTable(Baz, Reader)
|
||||
let bazFields = fieldReadersTable(Baz, ReaderType)
|
||||
check:
|
||||
len(bazFields[]) == 2
|
||||
findFieldReader(bazFields[], "f", pos) != nil
|
||||
|
5
vendor/nim-stew/stew/byteutils.nim
vendored
@ -63,6 +63,11 @@ func hexToByteArray*[N: static[int]](hexStr: string): array[N, byte]
|
||||
## Read an hex string and store it in a byte array. No "endianness" reordering is done.
|
||||
hexToByteArray(hexStr, result)
|
||||
|
||||
func hexToByteArray*(hexStr: string, N: static int): array[N, byte]
|
||||
{.raises: [ValueError, Defect], noInit, inline.}=
|
||||
## Read an hex string and store it in a byte array. No "endianness" reordering is done.
|
||||
hexToByteArray(hexStr, result)
|
||||
|
||||
func fromHex*[N](A: type array[N, byte], hexStr: string): A
|
||||
{.raises: [ValueError, Defect], noInit, inline.}=
|
||||
## Read an hex string and store it in a byte array. No "endianness" reordering is done.
|
||||
|
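A small usage sketch for the new overload (not part of this diff; assumes nim-stew is on the import path as stew/byteutils):

import stew/byteutils

let tag = hexToByteArray("deadbeef", 4)        # array[4, byte], length given explicitly
doAssert tag == [0xde'u8, 0xad, 0xbe, 0xef]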
39
vendor/nim-testutils/.appveyor.yml
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
version: '{build}'
|
||||
|
||||
image: Visual Studio 2015
|
||||
|
||||
cache:
|
||||
- NimBinaries
|
||||
|
||||
matrix:
|
||||
# We always want 32 and 64-bit compilation
|
||||
fast_finish: false
|
||||
|
||||
platform:
|
||||
- x86
|
||||
- x64
|
||||
|
||||
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
|
||||
clone_depth: 10
|
||||
|
||||
install:
|
||||
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
|
||||
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
|
||||
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
|
||||
|
||||
# build nim from our own branch - this to avoid the day-to-day churn and
|
||||
# regressions of the fast-paced Nim development while maintaining the
|
||||
# flexibility to apply patches
|
||||
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
||||
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
|
||||
- SET PATH=%CD%\Nim\bin;%USERPROFILE%\.nimble\bin;%PATH%
|
||||
|
||||
build_script:
|
||||
- cd C:\projects\%APPVEYOR_PROJECT_SLUG%
|
||||
- nimble install -y
|
||||
|
||||
test_script:
|
||||
- nimble test
|
||||
|
||||
deploy: off
|
||||
|
5
vendor/nim-testutils/.editorconfig
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
[*]
|
||||
indent_style = space
|
||||
insert_final_newline = true
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
8
vendor/nim-testutils/.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
# ignore all executable files
|
||||
*
|
||||
!*.*
|
||||
!*/
|
||||
*.exe
|
||||
|
||||
nimcache/
|
||||
|
26
vendor/nim-testutils/.travis.yml
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
language: c
|
||||
|
||||
# https://docs.travis-ci.com/user/caching/
|
||||
cache:
|
||||
directories:
|
||||
- NimBinaries
|
||||
|
||||
git:
|
||||
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
|
||||
depth: 10
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
install:
|
||||
# build nim from our own branch - this to avoid the day-to-day churn and
|
||||
# regressions of the fast-paced Nim development while maintaining the
|
||||
# flexibility to apply patches
|
||||
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
||||
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
|
||||
- export PATH=$PWD/Nim/bin:$PATH
|
||||
|
||||
script:
|
||||
- nimble install -y
|
||||
- nimble test
|
12
vendor/nim-testutils/README.md
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
# Testutils
|
||||
|
||||
Testutils now is a home to:
|
||||
|
||||
* [Testrunner](testutils/readme.md)
|
||||
[](https://travis-ci.org/status-im/nim-testutils)
|
||||
[](https://ci.appveyor.com/project/nimbus/nim-testutils/branch/master)
|
||||
* [Fuzzing](testutils/fuzzing/readme.md)
|
||||
* [Fuzzing on Windows](testutils/fuzzing/fuzzing_on_windows.md)
|
||||
|
||||
## License
|
||||
Apache2 or MIT
|
1
vendor/nim-testutils/nim.cfg
vendored
Normal file
@ -0,0 +1 @@
|
||||
-d:nimOldCaseObjects
|
470
vendor/nim-testutils/ntu.nim
vendored
Normal file
@ -0,0 +1,470 @@
|
||||
import
|
||||
std/[hashes, random, tables, sequtils, strtabs, strutils,
|
||||
os, osproc, terminal, times, pegs, algorithm],
|
||||
testutils/[spec, config, helpers, fuzzing_engines]
|
||||
|
||||
#[
|
||||
|
||||
The runner will look recursively for all *.test files at given path. A
|
||||
test file should have at minimum a program name. This is the name of the
|
||||
nim source minus the .nim extension.
|
||||
|
||||
]#
|
||||
|
||||
|
||||
# Code is here and there influenced by nim testament tester and unittest
|
||||
# module.
|
||||
|
||||
const
|
||||
# defaultOptions = "--verbosity:1 --warnings:off --hint[Processing]:off " &
|
||||
# "--hint[Conf]:off --hint[XDeclaredButNotUsed]:off " &
|
||||
# "--hint[Link]:off --hint[Pattern]:off"
|
||||
defaultOptions = "--verbosity:1 --warnings:on "
|
||||
backendOrder = @["c", "cpp", "js"]
|
||||
|
||||
type
|
||||
TestStatus* = enum
|
||||
OK
|
||||
FAILED
|
||||
SKIPPED
|
||||
INVALID
|
||||
|
||||
#[
|
||||
If needed, pass more info to the logresult via a TestResult object
|
||||
TestResult = object
|
||||
status: TestStatus
|
||||
compileTime: float
|
||||
fileSize: uint
|
||||
]#
|
||||
|
||||
ThreadPayload = object
|
||||
core: int
|
||||
spec: TestSpec
|
||||
|
||||
TestThread = Thread[ThreadPayload]
|
||||
TestError* = enum
|
||||
SourceFileNotFound
|
||||
ExeFileNotFound
|
||||
OutputFileNotFound
|
||||
CompileError
|
||||
RuntimeError
|
||||
OutputsDiffer
|
||||
FileSizeTooLarge
|
||||
CompileErrorDiffers
|
||||
|
||||
BackendTests = TableRef[string, seq[TestSpec]]
|
||||
|
||||
proc logFailure(test: TestSpec; error: TestError;
|
||||
data: varargs[string] = [""]) =
|
||||
case error
|
||||
of SourceFileNotFound:
|
||||
styledEcho(fgYellow, styleBright, "source file not found: ",
|
||||
resetStyle, test.source)
|
||||
of ExeFileNotFound:
|
||||
styledEcho(fgYellow, styleBright, "executable file not found: ",
|
||||
resetStyle, test.binary)
|
||||
of OutputFileNotFound:
|
||||
styledEcho(fgYellow, styleBright, "file not found: ",
|
||||
resetStyle, data[0])
|
||||
of CompileError:
|
||||
styledEcho(fgYellow, styleBright, "compile error:\p",
|
||||
resetStyle, data[0])
|
||||
of RuntimeError:
|
||||
styledEcho(fgYellow, styleBright, "runtime error:\p",
|
||||
resetStyle, data[0])
|
||||
of OutputsDiffer:
|
||||
styledEcho(fgYellow, styleBright, "outputs are different:\p",
|
||||
resetStyle,"Expected output to $#:\p$#" % [data[0], data[1]],
|
||||
"Resulted output to $#:\p$#" % [data[0], data[2]])
|
||||
of FileSizeTooLarge:
|
||||
styledEcho(fgYellow, styleBright, "file size is too large: ",
|
||||
resetStyle, data[0] & " > " & $test.maxSize)
|
||||
of CompileErrorDiffers:
|
||||
styledEcho(fgYellow, styleBright, "compile error is different:\p",
|
||||
resetStyle, data[0])
|
||||
|
||||
styledEcho(fgCyan, styleBright, "compiler: ", resetStyle,
|
||||
"$# $# $# $#" % [defaultOptions,
|
||||
test.flags,
|
||||
test.config.compilationFlags,
|
||||
test.source])
|
||||
|
||||
template withinDir(dir: string; body: untyped): untyped =
|
||||
## run the body with a specified directory, returning to current dir
|
||||
let
|
||||
cwd = getCurrentDir()
|
||||
setCurrentDir(dir)
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
setCurrentDir(cwd)
|
||||
|
||||
proc logResult(testName: string, status: TestStatus, time: float) =
|
||||
var color = block:
|
||||
case status
|
||||
of OK: fgGreen
|
||||
of FAILED: fgRed
|
||||
of SKIPPED: fgYellow
|
||||
of INVALID: fgRed
|
||||
styledEcho(styleBright, color, "[", $status, "] ",
|
||||
resetStyle, testName,
|
||||
fgYellow, " ", time.formatFloat(ffDecimal, 3), " s")
|
||||
|
||||
proc logResult(testName: string, status: TestStatus) =
|
||||
var color = block:
|
||||
case status
|
||||
of OK: fgGreen
|
||||
of FAILED: fgRed
|
||||
of SKIPPED: fgYellow
|
||||
of INVALID: fgRed
|
||||
styledEcho(styleBright, color, "[", $status, "] ",
|
||||
resetStyle, testName)
|
||||
|
||||
template time(duration, body): untyped =
|
||||
let t0 = epochTime()
|
||||
block:
|
||||
body
|
||||
duration = epochTime() - t0
|
||||
|
||||
proc composeOutputs(test: TestSpec, stdout: string): TestOutputs =
|
||||
## collect the outputs for the given test
|
||||
result = newTestOutputs()
|
||||
for name, expected in test.outputs.pairs:
|
||||
if name == "stdout":
|
||||
result[name] = stdout
|
||||
else:
|
||||
if not existsFile(name):
|
||||
continue
|
||||
result[name] = readFile(name)
|
||||
removeFile(name)
|
||||
|
||||
proc cmpOutputs(test: TestSpec, outputs: TestOutputs): TestStatus =
|
||||
## compare the test program's outputs to those expected by the test
|
||||
result = OK
|
||||
for name, expected in test.outputs.pairs:
|
||||
if name notin outputs:
|
||||
logFailure(test, OutputFileNotFound, name)
|
||||
result = FAILED
|
||||
continue
|
||||
|
||||
let
|
||||
testOutput = outputs[name]
|
||||
|
||||
# Would be nice to do a real diff here instead of simple compare
|
||||
if test.timestampPeg.len > 0:
|
||||
if not cmpIgnorePegs(testOutput, expected,
|
||||
peg(test.timestampPeg), pegXid):
|
||||
logFailure(test, OutputsDiffer, name, expected, testOutput)
|
||||
result = FAILED
|
||||
else:
|
||||
if not cmpIgnoreDefaultTimestamps(testOutput, expected):
|
||||
logFailure(test, OutputsDiffer, name, expected, testOutput)
|
||||
result = FAILED
|
||||
|
||||
proc compile(test: TestSpec; backend: string): TestStatus =
|
||||
## compile the test program for the requested backends
|
||||
block:
|
||||
if not existsFile(test.source):
|
||||
logFailure(test, SourceFileNotFound)
|
||||
result = FAILED
|
||||
break
|
||||
|
||||
let
|
||||
binary = test.binary(backend)
|
||||
var
|
||||
cmd = findExe("nim")
|
||||
cmd &= " " & backend
|
||||
cmd &= " --nimcache:" & test.config.cache(backend)
|
||||
cmd &= " --out:" & binary
|
||||
cmd &= " " & defaultOptions
|
||||
cmd &= " " & test.flags
|
||||
cmd &= " " & test.config.compilationFlags
|
||||
cmd &= " " & test.source.quoteShell
|
||||
var
|
||||
c = parseCmdLine(cmd)
|
||||
p = startProcess(command=c[0], args=c[1.. ^1],
|
||||
options={poStdErrToStdOut, poUsePath})
|
||||
|
||||
try:
|
||||
let
|
||||
compileInfo = parseCompileStream(p, p.outputStream)
|
||||
|
||||
if compileInfo.exitCode != 0:
|
||||
if test.compileError.len == 0:
|
||||
logFailure(test, CompileError, compileInfo.fullMsg)
|
||||
result = FAILED
|
||||
break
|
||||
else:
|
||||
if test.compileError == compileInfo.msg and
|
||||
(test.errorFile.len == 0 or test.errorFile == compileInfo.errorFile) and
|
||||
(test.errorLine == 0 or test.errorLine == compileInfo.errorLine) and
|
||||
(test.errorColumn == 0 or test.errorColumn == compileInfo.errorColumn):
|
||||
result = OK
|
||||
else:
|
||||
logFailure(test, CompileErrorDiffers, compileInfo.fullMsg)
|
||||
result = FAILED
|
||||
break
|
||||
|
||||
# Lets also check file size here as it kinda belongs to the
|
||||
# compilation result
|
||||
if test.maxSize != 0:
|
||||
var size = getFileSize(binary)
|
||||
if size > test.maxSize:
|
||||
logFailure(test, FileSizeTooLarge, $size)
|
||||
result = FAILED
|
||||
break
|
||||
|
||||
result = OK
|
||||
finally:
|
||||
close(p)
|
||||
|
||||
proc threadedExecute(payload: ThreadPayload) {.thread.}
|
||||
|
||||
proc spawnTest(child: var Thread[ThreadPayload]; test: TestSpec;
|
||||
core: int): bool =
|
||||
## invoke a single test on the given thread/core; true if we
|
||||
## pinned the test to the given core
|
||||
assert core >= 0
|
||||
child.createThread(threadedExecute,
|
||||
ThreadPayload(core: core, spec: test))
|
||||
# set cpu affinity if requested (and cores remain)
|
||||
if CpuAffinity in test.config.flags:
|
||||
if core < countProcessors():
|
||||
child.pinToCpu core
|
||||
result = true
|
||||
|
||||
proc execute(test: TestSpec): TestStatus =
|
||||
## invoke a single test and return a status
|
||||
var
|
||||
# FIXME: pass a backend
|
||||
cmd = test.binary
|
||||
# output the test stage if necessary
|
||||
if test.stage.len > 0:
|
||||
echo 20.spaces & test.stage
|
||||
|
||||
if not fileExists(cmd):
|
||||
result = FAILED
|
||||
logFailure(test, ExeFileNotFound)
|
||||
else:
|
||||
withinDir parentDir(test.path):
|
||||
cmd = cmd.quoteShell & " " & test.args
|
||||
let
|
||||
(output, exitCode) = execCmdEx(cmd)
|
||||
if exitCode != 0:
|
||||
# parseExecuteOutput() # Need to parse the run time failures?
|
||||
logFailure(test, RuntimeError, output)
|
||||
result = FAILED
|
||||
else:
|
||||
let
|
||||
outputs = test.composeOutputs(output)
|
||||
result = test.cmpOutputs(outputs)
|
||||
# perform an update of the testfile if requested and required
|
||||
if UpdateOutputs in test.config.flags and result == FAILED:
|
||||
test.rewriteTestFile(outputs)
|
||||
# we'll call this a `skip` because it's not strictly a failure
|
||||
# and we want any dependent testing to proceed as usual.
|
||||
result = SKIPPED
|
||||
|
||||
proc executeAll(test: TestSpec): TestStatus =
|
||||
## run a test and any dependent children, yielding a single status
|
||||
when compileOption("threads"):
|
||||
try:
|
||||
var
|
||||
thread: TestThread
|
||||
# we spawn and join the test here so that it can receive
|
||||
# cpu affinity via the standard thread.pinToCpu method
|
||||
discard thread.spawnTest(test, 0)
|
||||
thread.joinThreads
|
||||
except:
|
||||
# any thread(?) exception is a failure
|
||||
result = FAILED
|
||||
else:
|
||||
# unthreaded serial test execution
|
||||
result = SKIPPED
|
||||
while test != nil and result in {OK, SKIPPED}:
|
||||
result = test.execute
|
||||
test = test.child
|
||||
|
||||
proc threadedExecute(payload: ThreadPayload) {.thread.} =
|
||||
## a thread in which we'll perform a test execution given the payload
|
||||
var
|
||||
result = FAILED
|
||||
if payload.spec.child == nil:
|
||||
{.gcsafe.}:
|
||||
result = payload.spec.execute
|
||||
else:
|
||||
try:
|
||||
var
|
||||
child: TestThread
|
||||
discard child.spawnTest(payload.spec.child, payload.core + 1)
|
||||
{.gcsafe.}:
|
||||
result = payload.spec.execute
|
||||
child.joinThreads
|
||||
except:
|
||||
result = FAILED
|
||||
if result == FAILED:
|
||||
raise newException(CatchableError, payload.spec.stage & " failed")
|
||||
|
||||
proc optimizeOrder(tests: seq[TestSpec];
|
||||
order: set[SortBy]): seq[TestSpec] =
|
||||
## order the tests by how recently each was modified
|
||||
template whenWritten(path: string): Time =
|
||||
path.getFileInfo(followSymlink = true).lastWriteTime
|
||||
|
||||
result = tests
|
||||
for s in SortBy.low .. SortBy.high:
|
||||
if s in order:
|
||||
case s
|
||||
of Test:
|
||||
result = result.sortedByIt it.path.whenWritten
|
||||
of Source:
|
||||
result = result.sortedByIt it.source.whenWritten
|
||||
of Reverse:
|
||||
result.reverse
|
||||
of Random:
|
||||
result.shuffle
|
||||
|
||||
proc scanTestPath(path: string): seq[string] =
|
||||
## add any tests found at the given path
|
||||
if fileExists(path):
|
||||
result.add path
|
||||
else:
|
||||
for file in walkDirRec path:
|
||||
if file.endsWith ".test":
|
||||
result.add file
|
||||
|
||||
proc test(test: TestSpec; backend: string): TestStatus =
|
||||
let
|
||||
config = test.config
|
||||
var
|
||||
duration: float
|
||||
|
||||
try:
|
||||
time duration:
|
||||
# perform all tests in the test file
|
||||
result = test.executeAll
|
||||
finally:
|
||||
logResult(test.name, result, duration)
|
||||
|
||||
proc buildBackendTests(config: TestConfig;
|
||||
tests: seq[TestSpec]): BackendTests =
|
||||
## build the table mapping backend to test inputs
|
||||
result = newTable[string, seq[TestSpec]](4)
|
||||
for spec in tests.items:
|
||||
for backend in config.backends.items:
|
||||
assert backend != ""
|
||||
if backend in result:
|
||||
if spec notin result[backend]:
|
||||
result[backend].add spec
|
||||
else:
|
||||
result[backend] = @[spec]
|
||||
|
||||
proc removeCaches(config: TestConfig; backend: string) =
|
||||
## cleanup nimcache directories between backend runs
|
||||
removeDir config.cache(backend)
|
||||
|
||||
# we want to run tests on "native", first.
|
||||
proc performTesting(config: TestConfig;
|
||||
backend: string; tests: seq[TestSpec]): TestStatus =
|
||||
var
|
||||
successful, skipped, invalid, failed = 0
|
||||
dedupe: CountTable[Hash]
|
||||
|
||||
assert backend != ""
|
||||
|
||||
# perform each test in an optimized order
|
||||
for spec in tests.optimizeOrder(config.orderBy).items:
|
||||
|
||||
block escapeBlock:
|
||||
if spec.program.len == 0:
|
||||
# a program name is bare minimum of a test file
|
||||
result = INVALID
|
||||
invalid.inc
|
||||
logResult(spec.program & " for " & spec.name, result)
|
||||
break escapeBlock
|
||||
|
||||
if spec.skip or hostOS notin spec.os or config.shouldSkip(spec.name):
|
||||
result = SKIPPED
|
||||
skipped.inc
|
||||
logResult(spec.program & " for " & spec.name, result)
|
||||
break escapeBlock
|
||||
|
||||
let
|
||||
build = spec.binaryHash(backend)
|
||||
if build notin dedupe:
|
||||
dedupe.inc build
|
||||
# compile the test program for all backends
|
||||
var
|
||||
duration: float
|
||||
try:
|
||||
time duration:
|
||||
result = compile(spec, backend)
|
||||
if result != OK or spec.compileError.len != 0:
|
||||
failed.inc
|
||||
break escapeBlock
|
||||
finally:
|
||||
logResult("compiled " & spec.program & " for " & spec.name,
|
||||
result, duration)
|
||||
|
||||
if result == OK:
|
||||
successful.inc
|
||||
|
||||
let nonSuccesful = skipped + invalid + failed
|
||||
styledEcho(styleBright, "Finished run for $#: $#/$# OK, $# SKIPPED, $# FAILED, $# INVALID" %
|
||||
[backend, $successful, $(tests.len),
|
||||
$skipped, $failed, $invalid])
|
||||
|
||||
for spec in tests.items:
|
||||
try:
|
||||
# this may fail in 64-bit AppVeyor images with "The process cannot
|
||||
# access the file because it is being used by another process.
|
||||
# [OSError]"
|
||||
let
|
||||
fn = spec.binary(backend)
|
||||
if fileExists(fn):
|
||||
removeFile(fn)
|
||||
except CatchableError as e:
|
||||
echo e.msg
|
||||
|
||||
if 0 == tests.len - successful - nonSuccesful:
|
||||
config.removeCaches(backend)
|
||||
|
||||
proc main(): int =
|
||||
let config = processArguments()
|
||||
|
||||
case config.cmd
|
||||
of Command.test:
|
||||
let testFiles = scanTestPath(config.path)
|
||||
if testFiles.len == 0:
|
||||
styledEcho(styleBright, "No test files found")
|
||||
result = 1
|
||||
else:
|
||||
var
|
||||
tests = testFiles.mapIt config.parseTestFile(it)
|
||||
backends = config.buildBackendTests(tests)
|
||||
|
||||
# c > cpp > js
|
||||
for backend in backendOrder:
|
||||
assert backend != ""
|
||||
# if we actually need to do anything on the given backend
|
||||
if backend notin backends:
|
||||
continue
|
||||
let
|
||||
tests = backends[backend]
|
||||
try:
|
||||
if OK != config.performTesting(backend, tests):
|
||||
break
|
||||
finally:
|
||||
backends.del(backend)
|
||||
|
||||
for backend, tests in backends.pairs:
|
||||
assert backend != ""
|
||||
if OK != config.performTesting(backend, tests):
|
||||
break
|
||||
of Command.fuzz:
|
||||
runFuzzer(config.target, config.fuzzer, config.corpusDir)
|
||||
of noCommand:
|
||||
discard
|
||||
|
||||
when isMainModule:
|
||||
quit main()
|
2
vendor/nim-testutils/ntu.nim.cfg
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
--threads:on
|
||||
--path="$config"
|
17
vendor/nim-testutils/scripts/install_honggfuzz.sh
vendored
Executable file
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eu
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install binutils-dev
|
||||
sudo apt-get install libunwind8-dev
|
||||
|
||||
git clone https://github.com/google/honggfuzz.git /tmp/honggfuzz
|
||||
|
||||
pushd /tmp/honggfuzz
|
||||
make
|
||||
sudo make install DESTDIR=/opt/honggfuzz
|
||||
popd
|
||||
|
||||
rm -rf /tmp/honggfuzz
|
||||
|
6
vendor/nim-testutils/tests/hello.nim
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
import std/os
|
||||
|
||||
if paramCount() == 1:
|
||||
echo "hello ", paramStr(1)
|
||||
else:
|
||||
echo "hello world"
|
3
vendor/nim-testutils/tests/hello/hello.test
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
program = "../hello"
|
||||
[Output]
|
||||
stdout = "hello world\n"
|
15
vendor/nim-testutils/tests/hello/hello_multiple.test
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
program = "../hello"
|
||||
|
||||
# my aim is true
|
||||
[Output]
|
||||
stdout = "hello world\n"
|
||||
|
||||
# option 2
|
||||
[Output_larry_is_a_good_boy]
|
||||
args = "larry"
|
||||
stdout = "hello larry\n"
|
||||
|
||||
# option 47
|
||||
[Output_stevie_is_a_good_boy]
|
||||
args = "stephen"
|
||||
stdout = "hello stephen\n"
|
5
vendor/nim-testutils/tests/hello/hello_size.test
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
program = "../hello"
|
||||
max_size = 60000
|
||||
release
|
||||
--opt:size
|
||||
os = "linux,macosx"
|
Some files were not shown because too many files have changed in this diff.