Mirror of https://github.com/waku-org/nwaku.git (synced 2025-01-13 16:25:00 +00:00)

deploy: 8ac8e758e065a1056a0de48e08e3180bf4aea89e
parent 03ad0fa8ce
commit 5947bc7143

vendor/nim-chronicles/README.md (vendored): 3 changes
@@ -40,6 +40,9 @@ and it looks like this:
 
 ![textblocks format example](media/textlines.svg)
 
+This format is compatible with tooling written for
+[heroku/logfmt](https://brandur.org/logfmt).
+
 Alternatively, you can use a multi-line format called `textblocks`:
 
 ![textblocks format example](media/textblocks.svg)
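
An illustration (not part of the diff) of the formats this README section describes, using chronicles' own logging API; the exact timestamp layout and colouring depend on the configured sinks:

    import chronicles

    info "connection accepted", remote = "127.0.0.1:9000", protocol = "waku/2"

    # With the single-line `textlines` format this prints one logfmt-style line, roughly:
    #   INF 2023-06-01 10:00:00.000+00:00 connection accepted  remote=127.0.0.1:9000 protocol=waku/2
    # With `textblocks` the same record is spread over several lines, one property per line.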

vendor/nim-chronicles/chronicles/helpers.nim (vendored): 2 changes

@@ -2,7 +2,7 @@ import
   tables, strutils, strformat,
   topics_registry
 
-func parseTopicDirectives*(directives: openarray[string]): Table[string, TopicSettings] =
+func parseTopicDirectives*(directives: openArray[string]): Table[string, TopicSettings] =
   result = initTable[string, TopicSettings]()
 
   for directive in directives:

@@ -562,7 +562,7 @@ proc setProperty*(r: var TextLineRecord, key: string, val: auto) =
       # This is similar to how it's done in logfmt:
      # https://github.com/csquared/node-logfmt/blob/master/lib/stringify.js#L13
       let
-        needsEscape = valText.find(NewLines + {'"', '\\'}) > -1
+        needsEscape = valText.find(Newlines + {'"', '\\'}) > -1
         needsQuote = valText.find({' ', '='}) > -1
 
       if needsEscape or needsQuote:

@@ -617,7 +617,7 @@ proc setProperty*(r: var TextBlockRecord, key: string, val: auto) =
   append(r.output, ": ")
   applyStyle(r, styleBright)
 
-  if valText.find(NewLines) == -1:
+  if valText.find(Newlines) == -1:
     append(r.output, valText)
     append(r.output, "\n")
   else:

@@ -681,7 +681,7 @@ template setFirstProperty*(r: var JsonRecord, key: string, val: auto) =
 
 proc flushRecord*(r: var JsonRecord) =
   when defined(js):
-    r.output.append JSON.stringify(r.record)
+    r.output.append Json.stringify(r.record)
   else:
     r.jsonWriter.endRecord()
     r.outStream.write '\n'

@@ -82,7 +82,7 @@ proc setTopicState*(name: string,
       return true
 
 proc topicsMatch*(logStmtLevel: LogLevel,
-                  logStmtTopics: openarray[ptr TopicSettings]): bool =
+                  logStmtTopics: openArray[ptr TopicSettings]): bool =
   lockRegistry:
     var
       hasEnabledTopics = gTotalEnabledTopics > 0

vendor/nim-chronicles/nim.cfg (vendored): 3 changes

@@ -1,2 +1,3 @@
 --path: "."
+--styleCheck:usages
+--styleCheck:error
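
The `--styleCheck` switches added above line up with several purely cosmetic renames elsewhere in this commit (`openarray` to `openArray`, `NewLines` to `Newlines`, `fieldpairs` to `fieldPairs`, `countDown` to `countdown`): with `usages` the compiler checks that every use of an identifier matches the spelling at its declaration, and `error` turns the resulting hints into compile errors. A minimal sketch, not taken from this repository:

    # Compile with: nim c --styleCheck:usages --styleCheck:error example.nim
    proc readChunk(data: openArray[byte]): int =
      data.len

    echo readChunk([byte 1, 2, 3])    # accepted: spelling matches the declarations
    # echo readchunk([byte 1, 2, 3])  # rejected: 'readchunk' differs from 'readChunk'
    # proc f(x: openarray[int])       # rejected: 'openarray' differs from 'openArray'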

@@ -978,6 +978,7 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {.
         request.setError(exc)
         raise exc
 
+  connection.flags.incl(HttpClientConnectionFlag.Request)
   request.connection = connection
 
   try:

@@ -1028,6 +1029,7 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {.
         request.setError(exc)
         raise exc
 
+  connection.flags.incl(HttpClientConnectionFlag.Request)
   request.connection = connection
 
   try:

vendor/nim-chronos/chronos/asyncloop.nim (vendored): 23 changes

@@ -299,7 +299,6 @@ when defined(windows):
     CompletionKey = ULONG_PTR
 
     CompletionData* = object
-      fd*: AsyncFD
       cb*: CallbackFunc
       errCode*: OSErrorCode
       bytesCount*: int32

@@ -500,17 +499,9 @@ elif unixPlatform:
   type
     AsyncFD* = distinct cint
 
-    CompletionData* = object
-      fd*: AsyncFD
-      udata*: pointer
-
-    PCompletionData* = ptr CompletionData
-
     SelectorData* = object
       reader*: AsyncCallback
-      rdata*: CompletionData
       writer*: AsyncCallback
-      wdata*: CompletionData
 
     PDispatcher* = ref object of PDispatcherBase
       selector: Selector[SelectorData]

@@ -555,8 +546,6 @@ elif unixPlatform:
     ## Register file descriptor ``fd`` in thread's dispatcher.
     let loop = getThreadDispatcher()
     var data: SelectorData
-    data.rdata.fd = fd
-    data.wdata.fd = fd
     loop.selector.registerHandle(int(fd), {}, data)
 
 proc unregister*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =

@@ -574,9 +563,8 @@ elif unixPlatform:
     let loop = getThreadDispatcher()
     var newEvents = {Event.Read}
     withData(loop.selector, int(fd), adata) do:
-      let acb = AsyncCallback(function: cb, udata: addr adata.rdata)
+      let acb = AsyncCallback(function: cb, udata: udata)
       adata.reader = acb
-      adata.rdata = CompletionData(fd: fd, udata: udata)
       newEvents.incl(Event.Read)
       if not(isNil(adata.writer.function)):
         newEvents.incl(Event.Write)

@@ -592,7 +580,6 @@ elif unixPlatform:
     withData(loop.selector, int(fd), adata) do:
       # We need to clear `reader` data, because `selectors` don't do it
       adata.reader = default(AsyncCallback)
-      # adata.rdata = CompletionData()
       if not(isNil(adata.writer.function)):
         newEvents.incl(Event.Write)
     do:

@@ -606,9 +593,8 @@ elif unixPlatform:
     let loop = getThreadDispatcher()
     var newEvents = {Event.Write}
     withData(loop.selector, int(fd), adata) do:
-      let acb = AsyncCallback(function: cb, udata: addr adata.wdata)
+      let acb = AsyncCallback(function: cb, udata: udata)
       adata.writer = acb
-      adata.wdata = CompletionData(fd: fd, udata: udata)
       newEvents.incl(Event.Write)
      if not(isNil(adata.reader.function)):
        newEvents.incl(Event.Read)

@@ -624,7 +610,6 @@ elif unixPlatform:
     withData(loop.selector, int(fd), adata) do:
       # We need to clear `writer` data, because `selectors` don't do it
       adata.writer = default(AsyncCallback)
-      # adata.wdata = CompletionData()
       if not(isNil(adata.reader.function)):
         newEvents.incl(Event.Read)
     do:

@@ -692,9 +677,7 @@ elif unixPlatform:
     var data: SelectorData
     result = loop.selector.registerSignal(signal, data)
     withData(loop.selector, result, adata) do:
-      adata.reader = AsyncCallback(function: cb, udata: addr adata.rdata)
-      adata.rdata.fd = AsyncFD(result)
-      adata.rdata.udata = udata
+      adata.reader = AsyncCallback(function: cb, udata: udata)
     do:
       raise newException(ValueError, "File descriptor not registered.")
 

vendor/nim-chronos/chronos/asyncmacro2.nim (vendored): 24 changes

@@ -72,6 +72,20 @@ proc verifyReturnType(typeName: string) {.compileTime.} =
 macro unsupported(s: static[string]): untyped =
   error s
 
+proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} =
+  # Replace every Call -> OpenSymChoice by a Bracket expr
+  # ref https://github.com/nim-lang/Nim/issues/11091
+  if node.kind in nnkCallKinds and
+      node[0].kind == nnkOpenSymChoice and node[0].eqIdent("[]"):
+    result = newNimNode(nnkBracketExpr).add(
+      cleanupOpenSymChoice(node[1]),
+      cleanupOpenSymChoice(node[2])
+    )
+  else:
+    result = node.copyNimNode()
+    for child in node:
+      result.add(cleanupOpenSymChoice(child))
+
 proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
   ## This macro transforms a single procedure into a closure iterator.
   ## The ``async`` macro supports a stmtList holding multiple async procedures.

@@ -81,17 +95,13 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
 
   let prcName = prc.name.getName
 
-  let returnType = prc.params[0]
+  let returnType = cleanupOpenSymChoice(prc.params[0])
   var baseType: NimNode
   # Verify that the return type is a Future[T]
   if returnType.kind == nnkBracketExpr:
     let fut = repr(returnType[0])
     verifyReturnType(fut)
     baseType = returnType[1]
-  elif returnType.kind in nnkCallKinds and returnType[0].eqIdent("[]"):
-    let fut = repr(returnType[1])
-    verifyReturnType(fut)
-    baseType = returnType[2]
   elif returnType.kind == nnkEmpty:
     baseType = returnType
   else:

@@ -102,6 +112,10 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
 
   var outerProcBody = newNimNode(nnkStmtList, prc.body)
 
+  # Copy comment for nimdoc
+  if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt:
+    outerProcBody.add(prc.body[0])
+
   # -> iterator nameIter(chronosInternalRetFuture: Future[T]): FutureBase {.closure.} =
   # -> {.push warning[resultshadowed]: off.}

@@ -185,7 +185,7 @@ elif defined(windows):
       else:
         ## Initiation
         transp.state.incl(WritePending)
-        let fd = SocketHandle(ovl.data.fd)
+        let fd = SocketHandle(transp.fd)
         var vector = transp.queue.popFirst()
         transp.setWriterWSABuffer(vector)
         var ret: cint

@@ -258,7 +258,7 @@ elif defined(windows):
     ## Initiation
     if transp.state * {ReadEof, ReadClosed, ReadError} == {}:
       transp.state.incl(ReadPending)
-      let fd = SocketHandle(ovl.data.fd)
+      let fd = SocketHandle(transp.fd)
       transp.rflag = 0
       transp.ralen = SockLen(sizeof(Sockaddr_storage))
      let ret = WSARecvFrom(fd, addr transp.rwsabuf, DWORD(1),

@@ -406,9 +406,9 @@ elif defined(windows):
   result.udata = udata
   result.state = {WritePaused}
   result.future = newFuture[void]("datagram.transport")
-  result.rovl.data = CompletionData(fd: localSock, cb: readDatagramLoop,
+  result.rovl.data = CompletionData(cb: readDatagramLoop,
                                     udata: cast[pointer](result))
-  result.wovl.data = CompletionData(fd: localSock, cb: writeDatagramLoop,
+  result.wovl.data = CompletionData(cb: writeDatagramLoop,
                                     udata: cast[pointer](result))
   result.rwsabuf = TWSABuf(buf: cast[cstring](addr result.buffer[0]),
                            len: int32(len(result.buffer)))

@@ -426,9 +426,8 @@ else:
 proc readDatagramLoop(udata: pointer) {.raises: Defect.}=
   var raddr: TransportAddress
   doAssert(not isNil(udata))
-  var cdata = cast[ptr CompletionData](udata)
-  var transp = cast[DatagramTransport](cdata.udata)
-  let fd = SocketHandle(cdata.fd)
+  let transp = cast[DatagramTransport](udata)
+  let fd = SocketHandle(transp.fd)
   if int(fd) == 0:
     ## This situation can be happen, when there events present
     ## after transport was closed.

@@ -459,9 +458,8 @@ else:
 proc writeDatagramLoop(udata: pointer) =
   var res: int
   doAssert(not isNil(udata))
-  var cdata = cast[ptr CompletionData](udata)
-  var transp = cast[DatagramTransport](cdata.udata)
-  let fd = SocketHandle(cdata.fd)
+  var transp = cast[DatagramTransport](udata)
+  let fd = SocketHandle(transp.fd)
   if int(fd) == 0:
     ## This situation can be happen, when there events present
     ## after transport was closed.

vendor/nim-chronos/chronos/transports/stream.nim (vendored): 99 changes

@@ -407,7 +407,7 @@ elif defined(windows):
       ## Initiation
       transp.state.incl(WritePending)
       if transp.kind == TransportKind.Socket:
-        let sock = SocketHandle(transp.wovl.data.fd)
+        let sock = SocketHandle(transp.fd)
         var vector = transp.queue.popFirst()
         if vector.kind == VectorKind.DataBuffer:
           transp.wovl.zeroOvelappedOffset()

@@ -492,7 +492,7 @@ elif defined(windows):
           else:
             transp.queue.addFirst(vector)
       elif transp.kind == TransportKind.Pipe:
-        let pipe = Handle(transp.wovl.data.fd)
+        let pipe = Handle(transp.fd)
         var vector = transp.queue.popFirst()
         if vector.kind == VectorKind.DataBuffer:
           transp.wovl.zeroOvelappedOffset()

@@ -587,7 +587,7 @@ elif defined(windows):
       transp.state.excl(ReadPaused)
       transp.state.incl(ReadPending)
       if transp.kind == TransportKind.Socket:
-        let sock = SocketHandle(transp.rovl.data.fd)
+        let sock = SocketHandle(transp.fd)
         transp.roffset = transp.offset
         transp.setReaderWSABuffer()
         let ret = WSARecv(sock, addr transp.rwsabuf, 1,

@@ -610,7 +610,7 @@ elif defined(windows):
           transp.setReadError(err)
           transp.completeReader()
       elif transp.kind == TransportKind.Pipe:
-        let pipe = Handle(transp.rovl.data.fd)
+        let pipe = Handle(transp.fd)
         transp.roffset = transp.offset
         transp.setReaderWSABuffer()
         let ret = readFile(pipe, cast[pointer](transp.rwsabuf.buf),

@@ -650,9 +650,9 @@ elif defined(windows):
     else:
       transp = StreamTransport(kind: TransportKind.Socket)
     transp.fd = sock
-    transp.rovl.data = CompletionData(fd: sock, cb: readStreamLoop,
+    transp.rovl.data = CompletionData(cb: readStreamLoop,
                                       udata: cast[pointer](transp))
-    transp.wovl.data = CompletionData(fd: sock, cb: writeStreamLoop,
+    transp.wovl.data = CompletionData(cb: writeStreamLoop,
                                       udata: cast[pointer](transp))
     transp.buffer = newSeq[byte](bufsize)
     transp.state = {ReadPaused, WritePaused}

@@ -670,9 +670,9 @@ elif defined(windows):
     else:
       transp = StreamTransport(kind: TransportKind.Pipe)
     transp.fd = fd
-    transp.rovl.data = CompletionData(fd: fd, cb: readStreamLoop,
+    transp.rovl.data = CompletionData(cb: readStreamLoop,
                                       udata: cast[pointer](transp))
-    transp.wovl.data = CompletionData(fd: fd, cb: writeStreamLoop,
+    transp.wovl.data = CompletionData(cb: writeStreamLoop,
                                       udata: cast[pointer](transp))
     transp.buffer = newSeq[byte](bufsize)
     transp.flags = flags

@@ -746,8 +746,7 @@ elif defined(windows):
           sock.closeSocket()
           retFuture.fail(getTransportOsError(err))
         else:
-          let transp = newStreamSocketTransport(povl.data.fd, bufferSize,
-                                                child)
+          let transp = newStreamSocketTransport(sock, bufferSize, child)
           # Start tracking transport
           trackStream(transp)
           retFuture.complete(transp)

@@ -761,7 +760,7 @@ elif defined(windows):
 
       povl = RefCustomOverlapped()
       GC_ref(povl)
-      povl.data = CompletionData(fd: sock, cb: socketContinuation)
+      povl.data = CompletionData(cb: socketContinuation)
       let res = loop.connectEx(SocketHandle(sock),
                                cast[ptr SockAddr](addr saddr),
                                DWORD(slen), nil, 0, nil,

@@ -895,7 +894,6 @@ elif defined(windows):
   if pipeHandle == INVALID_HANDLE_VALUE:
     raiseAssert osErrorMsg(osLastError())
   server.sock = AsyncFD(pipeHandle)
-  server.aovl.data.fd = AsyncFD(pipeHandle)
   try: register(server.sock)
   except CatchableError as exc:
     raiseAsDefect exc, "register"

@@ -1177,8 +1175,7 @@ elif defined(windows):
     let dwLocalAddressLength = DWORD(sizeof(Sockaddr_in6) + 16)
     let dwRemoteAddressLength = DWORD(sizeof(Sockaddr_in6) + 16)
 
-    server.aovl.data = CompletionData(fd: server.sock,
-                                      cb: continuationSocket,
+    server.aovl.data = CompletionData(cb: continuationSocket,
                                       udata: cast[pointer](server))
     server.apending = true
     let res = loop.acceptEx(SocketHandle(server.sock),

@@ -1219,8 +1216,7 @@ elif defined(windows):
       retFuture.fail(getTransportOsError(err))
       return retFuture
 
-    server.aovl.data = CompletionData(fd: server.sock,
-                                      cb: continuationPipe,
+    server.aovl.data = CompletionData(cb: continuationPipe,
                                       udata: cast[pointer](server))
     server.apending = true
     let res = connectNamedPipe(Handle(server.sock),

@@ -1260,15 +1256,17 @@ else:
       raiseAsDefect exc, "removeWriter"
 
 proc writeStreamLoop(udata: pointer) =
-  var cdata = cast[ptr CompletionData](udata)
-  var transp = cast[StreamTransport](cdata.udata)
-  let fd = SocketHandle(cdata.fd)
-
-  if int(fd) == 0 or isNil(transp):
-    ## This situation can be happen, when there events present
-    ## after transport was closed.
+  if isNil(udata):
+    # TODO this is an if rather than an assert for historical reasons:
+    # it should not happen unless there are race conditions - but if there
+    # are race conditions, `transp` might be invalid even if it's not nil:
+    # it could have been released
     return
 
+  let
+    transp = cast[StreamTransport](udata)
+    fd = SocketHandle(transp.fd)
+
   if WriteClosed in transp.state:
     if transp.queue.len > 0:
       transp.removeWriter()

@@ -1357,15 +1355,17 @@ else:
       transp.removeWriter()
 
 proc readStreamLoop(udata: pointer) =
-  # TODO fix Defect raises - they "shouldn't" happen
-  var cdata = cast[ptr CompletionData](udata)
-  var transp = cast[StreamTransport](cdata.udata)
-  let fd = SocketHandle(cdata.fd)
-  if int(fd) == 0 or isNil(transp):
-    ## This situation can be happen, when there events present
-    ## after transport was closed.
+  if isNil(udata):
+    # TODO this is an if rather than an assert for historical reasons:
+    # it should not happen unless there are race conditions - but if there
+    # are race conditions, `transp` might be invalid even if it's not nil:
+    # it could have been released
     return
 
+  let
+    transp = cast[StreamTransport](udata)
+    fd = SocketHandle(transp.fd)
+
   if ReadClosed in transp.state:
     transp.state.incl({ReadPaused})
     transp.completeReader()

@@ -1381,7 +1381,7 @@ else:
       elif int(err) in {ECONNRESET}:
         transp.state.incl({ReadEof, ReadPaused})
         try:
-          cdata.fd.removeReader()
+          transp.fd.removeReader()
         except IOSelectorsException as exc:
           raiseAsDefect exc, "removeReader"
         except ValueError as exc:

@@ -1390,7 +1390,7 @@ else:
         transp.state.incl(ReadPaused)
         transp.setReadError(err)
         try:
-          cdata.fd.removeReader()
+          transp.fd.removeReader()
         except IOSelectorsException as exc:
           raiseAsDefect exc, "removeReader"
         except ValueError as exc:

@@ -1398,7 +1398,7 @@ else:
     elif res == 0:
       transp.state.incl({ReadEof, ReadPaused})
       try:
-        cdata.fd.removeReader()
+        transp.fd.removeReader()
       except IOSelectorsException as exc:
         raiseAsDefect exc, "removeReader"
       except ValueError as exc:

@@ -1408,7 +1408,7 @@ else:
       if transp.offset == len(transp.buffer):
         transp.state.incl(ReadPaused)
         try:
-          cdata.fd.removeReader()
+          transp.fd.removeReader()
         except IOSelectorsException as exc:
           raiseAsDefect exc, "removeReader"
         except ValueError as exc:

@@ -1427,7 +1427,7 @@ else:
         transp.state.incl(ReadPaused)
         transp.setReadError(err)
         try:
-          cdata.fd.removeReader()
+          transp.fd.removeReader()
         except IOSelectorsException as exc:
           raiseAsDefect exc, "removeReader"
         except ValueError as exc:

@@ -1435,7 +1435,7 @@ else:
     elif res == 0:
      transp.state.incl({ReadEof, ReadPaused})
       try:
-        cdata.fd.removeReader()
+        transp.fd.removeReader()
       except IOSelectorsException as exc:
         raiseAsDefect exc, "removeReader"
       except ValueError as exc:

@@ -1445,7 +1445,7 @@ else:
       if transp.offset == len(transp.buffer):
         transp.state.incl(ReadPaused)
         try:
-          cdata.fd.removeReader()
+          transp.fd.removeReader()
         except IOSelectorsException as exc:
           raiseAsDefect exc, "removeReader"
         except ValueError as exc:

@@ -1519,11 +1519,9 @@ else:
 
     proc continuation(udata: pointer) =
       if not(retFuture.finished()):
-        var data = cast[ptr CompletionData](udata)
         var err = 0
-        let fd = data.fd
         try:
-          fd.removeWriter()
+          sock.removeWriter()
         except IOSelectorsException as exc:
           retFuture.fail(exc)
           return

@@ -1531,15 +1529,15 @@ else:
          retFuture.fail(exc)
          return
 
-        if not(fd.getSocketError(err)):
-          closeSocket(fd)
+        if not(sock.getSocketError(err)):
+          closeSocket(sock)
           retFuture.fail(getTransportOsError(osLastError()))
           return
         if err != 0:
-          closeSocket(fd)
+          closeSocket(sock)
           retFuture.fail(getTransportOsError(OSErrorCode(err)))
           return
-        let transp = newStreamSocketTransport(fd, bufferSize, child)
+        let transp = newStreamSocketTransport(sock, bufferSize, child)
         # Start tracking transport
         trackStream(transp)
         retFuture.complete(transp)

@@ -1581,11 +1579,18 @@ else:
           break
     return retFuture
 
-  proc acceptLoop(udata: pointer) {.gcsafe.} =
+  proc acceptLoop(udata: pointer) =
+    if isNil(udata):
+      # TODO this is an if rather than an assert for historical reasons:
+      # it should not happen unless there are race conditions - but if there
+      # are race conditions, `transp` might be invalid even if it's not nil:
+      # it could have been released
+      return
+
     var
       saddr: Sockaddr_storage
       slen: SockLen
-    var server = cast[StreamServer](cast[ptr CompletionData](udata).udata)
+    let server = cast[StreamServer](udata)
     while true:
       if server.status in {ServerStatus.Stopped, ServerStatus.Closed}:
         break

@@ -1990,7 +1995,7 @@ proc createStreamServer*(host: TransportAddress,
       cb = acceptPipeLoop
 
   if not(isNil(cbproc)):
-    result.aovl.data = CompletionData(fd: serverSocket, cb: cb,
+    result.aovl.data = CompletionData(cb: cb,
                                       udata: cast[pointer](result))
   else:
     if host.family == AddressFamily.Unix:

vendor/nim-chronos/tests/testhttpclient.nim (vendored): 4 changes

@@ -690,11 +690,11 @@ suite "HTTP client testing suite":
 
   proc testBasicAuthorization(): Future[bool] {.async.} =
     var session = createSession(true, maxRedirections = 10)
-    let url = parseUri("https://guest:guest@jigsaw.w3.org/HTTP/Basic/")
+    let url = parseUri("https://user:passwd@httpbin.org/basic-auth/user/passwd")
     let resp = await session.fetch(url)
     await session.closeWait()
     if (resp.status == 200) and
-       ("Your browser made it!" in cast[string](resp.data)):
+       ("true," in cast[string](resp.data)):
       return true
     else:
       return false
|
17
vendor/nim-chronos/tests/testmacro.nim
vendored
17
vendor/nim-chronos/tests/testmacro.nim
vendored
@ -6,6 +6,7 @@
|
|||||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
# MIT license (LICENSE-MIT)
|
# MIT license (LICENSE-MIT)
|
||||||
import unittest2
|
import unittest2
|
||||||
|
import macros
|
||||||
import ../chronos
|
import ../chronos
|
||||||
|
|
||||||
when defined(nimHasUsed): {.used.}
|
when defined(nimHasUsed): {.used.}
|
||||||
@ -80,3 +81,19 @@ suite "Macro transformations test suite":
|
|||||||
check waitFor(testAwait()) == true
|
check waitFor(testAwait()) == true
|
||||||
test "`awaitne` command test":
|
test "`awaitne` command test":
|
||||||
check waitFor(testAwaitne()) == true
|
check waitFor(testAwaitne()) == true
|
||||||
|
|
||||||
|
|
||||||
|
test "template async macro transformation":
|
||||||
|
template templatedAsync(name, restype: untyped): untyped =
|
||||||
|
proc name(): Future[restype] {.async.} = return @[4]
|
||||||
|
|
||||||
|
templatedAsync(testTemplate, seq[int])
|
||||||
|
check waitFor(testTemplate()) == @[4]
|
||||||
|
|
||||||
|
macro macroAsync(name, restype, innerrestype: untyped): untyped =
|
||||||
|
quote do:
|
||||||
|
proc `name`(): Future[`restype`[`innerrestype`]] {.async.} = return
|
||||||
|
|
||||||
|
type OpenObject = object
|
||||||
|
macroAsync(testMacro, seq, OpenObject)
|
||||||
|
check waitFor(testMacro()).len == 0
|
||||||
|

vendor/nim-chronos/tests/testsignal.nim (vendored): 11 changes

@@ -15,13 +15,14 @@ when not defined(windows):
 
 suite "Signal handling test suite":
   when not defined(windows):
-    var signalCounter = 0
+    var
+      signalCounter = 0
+      sigfd = -1
 
     proc signalProc(udata: pointer) =
-      var cdata = cast[ptr CompletionData](udata)
-      signalCounter = cast[int](cdata.udata)
+      signalCounter = cast[int](udata)
       try:
-        removeSignal(int(cdata.fd))
+        removeSignal(sigfd)
       except Exception as exc:
         raiseAssert exc.msg
 

@@ -30,7 +31,7 @@ suite "Signal handling test suite":
 
   proc test(signal, value: int): bool =
     try:
-      discard addSignal(signal, signalProc, cast[pointer](value))
+      sigfd = addSignal(signal, signalProc, cast[pointer](value))
     except Exception as exc:
       raiseAssert exc.msg
     var fut = asyncProc()

vendor/nim-eth/eth/utp/utp_socket.nim (vendored): 26 changes

@@ -1048,6 +1048,7 @@ proc processPacketInternal(socket: UtpSocket, p: Packet) =
     socketAckNr = socket.ackNr,
     socketSeqNr = socket.seqNr,
     windowPackets = socket.curWindowPackets,
+    rcvBufferSize = socket.offset,
     packetType = p.header.pType,
     seqNr = p.header.seqNr,
     ackNr = p.header.ackNr,

@@ -1463,6 +1464,12 @@ template shiftBuffer(t, c: untyped) =
     (t).offset = 0
 
 proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
+  debug "Handling incoming read",
+    rcvBufferSize = socket.offset,
+    reorderBufferSize = socket.inBufferBytes,
+    socketAtEOF = socket.atEof(),
+    readTillEOF = readReq.bytesToRead == 0
+
   if readReq.reader.finished():
     return ReadCancelled
 

@@ -1477,9 +1484,18 @@ proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
       readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, socket.offset - 1))
       socket.shiftBuffer(socket.offset)
       if (socket.atEof()):
+        debug "Read finished",
+          bytesRead = len(readReq.bytesAvailable),
+          socektAtEof = socket.atEof()
+
         readReq.reader.complete(readReq.bytesAvailable)
         return ReadFinished
       else:
+        debug "Read not finished",
+          bytesRead = len(readReq.bytesAvailable),
+          socektAtEof = socket.atEof()
+
         return ReadNotFinished
     else:
       let bytesAlreadyRead = len(readReq.bytesAvailable)

@@ -1488,9 +1504,17 @@ proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
       readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, count - 1))
       socket.shiftBuffer(count)
       if (len(readReq.bytesAvailable) == readReq.bytesToRead):
+        debug "Read finished",
+          bytesRead = len(readReq.bytesAvailable),
+          socektAtEof = socket.atEof()
+
         readReq.reader.complete(readReq.bytesAvailable)
         return ReadFinished
       else:
+        debug "Read not finished",
+          bytesRead = len(readReq.bytesAvailable),
+          socektAtEof = socket.atEof()
+
         return ReadNotFinished
 
 proc eventLoop(socket: UtpSocket) {.async.} =

@@ -1503,7 +1527,7 @@ proc eventLoop(socket: UtpSocket) {.async.} =
 
       # we processed a packet and rcv buffer size is larger than 0,
      # check if we can finish some pending readers
-      while socket.pendingReads.len() > 0 and socket.offset > 0:
+      while socket.pendingReads.len() > 0:
        let readResult = socket.onRead(socket.pendingReads[0])
        case readResult
        of ReadFinished:

@@ -120,7 +120,6 @@ procSuite "Utp protocol over udp tests with loss and delays":
       bytesPerRead: int = 0): TestCase =
     TestCase(maxDelay: maxDelay, dropRate: dropRate, bytesToTransfer: bytesToTransfer, cfg: cfg, bytesPerRead: bytesPerRead)
 
-
 let testCases = @[
   TestCase.init(45, 10, 40000),
   TestCase.init(25, 15, 40000),

@@ -228,3 +227,32 @@ procSuite "Utp protocol over udp tests with loss and delays":
       await clientProtocol.shutdownWait()
       await serverProtocol.shutdownWait()
 
+  let testCase2 = @[
+    TestCase.init(45, 0, 40000),
+    TestCase.init(45, 0, 80000),
+    TestCase.init(25, 15, 40000),
+    TestCase.init(15, 5, 40000, SocketConfig.init(optRcvBuffer = uint32(10000), remoteWindowResetTimeout = seconds(5)))
+  ]
+
+  asyncTest "Write large data and read till EOF":
+    for testCase in testCase2:
+      let (
+        clientProtocol,
+        clientSocket,
+        serverProtocol,
+        serverSocket) = await testScenario(testCase.maxDelay, testCase.dropRate, testcase.cfg)
+
+
+      let numBytes = testCase.bytesToTransfer
+      let bytesToTransfer = generateByteArray(rng[], numBytes)
+
+      discard await clientSocket.write(bytesToTransfer)
+      clientSocket.close()
+
+      let read = await serverSocket.read()
+
+      check:
+        read == bytesToTransfer
+
+      await clientProtocol.shutdownWait()
+      await serverProtocol.shutdownWait()

vendor/nim-faststreams/faststreams.nimble (vendored): 3 changes

@@ -29,6 +29,9 @@ proc test(args, path: string) =
   # " -d:asyncBackend=asyncdispatch " & common_args & " " & path
 
 task test, "Run all tests":
+  test "-d:debug --threads:off", "tests/all_tests"
+  test "-d:release --threads:off", "tests/all_tests"
+  test "-d:danger --threads:off", "tests/all_tests"
   test "-d:debug --threads:on", "tests/all_tests"
   test "-d:release --threads:on", "tests/all_tests"
   test "-d:danger --threads:on", "tests/all_tests"

@@ -11,7 +11,7 @@ type
     startAddr*, endAddr*: ptr byte
 
   Page* = object
-    consumedTo*: Natural
+    consumedTo*: int
    writtenTo*: Natural
    data*: ref string
 

@@ -442,7 +442,7 @@ proc fileInput*(filename: string,
     return fileInput(file, offset, pageSize)
 
 proc unsafeMemoryInput*(mem: openArray[byte]): InputStreamHandle =
-  let head = unsafeAddr mem[0]
+  let head = cast[ptr byte](mem)
 
   makeHandle InputStream(
     span: PageSpan(
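
A note on the `unsafeMemoryInput` change just above (my reading; the diff itself does not state the motivation): `unsafeAddr mem[0]` has to index the first element and therefore assumes a non-empty input, while casting the openArray parameter, which is passed as a hidden (pointer, length) pair, yields the data pointer even for an empty input. A small sketch of the same trick:

    proc firstBytePtr(mem: openArray[byte]): ptr byte =
      # same cast as in the diff: reinterpret the hidden openArray descriptor
      cast[ptr byte](mem)

    var empty: seq[byte]
    discard firstBytePtr(empty)       # fine
    # discard unsafeAddr empty[0]     # would fail bounds checking on empty input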

vendor/nim-faststreams/tests/base64.nim (vendored): 2 changes

@@ -121,7 +121,7 @@ proc base64decode*(i: InputStream, o: OutputStream) {.fsMultiSync.} =
       inputChar(d)
       outputChar(c shl 6 or d shr 0)
   elif i.readable:
-    raise newException(ValueError, "The input stream has insufficient nymber of bytes for base64 decoding")
+    raise newException(ValueError, "The input stream has insufficient number of bytes for base64 decoding")
 
   close o
 
|
25
vendor/nim-faststreams/tests/test_outputs.nim
vendored
25
vendor/nim-faststreams/tests/test_outputs.nim
vendored
@ -400,3 +400,28 @@ suite "randomized tests":
|
|||||||
if fileExists(randomBytesFileName):
|
if fileExists(randomBytesFileName):
|
||||||
removeFile randomBytesFileName
|
removeFile randomBytesFileName
|
||||||
|
|
||||||
|
test "ensureRunway":
|
||||||
|
var output = memoryOutput()
|
||||||
|
|
||||||
|
const writes = 256
|
||||||
|
var buffer = newSeq[byte](writes)
|
||||||
|
let totalBytes = block:
|
||||||
|
var tmp = 0
|
||||||
|
for i in 0..<writes:
|
||||||
|
tmp += i
|
||||||
|
buffer[i] = byte(i)
|
||||||
|
tmp
|
||||||
|
|
||||||
|
output.ensureRunway(totalBytes)
|
||||||
|
|
||||||
|
for i in 0..<writes:
|
||||||
|
output.write(buffer.toOpenArray(0, i - 1))
|
||||||
|
|
||||||
|
output.flush()
|
||||||
|
|
||||||
|
let res = output.getOutput()
|
||||||
|
var j = 0
|
||||||
|
for i in 0..<writes:
|
||||||
|
check:
|
||||||
|
res[j..<j+i] == buffer[0..<i]
|
||||||
|
j += i
|
||||||
|
@ -41,7 +41,7 @@ when fsAsyncSupport:
|
|||||||
template timeit(timerVar: var Nanos, code: untyped) =
|
template timeit(timerVar: var Nanos, code: untyped) =
|
||||||
let t0 = getTicks()
|
let t0 = getTicks()
|
||||||
code
|
code
|
||||||
timerVar = int(getTicks() - t0) div 1000000
|
timerVar = int64(getTicks() - t0) div 1000000
|
||||||
|
|
||||||
proc getOutput(sp: AsyncInputStream, T: type string): Future[string] {.async.} =
|
proc getOutput(sp: AsyncInputStream, T: type string): Future[string] {.async.} =
|
||||||
# this proc is a quick hack to let the test pass
|
# this proc is a quick hack to let the test pass
|
||||||
|

vendor/nim-json-rpc/.appveyor.yml (vendored): 39 changes

@@ -1,39 +0,0 @@
-version: '{build}'
-
-image: Visual Studio 2015
-
-cache:
-  - NimBinaries
-
-matrix:
-  # We always want 32 and 64-bit compilation
-  fast_finish: false
-
-platform:
-  - x86
-  - x64
-
-# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-clone_depth: 10
-
-install:
-  # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
-  - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
-  - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
-
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - SET PATH=%CD%\Nim\bin;%PATH%
-
-build_script:
-  - cd C:\projects\%APPVEYOR_PROJECT_SLUG%
-  - nimble install -y
-
-test_script:
-  - nimble test
-
-deploy: off
vendor/nim-json-rpc/.travis.yml
vendored
27
vendor/nim-json-rpc/.travis.yml
vendored
@ -1,27 +0,0 @@
|
|||||||
language: c
|
|
||||||
|
|
||||||
# https://docs.travis-ci.com/user/caching/
|
|
||||||
cache:
|
|
||||||
directories:
|
|
||||||
- NimBinaries
|
|
||||||
|
|
||||||
git:
|
|
||||||
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
|
|
||||||
depth: 10
|
|
||||||
|
|
||||||
os:
|
|
||||||
- linux
|
|
||||||
- osx
|
|
||||||
|
|
||||||
install:
|
|
||||||
# build nim from our own branch - this to avoid the day-to-day churn and
|
|
||||||
# regressions of the fast-paced Nim development while maintaining the
|
|
||||||
# flexibility to apply patches
|
|
||||||
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
|
||||||
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
|
|
||||||
- export PATH=$PWD/Nim/bin:$PATH
|
|
||||||
|
|
||||||
script:
|
|
||||||
- nimble install -y
|
|
||||||
- nimble test
|
|
||||||
|
|

vendor/nim-json-rpc/json_rpc.nimble (vendored): 8 changes

@@ -20,7 +20,13 @@ requires "nim >= 1.2.0",
 proc buildBinary(name: string, srcDir = "./", params = "", cmdParams = "") =
   if not dirExists "build":
     mkDir "build"
-  exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") & " -r -f --skipUserCfg:on --skipParentCfg:on --verbosity:0 --hints:off --debuginfo --path:'.' --threads:on -d:chronicles_log_level=ERROR --out:./build/" & name & " " & params & " " & srcDir & name & ".nim" & " " & cmdParams
+  exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") &
+    " -r -f --skipUserCfg:on --skipParentCfg:on --verbosity:0" &
+    " --debuginfo --path:'.' --threads:on -d:chronicles_log_level=ERROR" &
+    " --styleCheck:usages --styleCheck:hint" &
+    " --hint[XDeclaredButNotUsed]:off --hint[Processing]:off " &
+    " --out:./build/" & name & " " & params & " " & srcDir & name & ".nim" &
+    " " & cmdParams
 
 task test, "run tests":
   buildBinary "all", "tests/",
|
2
vendor/nim-json-rpc/json_rpc/client.nim
vendored
2
vendor/nim-json-rpc/json_rpc/client.nim
vendored
@ -87,7 +87,7 @@ proc createRpcProc(procName, parameters, callBody: NimNode): NimNode =
|
|||||||
# make proc async
|
# make proc async
|
||||||
result.addPragma ident"async"
|
result.addPragma ident"async"
|
||||||
# export this proc
|
# export this proc
|
||||||
result[0] = nnkPostFix.newTree(ident"*", newIdentNode($procName))
|
result[0] = nnkPostfix.newTree(ident"*", newIdentNode($procName))
|
||||||
|
|
||||||
proc toJsonArray(parameters: NimNode): NimNode =
|
proc toJsonArray(parameters: NimNode): NimNode =
|
||||||
# outputs an array of jsonified parameters
|
# outputs an array of jsonified parameters
|
||||||
|
@ -65,17 +65,38 @@ method call*(client: RpcHttpClient, name: string,
|
|||||||
let
|
let
|
||||||
id = client.getNextId()
|
id = client.getNextId()
|
||||||
reqBody = $rpcCallNode(name, params, id)
|
reqBody = $rpcCallNode(name, params, id)
|
||||||
req = HttpClientRequestRef.post(client.httpSession,
|
|
||||||
client.httpAddress.get,
|
var req: HttpClientRequestRef
|
||||||
body = reqBody.toOpenArrayByte(0, reqBody.len - 1),
|
var res: HttpClientResponseRef
|
||||||
headers = headers)
|
|
||||||
res =
|
defer:
|
||||||
try:
|
# BEWARE!
|
||||||
await req.send()
|
# Using multiple defer statements in this function or multiple
|
||||||
except CancelledError as e:
|
# try/except blocks within a single defer statement doesn't
|
||||||
raise e
|
# produce the desired run-time code, so we use slightly bizzare
|
||||||
except CatchableError as e:
|
# code to ensure the exceptions safety of this function:
|
||||||
raise (ref RpcPostError)(msg: "Failed to send POST Request with JSON-RPC.", parent: e)
|
try:
|
||||||
|
var closeFutures = newSeq[Future[void]]()
|
||||||
|
if req != nil: closeFutures.add req.closeWait()
|
||||||
|
if res != nil: closeFutures.add res.closeWait()
|
||||||
|
if closeFutures.len > 0: await allFutures(closeFutures)
|
||||||
|
except CatchableError as err:
|
||||||
|
# TODO
|
||||||
|
# `close` functions shouldn't raise in general, but we first
|
||||||
|
# need to ensure this through exception tracking in Chronos
|
||||||
|
debug "Error closing JSON-RPC HTTP resuest/response", err = err.msg
|
||||||
|
|
||||||
|
req = HttpClientRequestRef.post(client.httpSession,
|
||||||
|
client.httpAddress.get,
|
||||||
|
body = reqBody.toOpenArrayByte(0, reqBody.len - 1),
|
||||||
|
headers = headers)
|
||||||
|
res =
|
||||||
|
try:
|
||||||
|
await req.send()
|
||||||
|
except CancelledError as e:
|
||||||
|
raise e
|
||||||
|
except CatchableError as e:
|
||||||
|
raise (ref RpcPostError)(msg: "Failed to send POST Request with JSON-RPC.", parent: e)
|
||||||
|
|
||||||
if res.status < 200 or res.status >= 300: # res.status is not 2xx (success)
|
if res.status < 200 or res.status >= 300: # res.status is not 2xx (success)
|
||||||
raise newException(ErrorResponse, "POST Response: " & $res.status)
|
raise newException(ErrorResponse, "POST Response: " & $res.status)
|
||||||
|
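
The BEWARE comment above is the heart of this hunk: every cleanup future is collected and awaited inside a single try within a single defer, so a failure while closing one object can neither skip closing the other nor leak an exception out of the cleanup path. A stripped-down sketch of the same pattern, assuming only chronos and using placeholder futures instead of the real request/response types:

    import chronos

    proc requestWithCleanup() {.async.} =
      var reqClose, resClose: Future[void]   # stand-ins for req/res closeWait() futures
      defer:
        try:
          var closeFutures = newSeq[Future[void]]()
          if reqClose != nil: closeFutures.add reqClose
          if resClose != nil: closeFutures.add resClose
          if closeFutures.len > 0: await allFutures(closeFutures)
        except CatchableError:
          discard   # cleanup errors are swallowed here (logged in the real code), never re-raised
      # the body that assigns reqClose/resClose and may raise would go here
      discard

    waitFor requestWithCleanup()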

@@ -79,7 +79,7 @@ proc processData(client: RpcWebSocketClient) {.async.} =
   else:
     let ws = client.transport
     try:
-      while ws.readystate != ReadyState.Closed:
+      while ws.readyState != ReadyState.Closed:
         var value = await ws.recvMsg()
 
         if value.len == 0:
|
4
vendor/nim-json-rpc/json_rpc/jsonmarshal.nim
vendored
4
vendor/nim-json-rpc/json_rpc/jsonmarshal.nim
vendored
@ -69,7 +69,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var int) =
|
|||||||
proc fromJson*[T: ref object](n: JsonNode, argName: string, result: var T) =
|
proc fromJson*[T: ref object](n: JsonNode, argName: string, result: var T) =
|
||||||
n.kind.expect(JObject, argName)
|
n.kind.expect(JObject, argName)
|
||||||
result = new T
|
result = new T
|
||||||
for k, v in fieldpairs(result[]):
|
for k, v in fieldPairs(result[]):
|
||||||
fromJson(n[k], k, v)
|
fromJson(n[k], k, v)
|
||||||
|
|
||||||
proc fromJson*(n: JsonNode, argName: string, result: var int64) =
|
proc fromJson*(n: JsonNode, argName: string, result: var int64) =
|
||||||

@@ -165,7 +165,7 @@ iterator paramsIter(params: NimNode): tuple[name, ntype: NimNode] =
       yield (arg[j], argType)
 
 iterator paramsRevIter(params: NimNode): tuple[name, ntype: NimNode] =
-  for i in countDown(params.len-1,1):
+  for i in countdown(params.len-1,1):
     let arg = params[i]
     let argType = arg[^2]
     for j in 0 ..< arg.len-2:
|
@ -1,5 +1,5 @@
|
|||||||
import
|
import
|
||||||
chronicles, httputils, chronos, websock/websock,
|
chronicles, httputils, chronos, websock/[websock, types],
|
||||||
websock/extensions/compression/deflate,
|
websock/extensions/compression/deflate,
|
||||||
stew/byteutils, json_serialization/std/net,
|
stew/byteutils, json_serialization/std/net,
|
||||||
".."/[errors, server]
|
".."/[errors, server]
|
||||||

@@ -10,17 +10,52 @@ logScope:
   topics = "JSONRPC-WS-SERVER"
 
 type
+  RpcWebSocketServerAuth* = ##\
+    ## Authenticator function. On error, the resulting `HttpCode` is sent back\
+    ## to the client and the `string` argument will be used in an exception,\
+    ## following.
+    proc(req: HttpTable): Result[void,(HttpCode,string)]
+      {.gcsafe, raises: [Defect].}
+
   RpcWebSocketServer* = ref object of RpcServer
+    authHook: Option[RpcWebSocketServerAuth] ## Authorization call back handler
     server: StreamServer
     wsserver: WSServer
 
+  HookEx = ref object of Hook
+    handler: RpcWebSocketServerAuth ## from `RpcWebSocketServer`
+    request: HttpRequest            ## current request needed for error response
+
+proc authWithHtCodeResponse(ctx: Hook, headers: HttpTable):
+    Future[Result[void, string]] {.async, gcsafe, raises: [Defect].} =
+  ## Wrapper around authorization handler which is stored in the
+  ## extended `Hook` object.
+  let
+    cty = ctx.HookEx
+    rc = cty.handler(headers)
+  if rc.isErr:
+    await cty.request.stream.writer.sendError(rc.error[0])
+    return err(rc.error[1])
+  return ok()
+
 proc handleRequest(rpc: RpcWebSocketServer, request: HttpRequest) {.async.} =
   trace "Handling request:", uri = request.uri.path
   trace "Initiating web socket connection."
 
+  # Authorization handler constructor (if enabled)
+  var hooks: seq[Hook]
+  if rpc.authHook.isSome:
+    let hookEx = HookEx(
+      append: nil,
+      request: request,
+      handler: rpc.authHook.get,
+      verify: authWithHtCodeResponse)
+    hooks = @[hookEx.Hook]
+
   try:
     let server = rpc.wsserver
-    let ws = await server.handleRequest(request)
-    if ws.readyState != Open:
+    let ws = await server.handleRequest(request, hooks = hooks)
+    if ws.readyState != ReadyState.Open:
       error "Failed to open websocket connection"
       return
@ -58,24 +93,26 @@ proc handleRequest(rpc: RpcWebSocketServer, request: HttpRequest) {.async.} =
|
|||||||
except WebSocketError as exc:
|
except WebSocketError as exc:
|
||||||
error "WebSocket error:", exception = exc.msg
|
error "WebSocket error:", exception = exc.msg
|
||||||
|
|
||||||
proc initWebsocket(rpc: RpcWebSocketServer, compression: bool) =
|
proc initWebsocket(rpc: RpcWebSocketServer, compression: bool,
|
||||||
|
authHandler: Option[RpcWebSocketServerAuth]) =
|
||||||
if compression:
|
if compression:
|
||||||
let deflateFactory = deflateFactory()
|
let deflateFactory = deflateFactory()
|
||||||
rpc.wsserver = WSServer.new(factories = [deflateFactory])
|
rpc.wsserver = WSServer.new(factories = [deflateFactory])
|
||||||
else:
|
else:
|
||||||
rpc.wsserver = WSServer.new()
|
rpc.wsserver = WSServer.new()
|
||||||
|
rpc.authHook = authHandler
|
||||||
|
|
||||||
proc newRpcWebSocketServer*(
|
proc newRpcWebSocketServer*(
|
||||||
address: TransportAddress,
|
address: TransportAddress,
|
||||||
compression: bool = false,
|
compression: bool = false,
|
||||||
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,
|
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,ServerFlags.ReuseAddr},
|
||||||
ServerFlags.ReuseAddr}): RpcWebSocketServer =
|
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
|
||||||
|
|
||||||
var server = new(RpcWebSocketServer)
|
var server = new(RpcWebSocketServer)
|
||||||
proc processCallback(request: HttpRequest): Future[void] =
|
proc processCallback(request: HttpRequest): Future[void] =
|
||||||
handleRequest(server, request)
|
handleRequest(server, request)
|
||||||
|
|
||||||
server.initWebsocket(compression)
|
server.initWebsocket(compression, authHandler)
|
||||||
server.server = HttpServer.create(
|
server.server = HttpServer.create(
|
||||||
address,
|
address,
|
||||||
processCallback,
|
processCallback,
|
||||||
@ -88,13 +125,14 @@ proc newRpcWebSocketServer*(
|
|||||||
host: string,
|
host: string,
|
||||||
port: Port,
|
port: Port,
|
||||||
compression: bool = false,
|
compression: bool = false,
|
||||||
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,
|
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr},
|
||||||
ServerFlags.ReuseAddr}): RpcWebSocketServer =
|
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
|
||||||
|
|
||||||
newRpcWebSocketServer(
|
newRpcWebSocketServer(
|
||||||
initTAddress(host, port),
|
initTAddress(host, port),
|
||||||
compression,
|
compression,
|
||||||
flags
|
flags,
|
||||||
|
authHandler
|
||||||
)
|
)
|
||||||
|
|
||||||
proc newRpcWebSocketServer*(
|
proc newRpcWebSocketServer*(
|
||||||
@ -106,13 +144,14 @@ proc newRpcWebSocketServer*(
|
|||||||
ServerFlags.ReuseAddr},
|
ServerFlags.ReuseAddr},
|
||||||
tlsFlags: set[TLSFlags] = {},
|
tlsFlags: set[TLSFlags] = {},
|
||||||
tlsMinVersion = TLSVersion.TLS12,
|
tlsMinVersion = TLSVersion.TLS12,
|
||||||
tlsMaxVersion = TLSVersion.TLS12): RpcWebSocketServer =
|
tlsMaxVersion = TLSVersion.TLS12,
|
||||||
|
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
|
||||||
|
|
||||||
var server = new(RpcWebSocketServer)
|
var server = new(RpcWebSocketServer)
|
||||||
proc processCallback(request: HttpRequest): Future[void] =
|
proc processCallback(request: HttpRequest): Future[void] =
|
||||||
handleRequest(server, request)
|
handleRequest(server, request)
|
||||||
|
|
||||||
server.initWebsocket(compression)
|
server.initWebsocket(compression, authHandler)
|
||||||
server.server = TlsHttpServer.create(
|
server.server = TlsHttpServer.create(
|
||||||
address,
|
address,
|
||||||
tlsPrivateKey,
|
tlsPrivateKey,
|
||||||
@ -136,7 +175,8 @@ proc newRpcWebSocketServer*(
|
|||||||
ServerFlags.ReuseAddr},
|
ServerFlags.ReuseAddr},
|
||||||
tlsFlags: set[TLSFlags] = {},
|
tlsFlags: set[TLSFlags] = {},
|
||||||
tlsMinVersion = TLSVersion.TLS12,
|
tlsMinVersion = TLSVersion.TLS12,
|
||||||
tlsMaxVersion = TLSVersion.TLS12): RpcWebSocketServer =
|
tlsMaxVersion = TLSVersion.TLS12,
|
||||||
|
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
|
||||||
|
|
||||||
newRpcWebSocketServer(
|
newRpcWebSocketServer(
|
||||||
initTAddress(host, port),
|
initTAddress(host, port),
|
||||||
@ -146,7 +186,8 @@ proc newRpcWebSocketServer*(
|
|||||||
flags,
|
flags,
|
||||||
tlsFlags,
|
tlsFlags,
|
||||||
tlsMinVersion,
|
tlsMinVersion,
|
||||||
tlsMaxVersion
|
tlsMaxVersion,
|
||||||
|
authHandler
|
||||||
)
|
)
|
||||||
|
|
||||||
proc start*(server: RpcWebSocketServer) =
|
proc start*(server: RpcWebSocketServer) =
|
||||||
|
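The hunks above thread an optional `authHandler` through every `newRpcWebSocketServer` overload and run it as a websock `Hook` before the upgrade completes. Below is a hedged sketch of how a caller might supply one; the import path, header name and token value are assumptions for illustration, not taken from the diff.

```nim
import std/options
import chronos, httputils, stew/results
import json_rpc/servers/wsserver   # assumed module path for RpcWebSocketServer

proc myAuth(req: HttpTable): Result[void, (HttpCode, string)]
    {.gcsafe, raises: [Defect].} =
  # Reject the WebSocket upgrade unless a (made-up) bearer token is present;
  # `getString` is httputils' header accessor.
  if req.getString("authorization") == "Bearer secret-token":
    return ok()
  return err((Http401, "missing or invalid Authorization header"))

let handler: RpcWebSocketServerAuth = myAuth  # matches the callback signature
let server = newRpcWebSocketServer(
  initTAddress("127.0.0.1:8546"),
  authHandler = some(handler))
server.start()
```

On a rejected request the wrapper in the diff writes the returned `HttpCode` back to the client and uses the string in the resulting error.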
vendor/nim-json-rpc/tests/ethprocs.nim (vendored, 2 lines changed)

@@ -43,7 +43,7 @@ proc addEthRpcs*(server: RpcServer) =
     var rawData: seq[byte]
     rawData = nimcrypto.fromHex(data.string)
     # data will have 0x prefix
-    result = hexDataStr "0x" & $keccak_256.digest(rawData)
+    result = hexDataStr "0x" & $keccak256.digest(rawData)

   server.rpc("net_version") do() -> string:
     ## Returns string of the current network id:
vendor/nim-json-rpc/tests/ethtypes.nim (vendored, 4 lines changed)

@@ -42,8 +42,8 @@ type
     gasLimit*: int # the maximum gas allowed in this block.
     gasUsed*: int # the total used gas by all transactions in this block.
     timestamp*: int # the unix timestamp for when the block was collated.
-    transactions*: seq[Uint256] # list of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter.
-    uncles*: seq[Uint256] # list of uncle hashes.
+    transactions*: seq[UInt256] # list of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter.
+    uncles*: seq[UInt256] # list of uncle hashes.

   TransactionObject* = object # A transaction object, or null when no transaction was found:
     hash*: UInt256 # hash of the transaction.
vendor/nim-json-rpc/tests/testethcalls.nim (vendored, 2 lines changed)

@@ -22,7 +22,7 @@ func rpcDynamicName(name: string): string =

   ## Create custom RPC with StUint input parameter
   server.rpc(rpcDynamicName "uint256Param") do(i: UInt256):
-    let r = i + 1.stUint(256)
+    let r = i + 1.stuint(256)
     return %r

   ## Create custom RPC with StUInt return parameter
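For context on the `stUint`/`stuint` rename above: `stuint` is stint's constructor for fixed-width unsigned integers, so the test builds a `UInt256` from an ordinary integer literal. A small illustrative sketch:

```nim
import stint

let i = 41.stuint(256)      # UInt256 with value 41
let r = i + 1.stuint(256)   # arithmetic stays within 256 bits
assert r == 42.stuint(256)
```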
vendor/nim-json-rpc/tests/testrpcmacro.nim (vendored, 22 lines changed)

@@ -153,39 +153,39 @@ suite "Server types":
     check r == inp

   test "Array parameters":
-    let r1 = waitfor s.executeMethod("rpc.arrayParam", %[%[1, 2, 3], %"hello"])
+    let r1 = waitFor s.executeMethod("rpc.arrayParam", %[%[1, 2, 3], %"hello"])
     var ckR1 = %[1, 2, 3, 0, 0, 0]
     ckR1.elems.add %"hello"
     check r1 == ckR1

   test "Seq parameters":
-    let r2 = waitfor s.executeMethod("rpc.seqParam", %[%"abc", %[1, 2, 3, 4, 5]])
+    let r2 = waitFor s.executeMethod("rpc.seqParam", %[%"abc", %[1, 2, 3, 4, 5]])
     var ckR2 = %["abc"]
     for i in 0..4: ckR2.add %(i + 1)
     check r2 == ckR2

   test "Object parameters":
-    let r = waitfor s.executeMethod("rpc.objParam", %[%"abc", testObj])
+    let r = waitFor s.executeMethod("rpc.objParam", %[%"abc", testObj])
     check r == testObj

   test "Simple return types":
     let
       inp = %99
-      r1 = waitfor s.executeMethod("rpc.returnTypeSimple", %[%inp])
+      r1 = waitFor s.executeMethod("rpc.returnTypeSimple", %[%inp])
     check r1 == inp

   test "Complex return types":
     let
       inp = 99
-      r1 = waitfor s.executeMethod("rpc.returnTypeComplex", %[%inp])
+      r1 = waitFor s.executeMethod("rpc.returnTypeComplex", %[%inp])
     check r1 == %*{"x": %[1, inp, 3], "y": "test"}

   test "Option types":
     let
       inp1 = MyOptional(maybeInt: some(75))
       inp2 = MyOptional()
-      r1 = waitfor s.executeMethod("rpc.optional", %[%inp1])
-      r2 = waitfor s.executeMethod("rpc.optional", %[%inp2])
+      r1 = waitFor s.executeMethod("rpc.optional", %[%inp1])
+      r2 = waitFor s.executeMethod("rpc.optional", %[%inp2])
     check r1 == %inp1
     check r2 == %inp2

@@ -196,19 +196,19 @@ suite "Server types":
   test "Runtime errors":
     expect ValueError:
       # root param not array
-      discard waitfor s.executeMethod("rpc.arrayParam", %"test")
+      discard waitFor s.executeMethod("rpc.arrayParam", %"test")
     expect ValueError:
       # too big for array
-      discard waitfor s.executeMethod("rpc.arrayParam", %[%[0, 1, 2, 3, 4, 5, 6], %"hello"])
+      discard waitFor s.executeMethod("rpc.arrayParam", %[%[0, 1, 2, 3, 4, 5, 6], %"hello"])
     expect ValueError:
       # wrong sub parameter type
-      discard waitfor s.executeMethod("rpc.arrayParam", %[%"test", %"hello"])
+      discard waitFor s.executeMethod("rpc.arrayParam", %[%"test", %"hello"])
     expect ValueError:
       # wrong param type
       discard waitFor s.executeMethod("rpc.differentParams", %[%"abc", %1])

   test "Multiple variables of one type":
-    let r = waitfor s.executeMethod("rpc.multiVarsOfOneType", %[%"hello", %"world"])
+    let r = waitFor s.executeMethod("rpc.multiVarsOfOneType", %[%"hello", %"world"])
     check r == %"hello world"

   test "Optional arg":
vendor/nim-libbacktrace/Makefile (vendored, 3 lines changed)

@@ -183,7 +183,8 @@ clean:
	cd vendor/libbacktrace-upstream && \
		{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; }
	cd vendor/libunwind && \
-		{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; }
+		{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; } && \
+		rm -rf CMakeCache.txt CMakeFiles cmake_install.cmake install_manifest.txt Makefile

$(SILENT_TARGET_PREFIX).SILENT:
vendor/nim-libbacktrace/README.md (vendored, 3 lines changed)

@@ -11,6 +11,9 @@ overhead, by adding `nimln_()`, `nimfr_()` calls all over the place. The
 problem is being discussed upstream in [this GitHub
 issue](https://github.com/nim-lang/Nim/issues/12702).

+In practice, you can get as much as 66% improved performance by disabling the
+default stack tracing: https://github.com/status-im/nimbus-eth2/pull/3466
+
 That `popFrame()` at the end of each C function is particularly problematic,
 since it prevents the C compiler from doing tail-call optimisations.
vendor/nim-libbacktrace/libbacktrace.nim (vendored, 18 lines changed)

@@ -21,26 +21,28 @@ when defined(nimStackTraceOverride) and defined(nimHasStacktracesModule):
 # there, but we might still want to import this module with a global
 # "--import:libbacktrace" Nim compiler flag.
 when not (defined(nimscript) or defined(js)):
-  import algorithm, libbacktrace_wrapper, os, system/ansi_c
+  import algorithm, libbacktrace_wrapper, os, system/ansi_c, strutils

-  const installPath = currentSourcePath.parentDir() / "install" / "usr"
+  const
+    topLevelPath = currentSourcePath.parentDir().replace('\\', '/')
+    installPath = topLevelPath & "/install/usr"

-  {.passc: "-I" & currentSourcePath.parentDir().}
+  {.passc: "-I" & topLevelPath.}

   when defined(cpp):
-    {.passl: installPath / "lib" / "libbacktracenimcpp.a".}
+    {.passl: installPath & "/lib/libbacktracenimcpp.a".}
   else:
-    {.passl: installPath / "lib" / "libbacktracenim.a".}
+    {.passl: installPath & "/lib/libbacktracenim.a".}

   when defined(libbacktraceUseSystemLibs):
     {.passl: "-lbacktrace".}
     when defined(macosx) or defined(windows):
       {.passl: "-lunwind".}
   else:
-    {.passc: "-I" & installPath / "include".}
-    {.passl: installPath / "lib" / "libbacktrace.a".}
+    {.passc: "-I" & installPath & "/include".}
+    {.passl: installPath & "/lib/libbacktrace.a".}
     when defined(macosx) or defined(windows):
-      {.passl: installPath / "lib" / "libunwind.a".}
+      {.passl: installPath & "/lib/libunwind.a".}

   when defined(windows):
     {.passl: "-lpsapi".}
@@ -2,7 +2,7 @@

 # libtool - Provide generalized library-building support services.
 # Generated automatically by config.status (libbacktrace) version-unused
-# Libtool was configured on host fv-az457-821:
+# Libtool was configured on host fv-az129-611:
 # NOTE: Changes made to this file will be lost: look at ltmain.sh.
 #
 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
vendor/nim-libp2p/.github/workflows/ci.yml (vendored, 2 lines changed)

@@ -154,7 +154,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
-          go-version: '^1.15.5'
+          go-version: '~1.15.5'

      - name: Install p2pd
        run: |

@@ -152,7 +152,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
-          go-version: '^1.15.5'
+          go-version: '~1.15.5'

      - name: Install p2pd
        run: |
vendor/nim-libp2p/.pinned (vendored, 16 lines changed)

@@ -1,17 +1,17 @@
-asynctest;https://github.com/markspanbroek/asynctest@#3882ed64ed3159578f796bc5ae0c6b13837fe798
+asynctest;https://github.com/markspanbroek/asynctest@#5347c59b4b057443a014722aa40800cd8bb95c69
-bearssl;https://github.com/status-im/nim-bearssl@#ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7
+bearssl;https://github.com/status-im/nim-bearssl@#65b74302e03912ab5bde64b6da10d05896139007
 chronicles;https://github.com/status-im/nim-chronicles@#2a2681b60289aaf7895b7056f22616081eb1a882
 chronos;https://github.com/status-im/nim-chronos@#87197230779002a2bfa8642f0e2ae07e2349e304
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fbb76f8af8a33ab818184a7d4406d9fee20993be
-faststreams;https://github.com/status-im/nim-faststreams@#37a183153c071539ab870f427c09a1376ba311b9
+faststreams;https://github.com/status-im/nim-faststreams@#c80701f7d23815fab0b6362569f3195957e57856
 httputils;https://github.com/status-im/nim-http-utils@#40048e8b3e69284bdb5d4daa0a16ad93402c55db
-json_serialization;https://github.com/status-im/nim-json-serialization@#4b8f487d2dfdd941df7408ceaa70b174cce02180
+json_serialization;https://github.com/status-im/nim-json-serialization@#461fd03edb300b7946544b34442d1a05d4ef2270
-metrics;https://github.com/status-im/nim-metrics@#71e0f0e354e1f4c59e3dc92153989c8b723c3440
+metrics;https://github.com/status-im/nim-metrics@#11edec862f96e42374bc2d584c84cc88d5d1f95f
 nimcrypto;https://github.com/cheatfate/nimcrypto@#a5742a9a214ac33f91615f3862c7b099aec43b00
 secp256k1;https://github.com/status-im/nim-secp256k1@#e092373a5cbe1fa25abfc62e0f2a5f138dc3fb13
-serialization;https://github.com/status-im/nim-serialization@#37bc0db558d85711967acb16e9bb822b06911d46
+serialization;https://github.com/status-im/nim-serialization@#9631fbd1c81c8b25ff8740df440ca7ba87fa6131
-stew;https://github.com/status-im/nim-stew@#bb705bf17b46d2c8f9bfb106d9cc7437009a2501
+stew;https://github.com/status-im/nim-stew@#419903c9a31ab253cf5cf19f24d9a912dc4b5154
 testutils;https://github.com/status-im/nim-testutils@#aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2
 unittest2;https://github.com/status-im/nim-unittest2@#4e2893eacb916c7678fdc4935ff7420f13bf3a9c
-websock;https://github.com/status-im/nim-websock@#853299e399746eff4096870067cbc61861ecd534
+websock;https://github.com/status-im/nim-websock@#4a7a058843cdb7a6a4fd25a55f0959c51b0b5847
 zlib;https://github.com/status-im/nim-zlib@#74cdeb54b21bededb5a515d36f608bc1850555a2
vendor/nim-libp2p/libp2p.nimble (vendored, 2 lines changed)

@@ -27,7 +27,7 @@ const nimflags =

 proc runTest(filename: string, verify: bool = true, sign: bool = true,
              moreoptions: string = "") =
-  var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics "
+  var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
   excstr.add(" " & getEnv("NIMFLAGS") & " ")
   excstr.add(" " & nimflags & " ")
   excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
@@ -21,6 +21,12 @@ export connection
 logScope:
   topics = "libp2p mplexchannel"

+when defined(libp2p_mplex_metrics):
+  declareHistogram libp2p_mplex_qlen, "message queue length",
+    buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
+  declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
+  declareHistogram libp2p_mplex_qtime, "message queuing time"
+
 when defined(libp2p_network_protocols_metrics):
   declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]

@@ -187,6 +193,8 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
   if s.writes >= MaxWrites:
     debug "Closing connection, too many in-flight writes on channel",
       s, conn = s.conn, writes = s.writes
+    when defined(libp2p_mplex_metrics):
+      libp2p_mplex_qlenclose.inc()
     await s.reset()
     await s.conn.close()
     return

@@ -201,8 +209,14 @@ proc completeWrite(
   try:
     s.writes += 1

-    await fut
-    when defined(libp2p_network_protocols_metrics):
+    when defined(libp2p_mplex_metrics):
+      libp2p_mplex_qlen.observe(s.writes.int64 - 1)
+      libp2p_mplex_qtime.time:
+        await fut
+    else:
+      await fut
+
+    when defined(libp2p_network_protocol_metrics):
       if s.tag.len > 0:
         libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.tag, "out"])
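The new `-d:libp2p_mplex_metrics` instrumentation above uses three nim-metrics primitives: a histogram with explicit buckets, a plain counter, and the `time` template that records how long a block takes. A standalone sketch of those primitives with made-up metric names (compile with `-d:metrics` to make them live):

```nim
import std/os
import metrics

declareHistogram example_qlen, "example queue length",
  buckets = [0.0, 1.0, 2.0, 4.0, 8.0]
declareHistogram example_qtime, "example queuing time"
declareCounter example_qclose, "example queue-full closes"

example_qlen.observe(3)    # record the current queue depth as a sample
example_qtime.time:        # record the duration of this block
  sleep(10)
example_qclose.inc()       # count one simulated "queue full" close
```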
@@ -38,6 +38,8 @@ logScope:
 declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
 declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
 declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
+declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
+declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")

 proc init*(_: type[GossipSubParams]): GossipSubParams =
   GossipSubParams(

@@ -385,9 +387,13 @@ method rpcHandler*(g: GossipSub,

       g.validationSeen.withValue(msgIdSalted, seen): seen[].incl(peer)

+      libp2p_gossipsub_duplicate.inc()
+
       # onto the next message
       continue

+    libp2p_gossipsub_received.inc()
+
     # avoid processing messages we are not interested in
     if msg.topicIDs.allIt(it notin g.topics):
       debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer
@@ -25,6 +25,7 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no
 declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
 declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
 declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
+declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups")

 proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
   g.withPeerStats(p.peerId) do (stats: var PeerStats):

@@ -276,12 +277,15 @@ proc handleIWant*(g: GossipSub,
       trace "peer sent iwant", peer, messageID = mid
       let msg = g.mcache.get(mid)
       if msg.isSome:
+        libp2p_gossipsub_mcache_hit.observe(1)
         # avoid spam
         if peer.iWantBudget > 0:
           messages.add(msg.get())
           dec peer.iWantBudget
         else:
           break
+      else:
+        libp2p_gossipsub_mcache_hit.observe(0)
   return messages

 proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
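The `libp2p_gossipsub_mcache_hit` summary above records 1 on a cache hit and 0 on a miss, so the exported `_sum`/`_count` pair equals hits/lookups, i.e. the hit ratio. A hedged standalone sketch of the same idea with example names:

```nim
import metrics

declareSummary example_cache_hit, "ratio of successful cache lookups"

proc lookup(cache: seq[int], key: int): bool =
  # observe(1) on a hit and observe(0) on a miss: sum/count == hit ratio
  if key in cache:
    example_cache_hit.observe(1)
    result = true
  else:
    example_cache_hit.observe(0)
    result = false

discard lookup(@[1, 2, 3], 2)   # hit
discard lookup(@[1, 2, 3], 9)   # miss
```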
vendor/nim-libp2p/tools/grafana/libp2p-metrics.json (vendored, new file, 1508 lines)

File diff suppressed because it is too large.
vendor/nim-metrics/.github/workflows/ci.yml (vendored, 4 lines changed)

@@ -91,8 +91,8 @@ jobs:
        id: windows-dlls-cache
        uses: actions/cache@v2
        with:
-          path: external/dlls-${{ matrix.target.cpu }}
-          key: 'dlls-${{ matrix.target.cpu }}'
+          path: external/dlls
+          key: 'dlls-v3'

      - name: Install DLLs dependencies (Windows)
        if: >
vendor/nim-metrics/README.md (vendored, 35 lines changed)

@@ -186,6 +186,41 @@ myHistogram.time:
 histogram("one_off_histogram").observe(10)
 ```

+### Custom collectors
+
+Sometimes you need to create metrics on the fly, with a custom `collect()`
+method of a custom collector type.
+
+Let's say you have an USB-attached power meter and, for some reason, you want
+to read the power consumption every time Prometheus reads your metrics:
+
+```nim
+import metrics, times
+
+when defined(metrics):
+  type PowerCollector = ref object of Gauge
+  var powerCollector = PowerCollector.newCollector(name = "power_usage", help = "Instantaneous power usage - in watts.")
+
+  method collect(collector: PowerCollector): Metrics =
+    let timestamp = getTime().toMilliseconds()
+    result[@[]] = @[
+      Metric(
+        name: "power_usage",
+        value: getPowerUsage(), # your power-meter reader
+        timestamp: timestamp,
+      )
+    ]
+```
+
+There's a bit of repetition in the collector and metric names, because we no
+longer have behind-the-scenes name copying/deriving there.
+
+You can output multiple metrics from your custom `collect()` method. It's
+perfectly legal and we do that internally for our system/runtime metrics.
+
+Try not to get creative with dynamic metric names - Prometheus has a hard time
+dealing with that.
+
 ## Labels

 Metric labels are supported for the Prometheus backend, as a way to add extra
vendor/nim-metrics/metrics.nim (vendored, 111 lines changed)

@@ -20,6 +20,7 @@ import locks, net, os, sets, tables, times
 when defined(metrics):
   import algorithm, hashes, random, sequtils, strutils,
         metrics/common
+  export tables # for custom collectors that need to work with the "Metrics" type
 when defined(posix):
   import posix

@@ -210,11 +211,15 @@ proc `$`*(collector: type IgnoredCollector): string = ""

 # for testing
 template value*(collector: Collector | type IgnoredCollector, labelValues: LabelsParam = @[]): float64 =
+  var res: float64
   when defined(metrics) and collector is not IgnoredCollector:
-    {.gcsafe.}:
-      collector.metrics[@labelValues][0].value
+    # Don't access the "metrics" field directly, so we can support custom
+    # collectors.
+    withLock collector.lock:
+      res = collector.collect()[@labelValues][0].value
   else:
-    0.0
+    res = 0.0
+  res

 # for testing
 proc valueByName*(collector: Collector | type IgnoredCollector,

@@ -223,9 +228,10 @@ proc valueByName*(collector: Collector | type IgnoredCollector,
                   extraLabelValues: LabelsParam = @[]): float64 {.raises: [Defect, ValueError].} =
   when defined(metrics) and collector is not IgnoredCollector:
     let allLabelValues = @labelValues & @extraLabelValues
-    for metric in collector.metrics[@labelValues]:
-      if metric.name == metricName and metric.labelValues == allLabelValues:
-        return metric.value
+    withLock collector.lock:
+      for metric in collector.collect()[@labelValues]:
+        if metric.name == metricName and metric.labelValues == allLabelValues:
+          return metric.value
     raise newException(KeyError, "No such metric name for this collector: '" & metricName & "' (label values = " & $allLabelValues & ").")

 ############

@@ -267,6 +273,7 @@ proc collect*(registry: Registry): OrderedTable[Collector, Metrics] =
     var collectorCopy: Collector
     withLock collector.lock:
       deepCopy(collectorCopy, collector)
+    collectorCopy.lock.initLock()
     result[collectorCopy] = collectorCopy.collect()

 proc toText*(registry: Registry, showTimestamp = true): string =

@@ -280,6 +287,28 @@ proc toText*(registry: Registry, showTimestamp = true): string =
 proc `$`*(registry: Registry): string =
   registry.toText()

+#####################
+# custom collectors #
+#####################
+
+when defined(metrics):
+  # Used for custom collectors, to shield the API user from having to deal with
+  # internal details like lock initialisation.
+  # Also used internally, for creating standard collectors, to avoid code
+  # duplication.
+  proc newCollector* [T] (typ: typedesc[T], name: string, help: string, labels: LabelsParam = @[],
+                          registry = defaultRegistry, standardType = "gauge"): T
+                         {.raises: [Defect, ValueError, RegistrationError].} =
+    validateName(name)
+    validateLabels(labels)
+    result = T(name: name,
+              help: help,
+              typ: standardType, # Prometheus does not support a non-standard value here
+              labels: @labels,
+              creationThreadId: getThreadId())
+    result.lock.initLock()
+    result.register(registry)
+
 #######################################
 # export metrics to StatsD and Carbon #
 #######################################

@@ -488,19 +517,13 @@ when defined(metrics):

   # don't document this one, even if we're forced to make it public, because it
   # won't work when all (or some) collectors are disabled
-  proc newCounter*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry, sampleRate = 1.float): Counter {.raises: [Defect, ValueError, RegistrationError].} =
-    validateName(name)
-    validateLabels(labels)
-    result = Counter(name: name,
-                    help: help,
-                    typ: "counter",
-                    labels: @labels,
-                    creationThreadId: getThreadId(),
-                    sampleRate: sampleRate)
-    result.lock.initLock()
+  proc newCounter*(name: string, help: string, labels: LabelsParam = @[],
+                  registry = defaultRegistry, sampleRate = 1.float): Counter
+                  {.raises: [Defect, ValueError, RegistrationError].} =
+    result = Counter.newCollector(name, help, labels, registry, "counter")
+    result.sampleRate = sampleRate
     if labels.len == 0:
       result.metrics[@labels] = newCounterMetrics(name, labels, labels)
-    result.register(registry)

 template declareCounter*(identifier: untyped,
                          help: static string,

@@ -615,18 +638,12 @@ when defined(metrics):
     if result notin gauge.metrics:
       gauge.metrics[result] = newGaugeMetrics(gauge.name, gauge.labels, result)

-  proc newGauge*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry): Gauge {.raises: [Defect, ValueError, RegistrationError].} =
-    validateName(name)
-    validateLabels(labels)
-    result = Gauge(name: name,
-                  help: help,
-                  typ: "gauge",
-                  labels: @labels,
-                  creationThreadId: getThreadId())
-    result.lock.initLock()
+  proc newGauge*(name: string, help: string, labels: LabelsParam = @[],
+                registry = defaultRegistry): Gauge
+                {.raises: [Defect, ValueError, RegistrationError].} =
+    result = Gauge.newCollector(name, help, labels, registry, "gauge")
     if labels.len == 0:
       result.metrics[@labels] = newGaugeMetrics(name, labels, labels)
-    result.register(registry)

 template declareGauge*(identifier: untyped,
                        help: static string,

@@ -769,18 +786,13 @@ when defined(metrics):
     if result notin summary.metrics:
       summary.metrics[result] = newSummaryMetrics(summary.name, summary.labels, result)

-  proc newSummary*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry): Summary {.raises: [Defect, ValueError, RegistrationError].} =
-    validateName(name)
+  proc newSummary*(name: string, help: string, labels: LabelsParam = @[],
+                  registry = defaultRegistry): Summary
+                  {.raises: [Defect, ValueError, RegistrationError].} =
     validateLabels(labels, invalidLabelNames = ["quantile"])
-    result = Summary(name: name,
-                    help: help,
-                    typ: "summary",
-                    labels: @labels,
-                    creationThreadId: getThreadId())
-    result.lock.initLock()
+    result = Summary.newCollector(name, help, labels, registry, "summary")
     if labels.len == 0:
       result.metrics[@labels] = newSummaryMetrics(name, labels, labels)
-    result.register(registry)

 template declareSummary*(identifier: untyped,
                          help: static string,

@@ -873,12 +885,9 @@ when defined(metrics):
     if result notin histogram.metrics:
       histogram.metrics[result] = newHistogramMetrics(histogram.name, histogram.labels, result, histogram.buckets)

-  proc newHistogram*(name: string,
-                    help: string,
-                    labels: LabelsParam = @[],
-                    registry = defaultRegistry,
-                    buckets: openArray[float64] = defaultHistogramBuckets): Histogram {.raises: [Defect, ValueError, RegistrationError].} =
-    validateName(name)
+  proc newHistogram*(name: string, help: string, labels: LabelsParam = @[],
+                    registry = defaultRegistry, buckets: openArray[float64] = defaultHistogramBuckets): Histogram
+                    {.raises: [Defect, ValueError, RegistrationError].} =
     validateLabels(labels, invalidLabelNames = ["le"])
     var bucketsSeq = @buckets
     if bucketsSeq.len > 0 and bucketsSeq[^1] != Inf:

@@ -887,16 +896,10 @@ when defined(metrics):
       raise newException(ValueError, "Invalid buckets list: '" & $bucketsSeq & "'. At least 2 required.")
     if not bucketsSeq.isSorted(system.cmp[float64]):
       raise newException(ValueError, "Invalid buckets list: '" & $bucketsSeq & "'. Must be sorted.")
-    result = Histogram(name: name,
-                      help: help,
-                      typ: "histogram",
-                      labels: @labels,
-                      creationThreadId: getThreadId(),
-                      buckets: bucketsSeq)
-    result.lock.initLock()
+    result = Histogram.newCollector(name, help, labels, registry, "histogram")
+    result.buckets = bucketsSeq
     if labels.len == 0:
       result.metrics[@labels] = newHistogramMetrics(name, labels, labels, bucketsSeq)
-    result.register(registry)

 template declareHistogram*(identifier: untyped,
                            help: static string,

@@ -1023,7 +1026,7 @@ when defined(metrics) and defined(linux):
     pagesize = sysconf(SC_PAGE_SIZE).float64

   type ProcessInfo = ref object of Gauge
-  var processInfo* {.global.} = ProcessInfo.buildCollector("process_info", "CPU and memory usage")
+  var processInfo* {.global.} = ProcessInfo.newCollector("process_info", "CPU and memory usage")

   method collect*(collector: ProcessInfo): Metrics =
     let timestamp = getTime().toMilliseconds()

@@ -1083,15 +1086,13 @@ when defined(metrics) and defined(linux):
     except CatchableError as e:
       printError(e.msg)

-  processInfo.register(defaultRegistry)
-
 ####################
 # Nim runtime info #
 ####################

 when defined(metrics):
   type NimRuntimeInfo = ref object of Gauge
-  var nimRuntimeInfo* {.global.} = NimRuntimeInfo.buildCollector("nim_runtime_info", "Nim runtime info")
+  var nimRuntimeInfo* {.global.} = NimRuntimeInfo.newCollector("nim_runtime_info", "Nim runtime info")

   method collect*(collector: NimRuntimeInfo): Metrics =
     let timestamp = getTime().toMilliseconds()

@@ -1144,8 +1145,6 @@ when defined(metrics):
     except CatchableError as e:
       printError(e.msg)

-  nimRuntimeInfo.register(defaultRegistry)
-
 declareGauge nim_gc_mem_bytes, "the number of bytes that are owned by a thread's GC", ["thread_id"]
 declareGauge nim_gc_mem_occupied_bytes, "the number of bytes that are owned by a thread's GC and hold data", ["thread_id"]
vendor/nim-metrics/tests/main_tests.nim (vendored, 29 lines changed)

@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Status Research & Development GmbH
+# Copyright (c) 2019-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license: http://opensource.org/licenses/MIT
 #   * Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0

@@ -339,6 +339,33 @@ suite "registry":
     declareCounter duplicate_counter, "duplicate counter"
     duplicate_counter.inc()

+when defined(metrics):
+  type MyCustomCollector = ref object of Gauge
+  var
+    myCustomCollector = MyCustomCollector.newCollector("my_custom_collector", "help")
+    registry2 = newRegistry()
+    myCustomCollector2 = MyCustomCollector.newCollector("my_custom_collector2", "help2", registry = registry2)
+
+  method collect(collector: MyCustomCollector): Metrics =
+    let timestamp = getTime().toMilliseconds()
+    result[@[]] = @[
+      Metric(
+        name: "custom_metric",
+        value: 42,
+        timestamp: timestamp,
+      )
+    ]
+
+  suite "custom collectors":
+    test "42":
+      check myCustomCollector.value == 42
+
+    test "custom registry":
+      let collectors = registry2.collect()
+      check collectors.len == 1
+      for collector, metrics in collectors:
+        check collector.value == 42
+
 suite "system metrics":
   test "change update interval":
     when defined(metrics):
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Status Research & Development GmbH
+# Copyright (c) 2019-2022 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 #  * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -12,19 +12,20 @@

 {.push raises: [Defect].}

-import ./utils
+import std/strutils,
+       ./utils

 when defined(miniupnpcUseSystemLibs):
   {.passC: staticExec("pkg-config --cflags miniupnpc").}
   {.passL: staticExec("pkg-config --libs miniupnpc").}
 else:
   import os
-  const includePath = currentSourcePath.parentDir().parentDir() / "vendor" / "miniupnp" / "miniupnpc"
+  const includePath = currentSourcePath.parentDir().parentDir().replace('\\', '/') & "/vendor/miniupnp/miniupnpc"
   {.passC: "-I" & includePath.}
   # We can't use the {.link.} pragma in here, because it would place the static
   # library archive as the first object to be linked, which would lead to all
   # its exported symbols being ignored. We move it into the last position with {.passL.}.
-  {.passL: includePath / "libminiupnpc.a".}
+  {.passL: includePath & "/libminiupnpc.a".}

 when defined(windows):
   import nativesockets # for that wsaStartup() call at the end

@@ -517,7 +518,7 @@ proc UPNPIGD_IsConnected*(a1: ptr UPNPUrls; a2: ptr IGDdatas): cint {.
 # custom wrappers #
 ###################

-import stew/results, std/strutils
+import stew/results
 export results

 type Miniupnp* = ref object
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Status Research & Development GmbH
+# Copyright (c) 2019-2022 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 #  * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -12,7 +12,7 @@

 {.push raises: [Defect].}

-import os
+import os, strutils
 when defined(windows):
   import winlean
 else:

@@ -21,9 +21,9 @@ else:
 when defined(libnatpmpUseSystemLibs):
   {.passL: "-lnatpmp".}
 else:
-  const includePath = currentSourcePath.parentDir().parentDir() / "vendor" / "libnatpmp-upstream"
+  const includePath = currentSourcePath.parentDir().parentDir().replace('\\', '/') & "/vendor/libnatpmp-upstream"
   {.passC: "-I" & includePath.}
-  {.passL: includePath / "libnatpmp.a".}
+  {.passL: includePath & "/libnatpmp.a".}

 when defined(windows):
   import nativesockets # for that wsaStartup() call at the end
vendor/nim-stew/.appveyor.yml (vendored, deleted, 43 lines)

@@ -1,43 +0,0 @@
-version: '{build}'
-
-image: Visual Studio 2015
-
-cache:
- - NimBinaries
-
-matrix:
-  # We always want 32 and 64-bit compilation
-  fast_finish: false
-
-platform:
-  - x86
-  - x64
-
-# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-clone_depth: 10
-
-install:
-  # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
-  - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
-  - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
-
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - SET PATH=%CD%\Nim\bin;%PATH%
-
-build_script:
-  - cd C:\projects\%APPVEYOR_PROJECT_SLUG%
-  - nimble install -y
-
-test_script:
-  - nimble test
-  - IF "%PLATFORM%" == "x86" CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86
-  - IF "%PLATFORM%" == "x64" CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64
-  - IF "%PLATFORM%" == "x64" CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86_amd64
-  - nimble testvcc
-
-deploy: off
vendor/nim-stew/.travis.yml (vendored, deleted, 27 lines)

@@ -1,27 +0,0 @@
-language: c
-
-# https://docs.travis-ci.com/user/caching/
-cache:
-  directories:
-    - NimBinaries
-
-git:
-  # when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-  depth: 10
-
-os:
-  - linux
-  - osx
-
-install:
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - export PATH=$PWD/Nim/bin:$PATH
-
-script:
-  - nimble install -y
-  - nimble test
vendor/nim-stint/stint/private/datatypes.nim (vendored, 2 lines changed)

@@ -60,7 +60,7 @@
 #   with a recursive tree structure.
 #   On the other side, returning a `var array[N div 2, uint64]` is problematic at the moment.
 # - Compile-time computation is possible while due to the previous issue
-#   an array backend would be required to use var openarray[uint64]
+#   an array backend would be required to use var openArray[uint64]
 #   i.e. pointers.
 # - Note that while shift-right and left can easily be done an array of bytes
 #   this would have reduced performance compared to moving 64-bit words.
vendor/nim-testutils/ntu.nim (vendored, 4 lines changed)

@@ -130,7 +130,7 @@ proc composeOutputs(test: TestSpec, stdout: string): TestOutputs =
     if name == "stdout":
       result[name] = stdout
     else:
-      if not existsFile(name):
+      if not fileExists(name):
        continue
      result[name] = readFile(name)
      removeFile(name)

@@ -161,7 +161,7 @@ proc cmpOutputs(test: TestSpec, outputs: TestOutputs): TestStatus =
 proc compile(test: TestSpec; backend: string): TestStatus =
   ## compile the test program for the requested backends
   block:
-    if not existsFile(test.source):
+    if not fileExists(test.source):
      logFailure(test, SourceFileNotFound)
      result = FAILED
      break
vendor/nim-web3/.appveyor.yml (vendored, deleted, 40 lines)

@@ -1,40 +0,0 @@
-version: '{build}'
-
-image: Visual Studio 2015
-
-cache:
- - NimBinaries
-
-matrix:
-  # We always want 32 and 64-bit compilation
-  fast_finish: false
-
-platform:
-  - x86
-  - x64
-
-# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-clone_depth: 10
-
-install:
-  - npm install -g ganache-cli
-  # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
-  - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
-  - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
-
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - SET PATH=%CD%\Nim\bin;%PATH%
-
-build_script:
-  - cd C:\projects\%APPVEYOR_PROJECT_SLUG%
-  - nimble install -y
-
-test_script:
-  - ps: Start-Process ganache-cli.cmd -ArgumentList "-s 0"
-  - nimble test
-
-deploy: off
28  vendor/nim-web3/.travis.yml (vendored)
@@ -1,28 +0,0 @@
-language: c
-osx_image: xcode10.2
-sudo: false
-
-# https://docs.travis-ci.com/user/caching/
-cache:
-  directories:
-    - NimBinaries
-
-git:
-  # when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-  depth: 10
-
-os:
-  - linux
-  - osx
-
-install:
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - export PATH=$PWD/Nim/bin:$PATH
-
-script:
-  - ./ci-test.sh
12  vendor/nim-web3/tests/test.nim (vendored)
@@ -42,8 +42,8 @@ contract NumberStorage {
 }
 ]#
 contract(NumberStorage):
-  proc setNumber(number: Uint256)
-  proc getNumber(): Uint256 {.view.}
+  proc setNumber(number: UInt256)
+  proc getNumber(): UInt256 {.view.}

 const NumberStorageCode = "6060604052341561000f57600080fd5b60bb8061001d6000396000f30060606040526004361060485763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416633fb5c1cb8114604d578063f2c9ecd8146062575b600080fd5b3415605757600080fd5b60606004356084565b005b3415606c57600080fd5b60726089565b60405190815260200160405180910390f35b600055565b600054905600a165627a7a7230582023e722f35009f12d5698a4ab22fb9d55a6c0f479fc43875c65be46fbdd8db4310029"

@@ -73,9 +73,9 @@ contract MetaCoin {
 }
 ]#
 contract(MetaCoin):
-  proc sendCoin(receiver: Address, amount: Uint256): Bool
-  proc getBalance(address: Address): Uint256 {.view.}
-  proc Transfer(fromAddr, toAddr: indexed[Address], value: Uint256) {.event.}
+  proc sendCoin(receiver: Address, amount: UInt256): Bool
+  proc getBalance(address: Address): UInt256 {.view.}
+  proc Transfer(fromAddr, toAddr: indexed[Address], value: UInt256) {.event.}
   proc BlaBla(fromAddr: indexed[Address]) {.event.}

 const MetaCoinCode = "608060405234801561001057600080fd5b5032600090815260208190526040902061271090556101c2806100346000396000f30060806040526004361061004b5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166390b98a118114610050578063f8b2cb4f14610095575b600080fd5b34801561005c57600080fd5b5061008173ffffffffffffffffffffffffffffffffffffffff600435166024356100d5565b604080519115158252519081900360200190f35b3480156100a157600080fd5b506100c373ffffffffffffffffffffffffffffffffffffffff6004351661016e565b60408051918252519081900360200190f35b336000908152602081905260408120548211156100f457506000610168565b336000818152602081815260408083208054879003905573ffffffffffffffffffffffffffffffffffffffff871680845292819020805487019055805186815290519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35060015b92915050565b73ffffffffffffffffffffffffffffffffffffffff16600090815260208190526040902054905600a165627a7a72305820000313ec0ebbff4ffefbe79d615d0ab019d8566100c40eb95a4eee617a87d1090029"

@@ -149,7 +149,7 @@ suite "Contracts":
   var notificationsReceived = 0

   let s = await ns.subscribe(Transfer) do (
-      fromAddr, toAddr: Address, value: Uint256)
+      fromAddr, toAddr: Address, value: UInt256)
       {.raises: [Defect], gcsafe.}:
     try:
       echo "onTransfer: ", fromAddr, " transferred ", value, " to ", toAddr
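
The test changes are purely spelling: nim-stint declares the 256-bit types as `UInt256`/`Int256` (aliases of `StUint[256]`/`StInt[256]`), and `Uint256` only resolved through case-insensitive lookup. A minimal sketch, assuming the stint package is installed:

```nim
import stint

# u256 builds an unsigned 256-bit value from an ordinary integer literal.
let balance = 1_000_000.u256
let doubled = balance * 2.u256
echo doubled  # 2000000, printed via stint's `$` for StUint[256]
```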
6  vendor/nim-web3/tests/test_logs.nim (vendored)
@@ -21,8 +21,8 @@ contract LoggerContract {
 }
 ]#
 contract(LoggerContract):
-  proc MyEvent(sender: Address, number: Uint256) {.event.}
-  proc invoke(value: Uint256)
+  proc MyEvent(sender: Address, number: UInt256) {.event.}
+  proc invoke(value: UInt256)

 const LoggerContractCode = "6080604052348015600f57600080fd5b5060bc8061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80632b30d2b814602d575b600080fd5b604760048036036020811015604157600080fd5b50356049565b005b604080513381526020810183905281517fdf50c7bb3b25f812aedef81bc334454040e7b27e27de95a79451d663013b7e17929181900390910190a15056fea265627a7a723058202ed7f5086297d2a49fbe359f4e489a007b69eb5077f5c76328bffdb63f164b4b64736f6c63430005090032"

@@ -67,7 +67,7 @@ suite "Logs":
   var notificationsReceived = 0

   let s = await ns.subscribe(MyEvent, %*{"fromBlock": "0x0"}) do (
-      sender: Address, value: Uint256)
+      sender: Address, value: UInt256)
       {.raises: [Defect], gcsafe.}:
     try:
       echo "onEvent: ", sender, " value ", value
4  vendor/nim-web3/tests/test_signed_tx.nim (vendored)
@@ -19,8 +19,8 @@ contract NumberStorage {
 }
 ]#
 contract(NumberStorage):
-  proc setNumber(number: Uint256)
-  proc getNumber(): Uint256 {.view.}
+  proc setNumber(number: UInt256)
+  proc getNumber(): UInt256 {.view.}

 const NumberStorageCode = "6060604052341561000f57600080fd5b60bb8061001d6000396000f30060606040526004361060485763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416633fb5c1cb8114604d578063f2c9ecd8146062575b600080fd5b3415605757600080fd5b60606004356084565b005b3415606c57600080fd5b60726089565b60405190815260200160405180910390f35b600055565b600054905600a165627a7a7230582023e722f35009f12d5698a4ab22fb9d55a6c0f479fc43875c65be46fbdd8db4310029"
6  vendor/nim-web3/web3.nim (vendored)
@@ -49,7 +49,7 @@ type
     web3: Web3
     data: string
     to: Address
-    value: Uint256
+    value: UInt256

   ContractCall*[T] = ref object of ContractCallBase

@@ -405,7 +405,7 @@ macro contract*(cname: untyped, body: untyped): untyped =
       `encoder`
       return initContractCall[`output`](
         `senderName`.web3,
-        ($keccak_256.digest(`signature`))[0..<8].toLower & `encodedParams`,
+        ($keccak256.digest(`signature`))[0..<8].toLower & `encodedParams`,
         `senderName`.contractAddress)

   result.add procDef

@@ -681,4 +681,4 @@ proc subscribe*(s: Sender, t: typedesc, cb: proc): Future[Subscription] {.inline
   subscribe(s, t, newJObject(), cb, SubscriptionErrorHandler nil)

 proc `$`*(b: Bool): string =
-  $(Stint[256](b))
+  $(StInt[256](b))
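
The web3.nim fix uses nimcrypto's canonical export name, `keccak256`, when computing the 4-byte ABI selector. A sketch of the same call outside the macro, assuming nimcrypto is available:

```nim
import nimcrypto, strutils

# keccak256.digest returns an MDigest[256]; its hex string, lower-cased and
# truncated to 8 hex characters, is the ABI function selector.
let selector = ($keccak256.digest("transfer(address,uint256)"))[0 ..< 8].toLower
echo selector  # first 4 bytes of the Keccak-256 hash: "a9059cbb"
```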
8  vendor/nim-web3/web3.nimble (vendored)
@@ -6,7 +6,7 @@ description = "This is the humble begginings of library similar to web3.[js|py
 license = "MIT or Apache License 2.0"

 ### Dependencies
-requires "nim >= 0.18.0"
+requires "nim >= 1.2.0"
 requires "chronicles"
 requires "chronos"
 requires "eth"

@@ -22,7 +22,11 @@ proc test(args, path: string) =
   if not dirExists "build":
     mkDir "build"
   exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") & " " & args &
-    " --outdir:build -r --hints:off --warnings:off --skipParentCfg " & path
+    " --outdir:build -r --skipParentCfg" &
+    " --warning[ObservableStores]:off --warning[GcUnsafe2]:off" &
+    " --styleCheck:usages --styleCheck:hint" &
+    " --hint[XDeclaredButNotUsed]:off --hint[Processing]:off " &
+    path

 ### tasks
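
The rewritten `test` proc in web3.nimble replaces the blanket `--hints:off --warnings:off` with targeted switches plus style checking. The same flags can also live in a NimScript configuration; a hypothetical `config.nims` sketch (file name and the particular selection of switches are illustrative only):

```nim
# config.nims (hypothetical): mirror the flags the nimble test task now passes.
switch("outdir", "build")
switch("skipParentCfg")
switch("styleCheck", "usages")        # enforce the declared identifier spelling
switch("styleCheck", "hint")          # report violations as hints, not errors
switch("warning[ObservableStores]", "off")
switch("warning[GcUnsafe2]", "off")
switch("hint[XDeclaredButNotUsed]", "off")
switch("hint[Processing]", "off")
```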
10  vendor/nim-web3/web3/conversions.nim (vendored)
@@ -27,7 +27,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var ref UInt256) =
   new result
   result[] = hexStr.parse(StUint[256], 16) # TODO: Handle errors

-proc bytesFromJson(n: JsonNode, argName: string, result: var openarray[byte]) =
+proc bytesFromJson(n: JsonNode, argName: string, result: var openArray[byte]) =
   n.kind.expect(JString, argName)
   let hexStr = n.getStr()
   if hexStr.len != result.len * 2 + 2: # including "0x"

@@ -98,7 +98,7 @@ proc `%`*(v: Address): JsonNode =
 proc `%`*(v: TypedTransaction): JsonNode =
   result = %("0x" & distinctBase(v).toHex)

-proc writeHexValue(w: JsonWriter, v: openarray[byte]) =
+proc writeHexValue(w: JsonWriter, v: openArray[byte]) =
   w.stream.write "\"0x"
   w.stream.writeHex v
   w.stream.write "\""

@@ -140,7 +140,7 @@ proc `$`*(v: DynamicBytes): string {.inline.} =
   "0x" & toHex(v)

 proc `%`*(x: EthSend): JsonNode =
-  result = newJobject()
+  result = newJObject()
   result["from"] = %x.source
   if x.to.isSome:
     result["to"] = %x.to.unsafeGet

@@ -156,7 +156,7 @@ proc `%`*(x: EthSend): JsonNode =
   result["nonce"] = %x.nonce.unsafeGet

 proc `%`*(x: EthCall): JsonNode =
-  result = newJobject()
+  result = newJObject()
   result["to"] = %x.to
   if x.source.isSome:
     result["source"] = %x.source.unsafeGet

@@ -173,7 +173,7 @@ proc `%`*(x: byte): JsonNode =
   %x.int

 proc `%`*(x: FilterOptions): JsonNode =
-  result = newJobject()
+  result = newJObject()
   if x.fromBlock.isSome:
     result["fromBlock"] = %x.fromBlock.unsafeGet
   if x.toBlock.isSome:
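
`newJObject` (capital O) is the exported constructor in std/json; the lowercase call only worked through case-insensitive matching. A minimal sketch of building a JSON-RPC style object the way conversions.nim does:

```nim
import std/json

var call = newJObject()
call["to"] = %"0x0000000000000000000000000000000000000000"
call["value"] = %"0x0"
echo call  # {"to":"0x0000000000000000000000000000000000000000","value":"0x0"}
```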
30  vendor/nim-web3/web3/encoding.nim (vendored)
@@ -7,13 +7,13 @@ import
 type
   EncodeResult* = tuple[dynamic: bool, data: string]

-func encode*[bits: static[int]](x: Stuint[bits]): EncodeResult =
-  ## Encodes a `Stuint` to a textual representation for use in the JsonRPC
+func encode*[bits: static[int]](x: StUint[bits]): EncodeResult =
+  ## Encodes a `StUint` to a textual representation for use in the JsonRPC
   ## `sendTransaction` call.
   (dynamic: false, data: '0'.repeat((256 - bits) div 4) & x.dumpHex)

-func encode*[bits: static[int]](x: Stint[bits]): EncodeResult =
-  ## Encodes a `Stint` to a textual representation for use in the JsonRPC
+func encode*[bits: static[int]](x: StInt[bits]): EncodeResult =
+  ## Encodes a `StInt` to a textual representation for use in the JsonRPC
   ## `sendTransaction` call.
   (dynamic: false,
    data:

@@ -23,17 +23,17 @@ func encode*[bits: static[int]](x: Stint[bits]): EncodeResult =
      '0'.repeat((256 - bits) div 4) & x.dumpHex
   )

-func decode*(input: string, offset: int, to: var Stuint): int =
+func decode*(input: string, offset: int, to: var StUint): int =
   let meaningfulLen = to.bits div 8 * 2
   to = type(to).fromHex(input[offset .. offset + meaningfulLen - 1])
   meaningfulLen

-func decode*[N](input: string, offset: int, to: var Stint[N]): int =
+func decode*[N](input: string, offset: int, to: var StInt[N]): int =
   let meaningfulLen = N div 8 * 2
   fromHex(input[offset .. offset + meaningfulLen], to)
   meaningfulLen

-func fixedEncode(a: openarray[byte]): EncodeResult =
+func fixedEncode(a: openArray[byte]): EncodeResult =
   var padding = a.len mod 32
   if padding != 0: padding = 32 - padding
   result = (dynamic: false, data: "00".repeat(padding) & byteutils.toHex(a))

@@ -41,7 +41,7 @@ func fixedEncode(a: openarray[byte]): EncodeResult =
 func encode*[N](b: FixedBytes[N]): EncodeResult = fixedEncode(array[N, byte](b))
 func encode*(b: Address): EncodeResult = fixedEncode(array[20, byte](b))

-func decodeFixed(input: string, offset: int, to: var openarray[byte]): int =
+func decodeFixed(input: string, offset: int, to: var openArray[byte]): int =
   let meaningfulLen = to.len * 2
   var padding = to.len mod 32
   if padding != 0: padding = (32 - padding) * 2

@@ -55,7 +55,7 @@ func decode*[N](input: string, offset: int, to: var FixedBytes[N]): int {.inline
 func decode*(input: string, offset: int, to: var Address): int {.inline.} =
   decodeFixed(input, offset, array[20, byte](to))

-func encodeDynamic(v: openarray[byte]): EncodeResult =
+func encodeDynamic(v: openArray[byte]): EncodeResult =
   result.dynamic = true
   result.data = v.len.toHex(64).toLower
   for y in v:

@@ -86,18 +86,18 @@ macro makeTypeEnum(): untyped =
       identInt = newIdentNode("Int" & $i)
     if ceil(log2(i.float)) == floor(log2(i.float)):
       lastpow2 = i
-    if i notin {256, 125}: # Int/Uint256/128 are already defined in stint. No need to repeat.
+    if i notin {256, 125}: # Int/UInt256/128 are already defined in stint. No need to repeat.
       result.add quote do:
         type
-          `identUint`* = Stuint[`lastpow2`]
-          `identInt`* = Stint[`lastpow2`]
+          `identUint`* = StUint[`lastpow2`]
+          `identInt`* = StInt[`lastpow2`]
   let
     identUint = ident("Uint")
     identInt = ident("Int")
     identBool = ident("Bool")
   result.add quote do:
     type
-      `identUint`* = Uint256
+      `identUint`* = UInt256
       `identInt`* = Int256
       `identBool`* = distinct Int256

@@ -139,7 +139,7 @@ macro makeTypeEnum(): untyped =
   result.add quote do:
     type
       `identFixed`* = distinct Int128
-      `identUfixed`* = distinct Uint128
+      `identUfixed`* = distinct UInt128
   for i in 1..256:
     let
       identBytes = ident("Bytes" & $i)

@@ -191,7 +191,7 @@ func encode*(x: seq[Encodable]): EncodeResult =
     result.data &= data

 func decode*[T](input: string, to: seq[T]): seq[T] =
-  var count = input[0..64].decode(Stuint)
+  var count = input[0..64].decode(StUint)
   result = newSeq[T](count)
   for i in 0..count:
     result[i] = input[i*64 .. (i+1)*64].decode(T)
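
The encoders in encoding.nim left-pad every value to one 32-byte ABI word, which is where the `'0'.repeat((256 - bits) div 4)` expression comes from. A standalone sketch of that arithmetic, using only the standard library (the original code pads stint's `dumpHex` output instead):

```nim
import strutils

# A 64-bit value occupies 16 hex characters; an ABI word is 256 bits,
# i.e. 64 hex characters, so 48 zeroes of padding are prepended.
const bits = 64
let hexValue = toHex(1_234_567_890'u64)             # "00000000499602D2"
let word = '0'.repeat((256 - bits) div 4) & hexValue
echo word.len   # 64
echo word
```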
2  vendor/nim-web3/web3/engine_api_types.nim (vendored)
@@ -42,7 +42,7 @@ type

   # https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.8/src/engine/specification.md#transitionconfigurationv1
   TransitionConfigurationV1* = object
-    terminalTotalDifficulty*: Uint256
+    terminalTotalDifficulty*: UInt256
     terminalBlockHash*: BlockHash
     terminalBlockNumber*: Quantity
4  vendor/nim-web3/web3/ethtypes.nim (vendored)
@@ -38,7 +38,7 @@ type
     to*: Option[Address] # (optional when creating new contract) the address the transaction is directed to.
     gas*: Option[Quantity] # (optional, default: 90000) integer of the gas provided for the transaction execution. It will return unused gas.
     gasPrice*: Option[int] # (optional, default: To-Be-Determined) integer of the gasPrice used for each paid gas.
-    value*: Option[Uint256] # (optional) integer of the value sent with this transaction.
+    value*: Option[UInt256] # (optional) integer of the value sent with this transaction.
     data*: string # the compiled code of a contract OR the hash of the invoked method signature and encoded parameters. For details see Ethereum Contract ABI.
     nonce*: Option[Nonce] # (optional) integer of a nonce. This allows to overwrite your own pending transactions that use the same nonce

@@ -291,4 +291,4 @@ template len*(data: DynamicBytes): int =
   len(distinctBase data)

 func `$`*[minLen, maxLen](data: DynamicBytes[minLen, maxLen]): string =
-  "0x" & byteutils.toHex(distinctbase(data))
+  "0x" & byteutils.toHex(distinctBase(data))
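
`distinctBase` comes from std/typetraits and is spelled with a capital B; the ethtypes.nim fix corrects a lowercase call. A small sketch of what it does for a `distinct` wrapper type (the wrapper name here is a hypothetical stand-in for DynamicBytes):

```nim
import std/typetraits

type DynBytes = distinct seq[byte]

let d = DynBytes(@[byte 0xde, 0xad, 0xbe, 0xef])
# distinctBase converts back to the underlying seq[byte] so normal seq
# operations (len, iteration, hex helpers, ...) can be applied.
echo distinctBase(d).len   # 4
```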
@@ -137,7 +137,7 @@ proc parseExt*[T: BChar](data: openArray[T], output: var seq[AppExt]): bool =
       ext.params[^1].name = system.move(param.name)
       ext.params[^1].value = system.move(param.value)

-    if lex.tok notin {tkSemCol, tkComma, tkEof}:
+    if lex.tok notin {tkSemcol, tkComma, tkEof}:
       return false

     output.setLen(output.len + 1)
10  vendor/nim-websock/websock/http/server.nim (vendored)
@@ -149,12 +149,10 @@ proc handleTlsConnCb(
     maxVersion = tlsHttpServer.maxVersion,
     flags = tlsHttpServer.tlsFlags)

-  var stream: AsyncStream
-  try:
-    stream = AsyncStream(
+  let stream = AsyncStream(
     reader: tlsStream.reader,
     writer: tlsStream.writer)
+  try:
     let httpServer = HttpServer(server)
     let request = await httpServer.parseRequest(stream)

@@ -164,9 +162,7 @@ proc handleTlsConnCb(
   finally:
     await stream.closeWait()

-proc accept*(server: HttpServer): Future[HttpRequest]
-    {.async, raises: [Defect, HttpError].} =
+proc accept*(server: HttpServer): Future[HttpRequest] {.async.} =

   if not isNil(server.handler):
     raise newException(HttpError,
       "Callback already registered - cannot mix callback and accepts styles!")
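
The server.nim change moves construction of the `AsyncStream` out of the `try` block, so the object bound by `let` is always valid by the time the `finally` clause closes it. The same shape with a plain file, as a minimal sketch:

```nim
# Acquire the resource first, then guard only the code that uses it; the
# finally branch can then unconditionally release it.
let f = open("handshake.log", fmWrite)
try:
  f.writeLine("request parsed")
finally:
  f.close()
```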
2  vendor/nim-websock/websock/session.nim (vendored)
@@ -153,7 +153,7 @@ proc handleClose*(
     code = StatusFulfilled
     reason = ""

-  case payload.len:
+  case payLoad.len:
   of 0:
     code = StatusNoStatus
   of 1:
@@ -91,7 +91,7 @@ nim_needs_rebuilding() {
   fi

   # Delete old Nim binaries, to put a limit on how much storage we use.
-  for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
+  for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* 2>/dev/null | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
     if [[ -e "${F}" ]]; then
       rm "${F}"
     fi