deploy: 8ac8e758e065a1056a0de48e08e3180bf4aea89e

This commit is contained in:
jm-clius 2022-04-12 08:01:59 +00:00
parent 03ad0fa8ce
commit 5947bc7143
70 changed files with 2068 additions and 500 deletions

View File

@ -40,6 +40,9 @@ and it looks like this:
![textlines format example](media/textlines.svg)
This format is compatible with tooling written for
[heroku/logfmt](https://brandur.org/logfmt).
Alternatively, you can use a multi-line format called `textblocks`:
![textblocks format example](media/textblocks.svg)

View File

@ -2,7 +2,7 @@ import
tables, strutils, strformat,
topics_registry
func parseTopicDirectives*(directives: openarray[string]): Table[string, TopicSettings] =
func parseTopicDirectives*(directives: openArray[string]): Table[string, TopicSettings] =
result = initTable[string, TopicSettings]()
for directive in directives:

View File

@ -562,7 +562,7 @@ proc setProperty*(r: var TextLineRecord, key: string, val: auto) =
# This is similar to how it's done in logfmt:
# https://github.com/csquared/node-logfmt/blob/master/lib/stringify.js#L13
let
needsEscape = valText.find(NewLines + {'"', '\\'}) > -1
needsEscape = valText.find(Newlines + {'"', '\\'}) > -1
needsQuote = valText.find({' ', '='}) > -1
if needsEscape or needsQuote:
@ -617,7 +617,7 @@ proc setProperty*(r: var TextBlockRecord, key: string, val: auto) =
append(r.output, ": ")
applyStyle(r, styleBright)
if valText.find(NewLines) == -1:
if valText.find(Newlines) == -1:
append(r.output, valText)
append(r.output, "\n")
else:
@ -681,7 +681,7 @@ template setFirstProperty*(r: var JsonRecord, key: string, val: auto) =
proc flushRecord*(r: var JsonRecord) =
when defined(js):
r.output.append JSON.stringify(r.record)
r.output.append Json.stringify(r.record)
else:
r.jsonWriter.endRecord()
r.outStream.write '\n'

View File

@ -82,7 +82,7 @@ proc setTopicState*(name: string,
return true
proc topicsMatch*(logStmtLevel: LogLevel,
logStmtTopics: openarray[ptr TopicSettings]): bool =
logStmtTopics: openArray[ptr TopicSettings]): bool =
lockRegistry:
var
hasEnabledTopics = gTotalEnabledTopics > 0

View File

@ -1,2 +1,3 @@
--path: "."
--styleCheck:usages
--styleCheck:error

View File

@ -978,6 +978,7 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {.
request.setError(exc)
raise exc
connection.flags.incl(HttpClientConnectionFlag.Request)
request.connection = connection
try:
@ -1028,6 +1029,7 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {.
request.setError(exc)
raise exc
connection.flags.incl(HttpClientConnectionFlag.Request)
request.connection = connection
try:

View File

@ -299,7 +299,6 @@ when defined(windows):
CompletionKey = ULONG_PTR
CompletionData* = object
fd*: AsyncFD
cb*: CallbackFunc
errCode*: OSErrorCode
bytesCount*: int32
@ -500,17 +499,9 @@ elif unixPlatform:
type
AsyncFD* = distinct cint
CompletionData* = object
fd*: AsyncFD
udata*: pointer
PCompletionData* = ptr CompletionData
SelectorData* = object
reader*: AsyncCallback
rdata*: CompletionData
writer*: AsyncCallback
wdata*: CompletionData
PDispatcher* = ref object of PDispatcherBase
selector: Selector[SelectorData]
@ -555,8 +546,6 @@ elif unixPlatform:
## Register file descriptor ``fd`` in thread's dispatcher.
let loop = getThreadDispatcher()
var data: SelectorData
data.rdata.fd = fd
data.wdata.fd = fd
loop.selector.registerHandle(int(fd), {}, data)
proc unregister*(fd: AsyncFD) {.raises: [Defect, CatchableError].} =
@ -574,9 +563,8 @@ elif unixPlatform:
let loop = getThreadDispatcher()
var newEvents = {Event.Read}
withData(loop.selector, int(fd), adata) do:
let acb = AsyncCallback(function: cb, udata: addr adata.rdata)
let acb = AsyncCallback(function: cb, udata: udata)
adata.reader = acb
adata.rdata = CompletionData(fd: fd, udata: udata)
newEvents.incl(Event.Read)
if not(isNil(adata.writer.function)):
newEvents.incl(Event.Write)
@ -592,7 +580,6 @@ elif unixPlatform:
withData(loop.selector, int(fd), adata) do:
# We need to clear `reader` data, because `selectors` don't do it
adata.reader = default(AsyncCallback)
# adata.rdata = CompletionData()
if not(isNil(adata.writer.function)):
newEvents.incl(Event.Write)
do:
@ -606,9 +593,8 @@ elif unixPlatform:
let loop = getThreadDispatcher()
var newEvents = {Event.Write}
withData(loop.selector, int(fd), adata) do:
let acb = AsyncCallback(function: cb, udata: addr adata.wdata)
let acb = AsyncCallback(function: cb, udata: udata)
adata.writer = acb
adata.wdata = CompletionData(fd: fd, udata: udata)
newEvents.incl(Event.Write)
if not(isNil(adata.reader.function)):
newEvents.incl(Event.Read)
@ -624,7 +610,6 @@ elif unixPlatform:
withData(loop.selector, int(fd), adata) do:
# We need to clear `writer` data, because `selectors` don't do it
adata.writer = default(AsyncCallback)
# adata.wdata = CompletionData()
if not(isNil(adata.reader.function)):
newEvents.incl(Event.Read)
do:
@ -692,9 +677,7 @@ elif unixPlatform:
var data: SelectorData
result = loop.selector.registerSignal(signal, data)
withData(loop.selector, result, adata) do:
adata.reader = AsyncCallback(function: cb, udata: addr adata.rdata)
adata.rdata.fd = AsyncFD(result)
adata.rdata.udata = udata
adata.reader = AsyncCallback(function: cb, udata: udata)
do:
raise newException(ValueError, "File descriptor not registered.")

View File

@ -72,6 +72,20 @@ proc verifyReturnType(typeName: string) {.compileTime.} =
macro unsupported(s: static[string]): untyped =
error s
proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} =
# Replace every Call -> OpenSymChoice by a Bracket expr
# ref https://github.com/nim-lang/Nim/issues/11091
if node.kind in nnkCallKinds and
node[0].kind == nnkOpenSymChoice and node[0].eqIdent("[]"):
result = newNimNode(nnkBracketExpr).add(
cleanupOpenSymChoice(node[1]),
cleanupOpenSymChoice(node[2])
)
else:
result = node.copyNimNode()
for child in node:
result.add(cleanupOpenSymChoice(child))
proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
## This macro transforms a single procedure into a closure iterator.
## The ``async`` macro supports a stmtList holding multiple async procedures.
@ -81,17 +95,13 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
let prcName = prc.name.getName
let returnType = prc.params[0]
let returnType = cleanupOpenSymChoice(prc.params[0])
var baseType: NimNode
# Verify that the return type is a Future[T]
if returnType.kind == nnkBracketExpr:
let fut = repr(returnType[0])
verifyReturnType(fut)
baseType = returnType[1]
elif returnType.kind in nnkCallKinds and returnType[0].eqIdent("[]"):
let fut = repr(returnType[1])
verifyReturnType(fut)
baseType = returnType[2]
elif returnType.kind == nnkEmpty:
baseType = returnType
else:
@ -102,6 +112,10 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
var outerProcBody = newNimNode(nnkStmtList, prc.body)
# Copy comment for nimdoc
if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt:
outerProcBody.add(prc.body[0])
# -> iterator nameIter(chronosInternalRetFuture: Future[T]): FutureBase {.closure.} =
# -> {.push warning[resultshadowed]: off.}

View File

@ -185,7 +185,7 @@ elif defined(windows):
else:
## Initiation
transp.state.incl(WritePending)
let fd = SocketHandle(ovl.data.fd)
let fd = SocketHandle(transp.fd)
var vector = transp.queue.popFirst()
transp.setWriterWSABuffer(vector)
var ret: cint
@ -258,7 +258,7 @@ elif defined(windows):
## Initiation
if transp.state * {ReadEof, ReadClosed, ReadError} == {}:
transp.state.incl(ReadPending)
let fd = SocketHandle(ovl.data.fd)
let fd = SocketHandle(transp.fd)
transp.rflag = 0
transp.ralen = SockLen(sizeof(Sockaddr_storage))
let ret = WSARecvFrom(fd, addr transp.rwsabuf, DWORD(1),
@ -406,9 +406,9 @@ elif defined(windows):
result.udata = udata
result.state = {WritePaused}
result.future = newFuture[void]("datagram.transport")
result.rovl.data = CompletionData(fd: localSock, cb: readDatagramLoop,
result.rovl.data = CompletionData(cb: readDatagramLoop,
udata: cast[pointer](result))
result.wovl.data = CompletionData(fd: localSock, cb: writeDatagramLoop,
result.wovl.data = CompletionData(cb: writeDatagramLoop,
udata: cast[pointer](result))
result.rwsabuf = TWSABuf(buf: cast[cstring](addr result.buffer[0]),
len: int32(len(result.buffer)))
@ -426,9 +426,8 @@ else:
proc readDatagramLoop(udata: pointer) {.raises: Defect.}=
var raddr: TransportAddress
doAssert(not isNil(udata))
var cdata = cast[ptr CompletionData](udata)
var transp = cast[DatagramTransport](cdata.udata)
let fd = SocketHandle(cdata.fd)
let transp = cast[DatagramTransport](udata)
let fd = SocketHandle(transp.fd)
if int(fd) == 0:
## This situation can happen when there are events present
## after the transport was closed.
@ -459,9 +458,8 @@ else:
proc writeDatagramLoop(udata: pointer) =
var res: int
doAssert(not isNil(udata))
var cdata = cast[ptr CompletionData](udata)
var transp = cast[DatagramTransport](cdata.udata)
let fd = SocketHandle(cdata.fd)
var transp = cast[DatagramTransport](udata)
let fd = SocketHandle(transp.fd)
if int(fd) == 0:
## This situation can happen when there are events present
## after the transport was closed.

View File

@ -407,7 +407,7 @@ elif defined(windows):
## Initiation
transp.state.incl(WritePending)
if transp.kind == TransportKind.Socket:
let sock = SocketHandle(transp.wovl.data.fd)
let sock = SocketHandle(transp.fd)
var vector = transp.queue.popFirst()
if vector.kind == VectorKind.DataBuffer:
transp.wovl.zeroOvelappedOffset()
@ -492,7 +492,7 @@ elif defined(windows):
else:
transp.queue.addFirst(vector)
elif transp.kind == TransportKind.Pipe:
let pipe = Handle(transp.wovl.data.fd)
let pipe = Handle(transp.fd)
var vector = transp.queue.popFirst()
if vector.kind == VectorKind.DataBuffer:
transp.wovl.zeroOvelappedOffset()
@ -587,7 +587,7 @@ elif defined(windows):
transp.state.excl(ReadPaused)
transp.state.incl(ReadPending)
if transp.kind == TransportKind.Socket:
let sock = SocketHandle(transp.rovl.data.fd)
let sock = SocketHandle(transp.fd)
transp.roffset = transp.offset
transp.setReaderWSABuffer()
let ret = WSARecv(sock, addr transp.rwsabuf, 1,
@ -610,7 +610,7 @@ elif defined(windows):
transp.setReadError(err)
transp.completeReader()
elif transp.kind == TransportKind.Pipe:
let pipe = Handle(transp.rovl.data.fd)
let pipe = Handle(transp.fd)
transp.roffset = transp.offset
transp.setReaderWSABuffer()
let ret = readFile(pipe, cast[pointer](transp.rwsabuf.buf),
@ -650,9 +650,9 @@ elif defined(windows):
else:
transp = StreamTransport(kind: TransportKind.Socket)
transp.fd = sock
transp.rovl.data = CompletionData(fd: sock, cb: readStreamLoop,
transp.rovl.data = CompletionData(cb: readStreamLoop,
udata: cast[pointer](transp))
transp.wovl.data = CompletionData(fd: sock, cb: writeStreamLoop,
transp.wovl.data = CompletionData(cb: writeStreamLoop,
udata: cast[pointer](transp))
transp.buffer = newSeq[byte](bufsize)
transp.state = {ReadPaused, WritePaused}
@ -670,9 +670,9 @@ elif defined(windows):
else:
transp = StreamTransport(kind: TransportKind.Pipe)
transp.fd = fd
transp.rovl.data = CompletionData(fd: fd, cb: readStreamLoop,
transp.rovl.data = CompletionData(cb: readStreamLoop,
udata: cast[pointer](transp))
transp.wovl.data = CompletionData(fd: fd, cb: writeStreamLoop,
transp.wovl.data = CompletionData(cb: writeStreamLoop,
udata: cast[pointer](transp))
transp.buffer = newSeq[byte](bufsize)
transp.flags = flags
@ -746,8 +746,7 @@ elif defined(windows):
sock.closeSocket()
retFuture.fail(getTransportOsError(err))
else:
let transp = newStreamSocketTransport(povl.data.fd, bufferSize,
child)
let transp = newStreamSocketTransport(sock, bufferSize, child)
# Start tracking transport
trackStream(transp)
retFuture.complete(transp)
@ -761,7 +760,7 @@ elif defined(windows):
povl = RefCustomOverlapped()
GC_ref(povl)
povl.data = CompletionData(fd: sock, cb: socketContinuation)
povl.data = CompletionData(cb: socketContinuation)
let res = loop.connectEx(SocketHandle(sock),
cast[ptr SockAddr](addr saddr),
DWORD(slen), nil, 0, nil,
@ -895,7 +894,6 @@ elif defined(windows):
if pipeHandle == INVALID_HANDLE_VALUE:
raiseAssert osErrorMsg(osLastError())
server.sock = AsyncFD(pipeHandle)
server.aovl.data.fd = AsyncFD(pipeHandle)
try: register(server.sock)
except CatchableError as exc:
raiseAsDefect exc, "register"
@ -1177,8 +1175,7 @@ elif defined(windows):
let dwLocalAddressLength = DWORD(sizeof(Sockaddr_in6) + 16)
let dwRemoteAddressLength = DWORD(sizeof(Sockaddr_in6) + 16)
server.aovl.data = CompletionData(fd: server.sock,
cb: continuationSocket,
server.aovl.data = CompletionData(cb: continuationSocket,
udata: cast[pointer](server))
server.apending = true
let res = loop.acceptEx(SocketHandle(server.sock),
@ -1219,8 +1216,7 @@ elif defined(windows):
retFuture.fail(getTransportOsError(err))
return retFuture
server.aovl.data = CompletionData(fd: server.sock,
cb: continuationPipe,
server.aovl.data = CompletionData(cb: continuationPipe,
udata: cast[pointer](server))
server.apending = true
let res = connectNamedPipe(Handle(server.sock),
@ -1260,15 +1256,17 @@ else:
raiseAsDefect exc, "removeWriter"
proc writeStreamLoop(udata: pointer) =
var cdata = cast[ptr CompletionData](udata)
var transp = cast[StreamTransport](cdata.udata)
let fd = SocketHandle(cdata.fd)
if int(fd) == 0 or isNil(transp):
## This situation can be happen, when there events present
## after transport was closed.
if isNil(udata):
# TODO this is an if rather than an assert for historical reasons:
# it should not happen unless there are race conditions - but if there
# are race conditions, `transp` might be invalid even if it's not nil:
# it could have been released
return
let
transp = cast[StreamTransport](udata)
fd = SocketHandle(transp.fd)
if WriteClosed in transp.state:
if transp.queue.len > 0:
transp.removeWriter()
@ -1357,15 +1355,17 @@ else:
transp.removeWriter()
proc readStreamLoop(udata: pointer) =
# TODO fix Defect raises - they "shouldn't" happen
var cdata = cast[ptr CompletionData](udata)
var transp = cast[StreamTransport](cdata.udata)
let fd = SocketHandle(cdata.fd)
if int(fd) == 0 or isNil(transp):
## This situation can happen when there are events present
## after the transport was closed.
if isNil(udata):
# TODO this is an if rather than an assert for historical reasons:
# it should not happen unless there are race conditions - but if there
# are race conditions, `transp` might be invalid even if it's not nil:
# it could have been released
return
let
transp = cast[StreamTransport](udata)
fd = SocketHandle(transp.fd)
if ReadClosed in transp.state:
transp.state.incl({ReadPaused})
transp.completeReader()
@ -1381,7 +1381,7 @@ else:
elif int(err) in {ECONNRESET}:
transp.state.incl({ReadEof, ReadPaused})
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1390,7 +1390,7 @@ else:
transp.state.incl(ReadPaused)
transp.setReadError(err)
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1398,7 +1398,7 @@ else:
elif res == 0:
transp.state.incl({ReadEof, ReadPaused})
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1408,7 +1408,7 @@ else:
if transp.offset == len(transp.buffer):
transp.state.incl(ReadPaused)
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1427,7 +1427,7 @@ else:
transp.state.incl(ReadPaused)
transp.setReadError(err)
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1435,7 +1435,7 @@ else:
elif res == 0:
transp.state.incl({ReadEof, ReadPaused})
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1445,7 +1445,7 @@ else:
if transp.offset == len(transp.buffer):
transp.state.incl(ReadPaused)
try:
cdata.fd.removeReader()
transp.fd.removeReader()
except IOSelectorsException as exc:
raiseAsDefect exc, "removeReader"
except ValueError as exc:
@ -1519,11 +1519,9 @@ else:
proc continuation(udata: pointer) =
if not(retFuture.finished()):
var data = cast[ptr CompletionData](udata)
var err = 0
let fd = data.fd
try:
fd.removeWriter()
sock.removeWriter()
except IOSelectorsException as exc:
retFuture.fail(exc)
return
@ -1531,15 +1529,15 @@ else:
retFuture.fail(exc)
return
if not(fd.getSocketError(err)):
closeSocket(fd)
if not(sock.getSocketError(err)):
closeSocket(sock)
retFuture.fail(getTransportOsError(osLastError()))
return
if err != 0:
closeSocket(fd)
closeSocket(sock)
retFuture.fail(getTransportOsError(OSErrorCode(err)))
return
let transp = newStreamSocketTransport(fd, bufferSize, child)
let transp = newStreamSocketTransport(sock, bufferSize, child)
# Start tracking transport
trackStream(transp)
retFuture.complete(transp)
@ -1581,11 +1579,18 @@ else:
break
return retFuture
proc acceptLoop(udata: pointer) {.gcsafe.} =
proc acceptLoop(udata: pointer) =
if isNil(udata):
# TODO this is an if rather than an assert for historical reasons:
# it should not happen unless there are race conditions - but if there
# are race conditions, `server` might be invalid even if it's not nil:
# it could have been released
return
var
saddr: Sockaddr_storage
slen: SockLen
var server = cast[StreamServer](cast[ptr CompletionData](udata).udata)
let server = cast[StreamServer](udata)
while true:
if server.status in {ServerStatus.Stopped, ServerStatus.Closed}:
break
@ -1990,7 +1995,7 @@ proc createStreamServer*(host: TransportAddress,
cb = acceptPipeLoop
if not(isNil(cbproc)):
result.aovl.data = CompletionData(fd: serverSocket, cb: cb,
result.aovl.data = CompletionData(cb: cb,
udata: cast[pointer](result))
else:
if host.family == AddressFamily.Unix:

View File

@ -690,11 +690,11 @@ suite "HTTP client testing suite":
proc testBasicAuthorization(): Future[bool] {.async.} =
var session = createSession(true, maxRedirections = 10)
let url = parseUri("https://guest:guest@jigsaw.w3.org/HTTP/Basic/")
let url = parseUri("https://user:passwd@httpbin.org/basic-auth/user/passwd")
let resp = await session.fetch(url)
await session.closeWait()
if (resp.status == 200) and
("Your browser made it!" in cast[string](resp.data)):
("true," in cast[string](resp.data)):
return true
else:
return false

View File

@ -6,6 +6,7 @@
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import unittest2
import macros
import ../chronos
when defined(nimHasUsed): {.used.}
@ -80,3 +81,19 @@ suite "Macro transformations test suite":
check waitFor(testAwait()) == true
test "`awaitne` command test":
check waitFor(testAwaitne()) == true
test "template async macro transformation":
template templatedAsync(name, restype: untyped): untyped =
proc name(): Future[restype] {.async.} = return @[4]
templatedAsync(testTemplate, seq[int])
check waitFor(testTemplate()) == @[4]
macro macroAsync(name, restype, innerrestype: untyped): untyped =
quote do:
proc `name`(): Future[`restype`[`innerrestype`]] {.async.} = return
type OpenObject = object
macroAsync(testMacro, seq, OpenObject)
check waitFor(testMacro()).len == 0

View File

@ -15,13 +15,14 @@ when not defined(windows):
suite "Signal handling test suite":
when not defined(windows):
var signalCounter = 0
var
signalCounter = 0
sigfd = -1
proc signalProc(udata: pointer) =
var cdata = cast[ptr CompletionData](udata)
signalCounter = cast[int](cdata.udata)
signalCounter = cast[int](udata)
try:
removeSignal(int(cdata.fd))
removeSignal(sigfd)
except Exception as exc:
raiseAssert exc.msg
@ -30,7 +31,7 @@ suite "Signal handling test suite":
proc test(signal, value: int): bool =
try:
discard addSignal(signal, signalProc, cast[pointer](value))
sigfd = addSignal(signal, signalProc, cast[pointer](value))
except Exception as exc:
raiseAssert exc.msg
var fut = asyncProc()

View File

@ -1048,6 +1048,7 @@ proc processPacketInternal(socket: UtpSocket, p: Packet) =
socketAckNr = socket.ackNr,
socketSeqNr = socket.seqNr,
windowPackets = socket.curWindowPackets,
rcvBufferSize = socket.offset,
packetType = p.header.pType,
seqNr = p.header.seqNr,
ackNr = p.header.ackNr,
@ -1463,6 +1464,12 @@ template shiftBuffer(t, c: untyped) =
(t).offset = 0
proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
debug "Handling incoming read",
rcvBufferSize = socket.offset,
reorderBufferSize = socket.inBufferBytes,
socketAtEOF = socket.atEof(),
readTillEOF = readReq.bytesToRead == 0
if readReq.reader.finished():
return ReadCancelled
@ -1477,9 +1484,18 @@ proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, socket.offset - 1))
socket.shiftBuffer(socket.offset)
if (socket.atEof()):
debug "Read finished",
bytesRead = len(readReq.bytesAvailable),
socketAtEof = socket.atEof()
readReq.reader.complete(readReq.bytesAvailable)
return ReadFinished
else:
debug "Read not finished",
bytesRead = len(readReq.bytesAvailable),
socketAtEof = socket.atEof()
return ReadNotFinished
else:
let bytesAlreadyRead = len(readReq.bytesAvailable)
@ -1488,9 +1504,17 @@ proc onRead(socket: UtpSocket, readReq: var ReadReq): ReadResult =
readReq.bytesAvailable.add(socket.rcvBuffer.toOpenArray(0, count - 1))
socket.shiftBuffer(count)
if (len(readReq.bytesAvailable) == readReq.bytesToRead):
debug "Read finished",
bytesRead = len(readReq.bytesAvailable),
socketAtEof = socket.atEof()
readReq.reader.complete(readReq.bytesAvailable)
return ReadFinished
else:
debug "Read not finished",
bytesRead = len(readReq.bytesAvailable),
socketAtEof = socket.atEof()
return ReadNotFinished
proc eventLoop(socket: UtpSocket) {.async.} =
@ -1503,7 +1527,7 @@ proc eventLoop(socket: UtpSocket) {.async.} =
# we processed a packet and rcv buffer size is larger than 0,
# check if we can finish some pending readers
while socket.pendingReads.len() > 0 and socket.offset > 0:
while socket.pendingReads.len() > 0:
let readResult = socket.onRead(socket.pendingReads[0])
case readResult
of ReadFinished:

View File

@ -120,7 +120,6 @@ procSuite "Utp protocol over udp tests with loss and delays":
bytesPerRead: int = 0): TestCase =
TestCase(maxDelay: maxDelay, dropRate: dropRate, bytesToTransfer: bytesToTransfer, cfg: cfg, bytesPerRead: bytesPerRead)
let testCases = @[
TestCase.init(45, 10, 40000),
TestCase.init(25, 15, 40000),
@ -228,3 +227,32 @@ procSuite "Utp protocol over udp tests with loss and delays":
await clientProtocol.shutdownWait()
await serverProtocol.shutdownWait()
let testCase2 = @[
TestCase.init(45, 0, 40000),
TestCase.init(45, 0, 80000),
TestCase.init(25, 15, 40000),
TestCase.init(15, 5, 40000, SocketConfig.init(optRcvBuffer = uint32(10000), remoteWindowResetTimeout = seconds(5)))
]
asyncTest "Write large data and read till EOF":
for testCase in testCase2:
let (
clientProtocol,
clientSocket,
serverProtocol,
serverSocket) = await testScenario(testCase.maxDelay, testCase.dropRate, testcase.cfg)
let numBytes = testCase.bytesToTransfer
let bytesToTransfer = generateByteArray(rng[], numBytes)
discard await clientSocket.write(bytesToTransfer)
clientSocket.close()
let read = await serverSocket.read()
check:
read == bytesToTransfer
await clientProtocol.shutdownWait()
await serverProtocol.shutdownWait()

View File

@ -29,6 +29,9 @@ proc test(args, path: string) =
# " -d:asyncBackend=asyncdispatch " & common_args & " " & path
task test, "Run all tests":
test "-d:debug --threads:off", "tests/all_tests"
test "-d:release --threads:off", "tests/all_tests"
test "-d:danger --threads:off", "tests/all_tests"
test "-d:debug --threads:on", "tests/all_tests"
test "-d:release --threads:on", "tests/all_tests"
test "-d:danger --threads:on", "tests/all_tests"

View File

@ -11,7 +11,7 @@ type
startAddr*, endAddr*: ptr byte
Page* = object
consumedTo*: Natural
consumedTo*: int
writtenTo*: Natural
data*: ref string

View File

@ -442,7 +442,7 @@ proc fileInput*(filename: string,
return fileInput(file, offset, pageSize)
proc unsafeMemoryInput*(mem: openArray[byte]): InputStreamHandle =
let head = unsafeAddr mem[0]
let head = cast[ptr byte](mem)
makeHandle InputStream(
span: PageSpan(

View File

@ -121,7 +121,7 @@ proc base64decode*(i: InputStream, o: OutputStream) {.fsMultiSync.} =
inputChar(d)
outputChar(c shl 6 or d shr 0)
elif i.readable:
raise newException(ValueError, "The input stream has insufficient nymber of bytes for base64 decoding")
raise newException(ValueError, "The input stream has insufficient number of bytes for base64 decoding")
close o

View File

@ -400,3 +400,28 @@ suite "randomized tests":
if fileExists(randomBytesFileName):
removeFile randomBytesFileName
test "ensureRunway":
var output = memoryOutput()
const writes = 256
var buffer = newSeq[byte](writes)
let totalBytes = block:
var tmp = 0
for i in 0..<writes:
tmp += i
buffer[i] = byte(i)
tmp
output.ensureRunway(totalBytes)
for i in 0..<writes:
output.write(buffer.toOpenArray(0, i - 1))
output.flush()
let res = output.getOutput()
var j = 0
for i in 0..<writes:
check:
res[j..<j+i] == buffer[0..<i]
j += i

View File

@ -41,7 +41,7 @@ when fsAsyncSupport:
template timeit(timerVar: var Nanos, code: untyped) =
let t0 = getTicks()
code
timerVar = int(getTicks() - t0) div 1000000
timerVar = int64(getTicks() - t0) div 1000000
proc getOutput(sp: AsyncInputStream, T: type string): Future[string] {.async.} =
# this proc is a quick hack to let the test pass

View File

@ -1,39 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
build_script:
- cd C:\projects\%APPVEYOR_PROJECT_SLUG%
- nimble install -y
test_script:
- nimble test
deploy: off

View File

@ -1,27 +0,0 @@
language: c
# https://docs.travis-ci.com/user/caching/
cache:
directories:
- NimBinaries
git:
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
depth: 10
os:
- linux
- osx
install:
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
- export PATH=$PWD/Nim/bin:$PATH
script:
- nimble install -y
- nimble test

View File

@ -20,7 +20,13 @@ requires "nim >= 1.2.0",
proc buildBinary(name: string, srcDir = "./", params = "", cmdParams = "") =
if not dirExists "build":
mkDir "build"
exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") & " -r -f --skipUserCfg:on --skipParentCfg:on --verbosity:0 --hints:off --debuginfo --path:'.' --threads:on -d:chronicles_log_level=ERROR --out:./build/" & name & " " & params & " " & srcDir & name & ".nim" & " " & cmdParams
exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") &
" -r -f --skipUserCfg:on --skipParentCfg:on --verbosity:0" &
" --debuginfo --path:'.' --threads:on -d:chronicles_log_level=ERROR" &
" --styleCheck:usages --styleCheck:hint" &
" --hint[XDeclaredButNotUsed]:off --hint[Processing]:off " &
" --out:./build/" & name & " " & params & " " & srcDir & name & ".nim" &
" " & cmdParams
task test, "run tests":
buildBinary "all", "tests/",

View File

@ -87,7 +87,7 @@ proc createRpcProc(procName, parameters, callBody: NimNode): NimNode =
# make proc async
result.addPragma ident"async"
# export this proc
result[0] = nnkPostFix.newTree(ident"*", newIdentNode($procName))
result[0] = nnkPostfix.newTree(ident"*", newIdentNode($procName))
proc toJsonArray(parameters: NimNode): NimNode =
# outputs an array of jsonified parameters

View File

@ -65,17 +65,38 @@ method call*(client: RpcHttpClient, name: string,
let
id = client.getNextId()
reqBody = $rpcCallNode(name, params, id)
req = HttpClientRequestRef.post(client.httpSession,
client.httpAddress.get,
body = reqBody.toOpenArrayByte(0, reqBody.len - 1),
headers = headers)
res =
try:
await req.send()
except CancelledError as e:
raise e
except CatchableError as e:
raise (ref RpcPostError)(msg: "Failed to send POST Request with JSON-RPC.", parent: e)
var req: HttpClientRequestRef
var res: HttpClientResponseRef
defer:
# BEWARE!
# Using multiple defer statements in this function or multiple
# try/except blocks within a single defer statement doesn't
# produce the desired run-time code, so we use slightly bizarre
# code to ensure the exception safety of this function:
try:
var closeFutures = newSeq[Future[void]]()
if req != nil: closeFutures.add req.closeWait()
if res != nil: closeFutures.add res.closeWait()
if closeFutures.len > 0: await allFutures(closeFutures)
except CatchableError as err:
# TODO
# `close` functions shouldn't raise in general, but we first
# need to ensure this through exception tracking in Chronos
debug "Error closing JSON-RPC HTTP request/response", err = err.msg
req = HttpClientRequestRef.post(client.httpSession,
client.httpAddress.get,
body = reqBody.toOpenArrayByte(0, reqBody.len - 1),
headers = headers)
res =
try:
await req.send()
except CancelledError as e:
raise e
except CatchableError as e:
raise (ref RpcPostError)(msg: "Failed to send POST Request with JSON-RPC.", parent: e)
if res.status < 200 or res.status >= 300: # res.status is not 2xx (success)
raise newException(ErrorResponse, "POST Response: " & $res.status)

View File

@ -79,7 +79,7 @@ proc processData(client: RpcWebSocketClient) {.async.} =
else:
let ws = client.transport
try:
while ws.readystate != ReadyState.Closed:
while ws.readyState != ReadyState.Closed:
var value = await ws.recvMsg()
if value.len == 0:

View File

@ -69,7 +69,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var int) =
proc fromJson*[T: ref object](n: JsonNode, argName: string, result: var T) =
n.kind.expect(JObject, argName)
result = new T
for k, v in fieldpairs(result[]):
for k, v in fieldPairs(result[]):
fromJson(n[k], k, v)
proc fromJson*(n: JsonNode, argName: string, result: var int64) =
@ -165,7 +165,7 @@ iterator paramsIter(params: NimNode): tuple[name, ntype: NimNode] =
yield (arg[j], argType)
iterator paramsRevIter(params: NimNode): tuple[name, ntype: NimNode] =
for i in countDown(params.len-1,1):
for i in countdown(params.len-1,1):
let arg = params[i]
let argType = arg[^2]
for j in 0 ..< arg.len-2:

View File

@ -1,5 +1,5 @@
import
chronicles, httputils, chronos, websock/websock,
chronicles, httputils, chronos, websock/[websock, types],
websock/extensions/compression/deflate,
stew/byteutils, json_serialization/std/net,
".."/[errors, server]
@ -10,17 +10,52 @@ logScope:
topics = "JSONRPC-WS-SERVER"
type
RpcWebSocketServerAuth* = ##\
## Authenticator function. On error, the resulting `HttpCode` is sent back\
## to the client and the `string` argument will be used in an exception,\
## following.
proc(req: HttpTable): Result[void,(HttpCode,string)]
{.gcsafe, raises: [Defect].}
RpcWebSocketServer* = ref object of RpcServer
authHook: Option[RpcWebSocketServerAuth] ## Authorization call back handler
server: StreamServer
wsserver: WSServer
HookEx = ref object of Hook
handler: RpcWebSocketServerAuth ## from `RpcWebSocketServer`
request: HttpRequest ## current request needed for error response
proc authWithHtCodeResponse(ctx: Hook, headers: HttpTable):
Future[Result[void, string]] {.async, gcsafe, raises: [Defect].} =
## Wrapper around authorization handler which is stored in the
## extended `Hook` object.
let
cty = ctx.HookEx
rc = cty.handler(headers)
if rc.isErr:
await cty.request.stream.writer.sendError(rc.error[0])
return err(rc.error[1])
return ok()
proc handleRequest(rpc: RpcWebSocketServer, request: HttpRequest) {.async.} =
trace "Handling request:", uri = request.uri.path
trace "Initiating web socket connection."
# Authorization handler constructor (if enabled)
var hooks: seq[Hook]
if rpc.authHook.isSome:
let hookEx = HookEx(
append: nil,
request: request,
handler: rpc.authHook.get,
verify: authWithHtCodeResponse)
hooks = @[hookEx.Hook]
try:
let server = rpc.wsserver
let ws = await server.handleRequest(request)
if ws.readyState != Open:
let ws = await server.handleRequest(request, hooks = hooks)
if ws.readyState != ReadyState.Open:
error "Failed to open websocket connection"
return
@ -58,24 +93,26 @@ proc handleRequest(rpc: RpcWebSocketServer, request: HttpRequest) {.async.} =
except WebSocketError as exc:
error "WebSocket error:", exception = exc.msg
proc initWebsocket(rpc: RpcWebSocketServer, compression: bool) =
proc initWebsocket(rpc: RpcWebSocketServer, compression: bool,
authHandler: Option[RpcWebSocketServerAuth]) =
if compression:
let deflateFactory = deflateFactory()
rpc.wsserver = WSServer.new(factories = [deflateFactory])
else:
rpc.wsserver = WSServer.new()
rpc.authHook = authHandler
proc newRpcWebSocketServer*(
address: TransportAddress,
compression: bool = false,
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,
ServerFlags.ReuseAddr}): RpcWebSocketServer =
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,ServerFlags.ReuseAddr},
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
var server = new(RpcWebSocketServer)
proc processCallback(request: HttpRequest): Future[void] =
handleRequest(server, request)
server.initWebsocket(compression)
server.initWebsocket(compression, authHandler)
server.server = HttpServer.create(
address,
processCallback,
@ -88,13 +125,14 @@ proc newRpcWebSocketServer*(
host: string,
port: Port,
compression: bool = false,
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay,
ServerFlags.ReuseAddr}): RpcWebSocketServer =
flags: set[ServerFlags] = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr},
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
newRpcWebSocketServer(
initTAddress(host, port),
compression,
flags
flags,
authHandler
)
proc newRpcWebSocketServer*(
@ -106,13 +144,14 @@ proc newRpcWebSocketServer*(
ServerFlags.ReuseAddr},
tlsFlags: set[TLSFlags] = {},
tlsMinVersion = TLSVersion.TLS12,
tlsMaxVersion = TLSVersion.TLS12): RpcWebSocketServer =
tlsMaxVersion = TLSVersion.TLS12,
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
var server = new(RpcWebSocketServer)
proc processCallback(request: HttpRequest): Future[void] =
handleRequest(server, request)
server.initWebsocket(compression)
server.initWebsocket(compression, authHandler)
server.server = TlsHttpServer.create(
address,
tlsPrivateKey,
@ -136,7 +175,8 @@ proc newRpcWebSocketServer*(
ServerFlags.ReuseAddr},
tlsFlags: set[TLSFlags] = {},
tlsMinVersion = TLSVersion.TLS12,
tlsMaxVersion = TLSVersion.TLS12): RpcWebSocketServer =
tlsMaxVersion = TLSVersion.TLS12,
authHandler = none(RpcWebSocketServerAuth)): RpcWebSocketServer =
newRpcWebSocketServer(
initTAddress(host, port),
@ -146,7 +186,8 @@ proc newRpcWebSocketServer*(
flags,
tlsFlags,
tlsMinVersion,
tlsMaxVersion
tlsMaxVersion,
authHandler
)
proc start*(server: RpcWebSocketServer) =

View File

@ -43,7 +43,7 @@ proc addEthRpcs*(server: RpcServer) =
var rawData: seq[byte]
rawData = nimcrypto.fromHex(data.string)
# data will have 0x prefix
result = hexDataStr "0x" & $keccak_256.digest(rawData)
result = hexDataStr "0x" & $keccak256.digest(rawData)
server.rpc("net_version") do() -> string:
## Returns string of the current network id:

View File

@ -42,8 +42,8 @@ type
gasLimit*: int # the maximum gas allowed in this block.
gasUsed*: int # the total used gas by all transactions in this block.
timestamp*: int # the unix timestamp for when the block was collated.
transactions*: seq[Uint256] # list of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter.
uncles*: seq[Uint256] # list of uncle hashes.
transactions*: seq[UInt256] # list of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter.
uncles*: seq[UInt256] # list of uncle hashes.
TransactionObject* = object # A transaction object, or null when no transaction was found:
hash*: UInt256 # hash of the transaction.

View File

@ -22,7 +22,7 @@ func rpcDynamicName(name: string): string =
## Create custom RPC with StUint input parameter
server.rpc(rpcDynamicName "uint256Param") do(i: UInt256):
let r = i + 1.stUint(256)
let r = i + 1.stuint(256)
return %r
## Create custom RPC with StUInt return parameter

View File

@ -153,39 +153,39 @@ suite "Server types":
check r == inp
test "Array parameters":
let r1 = waitfor s.executeMethod("rpc.arrayParam", %[%[1, 2, 3], %"hello"])
let r1 = waitFor s.executeMethod("rpc.arrayParam", %[%[1, 2, 3], %"hello"])
var ckR1 = %[1, 2, 3, 0, 0, 0]
ckR1.elems.add %"hello"
check r1 == ckR1
test "Seq parameters":
let r2 = waitfor s.executeMethod("rpc.seqParam", %[%"abc", %[1, 2, 3, 4, 5]])
let r2 = waitFor s.executeMethod("rpc.seqParam", %[%"abc", %[1, 2, 3, 4, 5]])
var ckR2 = %["abc"]
for i in 0..4: ckR2.add %(i + 1)
check r2 == ckR2
test "Object parameters":
let r = waitfor s.executeMethod("rpc.objParam", %[%"abc", testObj])
let r = waitFor s.executeMethod("rpc.objParam", %[%"abc", testObj])
check r == testObj
test "Simple return types":
let
inp = %99
r1 = waitfor s.executeMethod("rpc.returnTypeSimple", %[%inp])
r1 = waitFor s.executeMethod("rpc.returnTypeSimple", %[%inp])
check r1 == inp
test "Complex return types":
let
inp = 99
r1 = waitfor s.executeMethod("rpc.returnTypeComplex", %[%inp])
r1 = waitFor s.executeMethod("rpc.returnTypeComplex", %[%inp])
check r1 == %*{"x": %[1, inp, 3], "y": "test"}
test "Option types":
let
inp1 = MyOptional(maybeInt: some(75))
inp2 = MyOptional()
r1 = waitfor s.executeMethod("rpc.optional", %[%inp1])
r2 = waitfor s.executeMethod("rpc.optional", %[%inp2])
r1 = waitFor s.executeMethod("rpc.optional", %[%inp1])
r2 = waitFor s.executeMethod("rpc.optional", %[%inp2])
check r1 == %inp1
check r2 == %inp2
@ -196,19 +196,19 @@ suite "Server types":
test "Runtime errors":
expect ValueError:
# root param not array
discard waitfor s.executeMethod("rpc.arrayParam", %"test")
discard waitFor s.executeMethod("rpc.arrayParam", %"test")
expect ValueError:
# too big for array
discard waitfor s.executeMethod("rpc.arrayParam", %[%[0, 1, 2, 3, 4, 5, 6], %"hello"])
discard waitFor s.executeMethod("rpc.arrayParam", %[%[0, 1, 2, 3, 4, 5, 6], %"hello"])
expect ValueError:
# wrong sub parameter type
discard waitfor s.executeMethod("rpc.arrayParam", %[%"test", %"hello"])
discard waitFor s.executeMethod("rpc.arrayParam", %[%"test", %"hello"])
expect ValueError:
# wrong param type
discard waitFor s.executeMethod("rpc.differentParams", %[%"abc", %1])
test "Multiple variables of one type":
let r = waitfor s.executeMethod("rpc.multiVarsOfOneType", %[%"hello", %"world"])
let r = waitFor s.executeMethod("rpc.multiVarsOfOneType", %[%"hello", %"world"])
check r == %"hello world"
test "Optional arg":

View File

@ -183,7 +183,8 @@ clean:
cd vendor/libbacktrace-upstream && \
{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; }
cd vendor/libunwind && \
{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; }
{ [[ -e Makefile ]] && $(MAKE) clean $(HANDLE_OUTPUT) || true; } && \
rm -rf CMakeCache.txt CMakeFiles cmake_install.cmake install_manifest.txt Makefile
$(SILENT_TARGET_PREFIX).SILENT:

View File

@ -11,6 +11,9 @@ overhead, by adding `nimln_()`, `nimfr_()` calls all over the place. The
problem is being discussed upstream in [this GitHub
issue](https://github.com/nim-lang/Nim/issues/12702).
In practice, you can get as much as 66% improved performance by disabling the
default stack tracing: https://github.com/status-im/nimbus-eth2/pull/3466
That `popFrame()` at the end of each C function is particularly problematic,
since it prevents the C compiler from doing tail-call optimisations.

View File

@ -21,26 +21,28 @@ when defined(nimStackTraceOverride) and defined(nimHasStacktracesModule):
# there, but we might still want to import this module with a global
# "--import:libbacktrace" Nim compiler flag.
when not (defined(nimscript) or defined(js)):
import algorithm, libbacktrace_wrapper, os, system/ansi_c
import algorithm, libbacktrace_wrapper, os, system/ansi_c, strutils
const installPath = currentSourcePath.parentDir() / "install" / "usr"
const
topLevelPath = currentSourcePath.parentDir().replace('\\', '/')
installPath = topLevelPath & "/install/usr"
{.passc: "-I" & currentSourcePath.parentDir().}
{.passc: "-I" & topLevelPath.}
when defined(cpp):
{.passl: installPath / "lib" / "libbacktracenimcpp.a".}
{.passl: installPath & "/lib/libbacktracenimcpp.a".}
else:
{.passl: installPath / "lib" / "libbacktracenim.a".}
{.passl: installPath & "/lib/libbacktracenim.a".}
when defined(libbacktraceUseSystemLibs):
{.passl: "-lbacktrace".}
when defined(macosx) or defined(windows):
{.passl: "-lunwind".}
else:
{.passc: "-I" & installPath / "include".}
{.passl: installPath / "lib" / "libbacktrace.a".}
{.passc: "-I" & installPath & "/include".}
{.passl: installPath & "/lib/libbacktrace.a".}
when defined(macosx) or defined(windows):
{.passl: installPath / "lib" / "libunwind.a".}
{.passl: installPath & "/lib/libunwind.a".}
when defined(windows):
{.passl: "-lpsapi".}

View File

@ -2,7 +2,7 @@
# libtool - Provide generalized library-building support services.
# Generated automatically by config.status (libbacktrace) version-unused
# Libtool was configured on host fv-az457-821:
# Libtool was configured on host fv-az129-611:
# NOTE: Changes made to this file will be lost: look at ltmain.sh.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,

View File

@ -154,7 +154,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '^1.15.5'
go-version: '~1.15.5'
- name: Install p2pd
run: |

View File

@ -152,7 +152,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '^1.15.5'
go-version: '~1.15.5'
- name: Install p2pd
run: |

View File

@ -1,17 +1,17 @@
asynctest;https://github.com/markspanbroek/asynctest@#3882ed64ed3159578f796bc5ae0c6b13837fe798
bearssl;https://github.com/status-im/nim-bearssl@#ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7
asynctest;https://github.com/markspanbroek/asynctest@#5347c59b4b057443a014722aa40800cd8bb95c69
bearssl;https://github.com/status-im/nim-bearssl@#65b74302e03912ab5bde64b6da10d05896139007
chronicles;https://github.com/status-im/nim-chronicles@#2a2681b60289aaf7895b7056f22616081eb1a882
chronos;https://github.com/status-im/nim-chronos@#87197230779002a2bfa8642f0e2ae07e2349e304
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fbb76f8af8a33ab818184a7d4406d9fee20993be
faststreams;https://github.com/status-im/nim-faststreams@#37a183153c071539ab870f427c09a1376ba311b9
faststreams;https://github.com/status-im/nim-faststreams@#c80701f7d23815fab0b6362569f3195957e57856
httputils;https://github.com/status-im/nim-http-utils@#40048e8b3e69284bdb5d4daa0a16ad93402c55db
json_serialization;https://github.com/status-im/nim-json-serialization@#4b8f487d2dfdd941df7408ceaa70b174cce02180
metrics;https://github.com/status-im/nim-metrics@#71e0f0e354e1f4c59e3dc92153989c8b723c3440
json_serialization;https://github.com/status-im/nim-json-serialization@#461fd03edb300b7946544b34442d1a05d4ef2270
metrics;https://github.com/status-im/nim-metrics@#11edec862f96e42374bc2d584c84cc88d5d1f95f
nimcrypto;https://github.com/cheatfate/nimcrypto@#a5742a9a214ac33f91615f3862c7b099aec43b00
secp256k1;https://github.com/status-im/nim-secp256k1@#e092373a5cbe1fa25abfc62e0f2a5f138dc3fb13
serialization;https://github.com/status-im/nim-serialization@#37bc0db558d85711967acb16e9bb822b06911d46
stew;https://github.com/status-im/nim-stew@#bb705bf17b46d2c8f9bfb106d9cc7437009a2501
serialization;https://github.com/status-im/nim-serialization@#9631fbd1c81c8b25ff8740df440ca7ba87fa6131
stew;https://github.com/status-im/nim-stew@#419903c9a31ab253cf5cf19f24d9a912dc4b5154
testutils;https://github.com/status-im/nim-testutils@#aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2
unittest2;https://github.com/status-im/nim-unittest2@#4e2893eacb916c7678fdc4935ff7420f13bf3a9c
websock;https://github.com/status-im/nim-websock@#853299e399746eff4096870067cbc61861ecd534
websock;https://github.com/status-im/nim-websock@#4a7a058843cdb7a6a4fd25a55f0959c51b0b5847
zlib;https://github.com/status-im/nim-zlib@#74cdeb54b21bededb5a515d36f608bc1850555a2

View File

@ -27,7 +27,7 @@ const nimflags =
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics "
var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
excstr.add(" " & getEnv("NIMFLAGS") & " ")
excstr.add(" " & nimflags & " ")
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)

View File

@ -21,6 +21,12 @@ export connection
logScope:
topics = "libp2p mplexchannel"
when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qlen, "message queue length",
buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
declareHistogram libp2p_mplex_qtime, "message queuing time"
when defined(libp2p_network_protocols_metrics):
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
@ -187,6 +193,8 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
if s.writes >= MaxWrites:
debug "Closing connection, too many in-flight writes on channel",
s, conn = s.conn, writes = s.writes
when defined(libp2p_mplex_metrics):
libp2p_mplex_qlenclose.inc()
await s.reset()
await s.conn.close()
return
@ -201,8 +209,14 @@ proc completeWrite(
try:
s.writes += 1
await fut
when defined(libp2p_network_protocols_metrics):
when defined(libp2p_mplex_metrics):
libp2p_mplex_qlen.observe(s.writes.int64 - 1)
libp2p_mplex_qtime.time:
await fut
else:
await fut
when defined(libp2p_network_protocol_metrics):
if s.tag.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.tag, "out"])

View File

@ -38,6 +38,8 @@ logScope:
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
proc init*(_: type[GossipSubParams]): GossipSubParams =
GossipSubParams(
@ -385,9 +387,13 @@ method rpcHandler*(g: GossipSub,
g.validationSeen.withValue(msgIdSalted, seen): seen[].incl(peer)
libp2p_gossipsub_duplicate.inc()
# onto the next message
continue
libp2p_gossipsub_received.inc()
# avoid processing messages we are not interested in
if msg.topicIDs.allIt(it notin g.topics):
debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer

View File

@ -25,6 +25,7 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no
declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups")
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
@ -276,12 +277,15 @@ proc handleIWant*(g: GossipSub,
trace "peer sent iwant", peer, messageID = mid
let msg = g.mcache.get(mid)
if msg.isSome:
libp2p_gossipsub_mcache_hit.observe(1)
# avoid spam
if peer.iWantBudget > 0:
messages.add(msg.get())
dec peer.iWantBudget
else:
break
else:
libp2p_gossipsub_mcache_hit.observe(0)
return messages
proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =

File diff suppressed because it is too large Load Diff

View File

@ -91,8 +91,8 @@ jobs:
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls-${{ matrix.target.cpu }}
key: 'dlls-${{ matrix.target.cpu }}'
path: external/dlls
key: 'dlls-v3'
- name: Install DLLs dependencies (Windows)
if: >

View File

@ -186,6 +186,41 @@ myHistogram.time:
histogram("one_off_histogram").observe(10)
```
### Custom collectors
Sometimes you need to create metrics on the fly, with a custom `collect()`
method of a custom collector type.
Let's say you have an USB-attached power meter and, for some reason, you want
to read the power consumption every time Prometheus reads your metrics:
```nim
import metrics, times
when defined(metrics):
type PowerCollector = ref object of Gauge
var powerCollector = PowerCollector.newCollector(name = "power_usage", help = "Instantaneous power usage - in watts.")
method collect(collector: PowerCollector): Metrics =
let timestamp = getTime().toMilliseconds()
result[@[]] = @[
Metric(
name: "power_usage",
value: getPowerUsage(), # your power-meter reader
timestamp: timestamp,
)
]
```
There's a bit of repetition in the collector and metric names, because we no
longer have behind-the-scenes name copying/deriving there.
You can output multiple metrics from your custom `collect()` method. It's
perfectly legal and we do that internally for our system/runtime metrics.
Try not to get creative with dynamic metric names - Prometheus has a hard time
dealing with that.
## Labels
Metric labels are supported for the Prometheus backend, as a way to add extra

View File

@ -20,6 +20,7 @@ import locks, net, os, sets, tables, times
when defined(metrics):
import algorithm, hashes, random, sequtils, strutils,
metrics/common
export tables # for custom collectors that need to work with the "Metrics" type
when defined(posix):
import posix
@ -210,11 +211,15 @@ proc `$`*(collector: type IgnoredCollector): string = ""
# for testing
template value*(collector: Collector | type IgnoredCollector, labelValues: LabelsParam = @[]): float64 =
var res: float64
when defined(metrics) and collector is not IgnoredCollector:
{.gcsafe.}:
collector.metrics[@labelValues][0].value
# Don't access the "metrics" field directly, so we can support custom
# collectors.
withLock collector.lock:
res = collector.collect()[@labelValues][0].value
else:
0.0
res = 0.0
res
# for testing
proc valueByName*(collector: Collector | type IgnoredCollector,
@ -223,9 +228,10 @@ proc valueByName*(collector: Collector | type IgnoredCollector,
extraLabelValues: LabelsParam = @[]): float64 {.raises: [Defect, ValueError].} =
when defined(metrics) and collector is not IgnoredCollector:
let allLabelValues = @labelValues & @extraLabelValues
for metric in collector.metrics[@labelValues]:
if metric.name == metricName and metric.labelValues == allLabelValues:
return metric.value
withLock collector.lock:
for metric in collector.collect()[@labelValues]:
if metric.name == metricName and metric.labelValues == allLabelValues:
return metric.value
raise newException(KeyError, "No such metric name for this collector: '" & metricName & "' (label values = " & $allLabelValues & ").")
############
@ -267,6 +273,7 @@ proc collect*(registry: Registry): OrderedTable[Collector, Metrics] =
var collectorCopy: Collector
withLock collector.lock:
deepCopy(collectorCopy, collector)
collectorCopy.lock.initLock()
result[collectorCopy] = collectorCopy.collect()
proc toText*(registry: Registry, showTimestamp = true): string =
@ -280,6 +287,28 @@ proc toText*(registry: Registry, showTimestamp = true): string =
proc `$`*(registry: Registry): string =
registry.toText()
#####################
# custom collectors #
#####################
when defined(metrics):
# Used for custom collectors, to shield the API user from having to deal with
# internal details like lock initialisation.
# Also used internally, for creating standard collectors, to avoid code
# duplication.
proc newCollector* [T] (typ: typedesc[T], name: string, help: string, labels: LabelsParam = @[],
registry = defaultRegistry, standardType = "gauge"): T
{.raises: [Defect, ValueError, RegistrationError].} =
validateName(name)
validateLabels(labels)
result = T(name: name,
help: help,
typ: standardType, # Prometheus does not support a non-standard value here
labels: @labels,
creationThreadId: getThreadId())
result.lock.initLock()
result.register(registry)
#######################################
# export metrics to StatsD and Carbon #
#######################################
@ -488,19 +517,13 @@ when defined(metrics):
# don't document this one, even if we're forced to make it public, because it
# won't work when all (or some) collectors are disabled
proc newCounter*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry, sampleRate = 1.float): Counter {.raises: [Defect, ValueError, RegistrationError].} =
validateName(name)
validateLabels(labels)
result = Counter(name: name,
help: help,
typ: "counter",
labels: @labels,
creationThreadId: getThreadId(),
sampleRate: sampleRate)
result.lock.initLock()
proc newCounter*(name: string, help: string, labels: LabelsParam = @[],
registry = defaultRegistry, sampleRate = 1.float): Counter
{.raises: [Defect, ValueError, RegistrationError].} =
result = Counter.newCollector(name, help, labels, registry, "counter")
result.sampleRate = sampleRate
if labels.len == 0:
result.metrics[@labels] = newCounterMetrics(name, labels, labels)
result.register(registry)
template declareCounter*(identifier: untyped,
help: static string,
@ -615,18 +638,12 @@ when defined(metrics):
if result notin gauge.metrics:
gauge.metrics[result] = newGaugeMetrics(gauge.name, gauge.labels, result)
proc newGauge*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry): Gauge {.raises: [Defect, ValueError, RegistrationError].} =
validateName(name)
validateLabels(labels)
result = Gauge(name: name,
help: help,
typ: "gauge",
labels: @labels,
creationThreadId: getThreadId())
result.lock.initLock()
proc newGauge*(name: string, help: string, labels: LabelsParam = @[],
registry = defaultRegistry): Gauge
{.raises: [Defect, ValueError, RegistrationError].} =
result = Gauge.newCollector(name, help, labels, registry, "gauge")
if labels.len == 0:
result.metrics[@labels] = newGaugeMetrics(name, labels, labels)
result.register(registry)
template declareGauge*(identifier: untyped,
help: static string,
@ -769,18 +786,13 @@ when defined(metrics):
if result notin summary.metrics:
summary.metrics[result] = newSummaryMetrics(summary.name, summary.labels, result)
proc newSummary*(name: string, help: string, labels: LabelsParam = @[], registry = defaultRegistry): Summary {.raises: [Defect, ValueError, RegistrationError].} =
validateName(name)
proc newSummary*(name: string, help: string, labels: LabelsParam = @[],
registry = defaultRegistry): Summary
{.raises: [Defect, ValueError, RegistrationError].} =
validateLabels(labels, invalidLabelNames = ["quantile"])
result = Summary(name: name,
help: help,
typ: "summary",
labels: @labels,
creationThreadId: getThreadId())
result.lock.initLock()
result = Summary.newCollector(name, help, labels, registry, "summary")
if labels.len == 0:
result.metrics[@labels] = newSummaryMetrics(name, labels, labels)
result.register(registry)
template declareSummary*(identifier: untyped,
help: static string,
@ -873,12 +885,9 @@ when defined(metrics):
if result notin histogram.metrics:
histogram.metrics[result] = newHistogramMetrics(histogram.name, histogram.labels, result, histogram.buckets)
proc newHistogram*(name: string,
help: string,
labels: LabelsParam = @[],
registry = defaultRegistry,
buckets: openArray[float64] = defaultHistogramBuckets): Histogram {.raises: [Defect, ValueError, RegistrationError].} =
validateName(name)
proc newHistogram*(name: string, help: string, labels: LabelsParam = @[],
registry = defaultRegistry, buckets: openArray[float64] = defaultHistogramBuckets): Histogram
{.raises: [Defect, ValueError, RegistrationError].} =
validateLabels(labels, invalidLabelNames = ["le"])
var bucketsSeq = @buckets
if bucketsSeq.len > 0 and bucketsSeq[^1] != Inf:
@ -887,16 +896,10 @@ when defined(metrics):
raise newException(ValueError, "Invalid buckets list: '" & $bucketsSeq & "'. At least 2 required.")
if not bucketsSeq.isSorted(system.cmp[float64]):
raise newException(ValueError, "Invalid buckets list: '" & $bucketsSeq & "'. Must be sorted.")
result = Histogram(name: name,
help: help,
typ: "histogram",
labels: @labels,
creationThreadId: getThreadId(),
buckets: bucketsSeq)
result.lock.initLock()
result = Histogram.newCollector(name, help, labels, registry, "histogram")
result.buckets = bucketsSeq
if labels.len == 0:
result.metrics[@labels] = newHistogramMetrics(name, labels, labels, bucketsSeq)
result.register(registry)
template declareHistogram*(identifier: untyped,
help: static string,
@ -1023,7 +1026,7 @@ when defined(metrics) and defined(linux):
pagesize = sysconf(SC_PAGE_SIZE).float64
type ProcessInfo = ref object of Gauge
var processInfo* {.global.} = ProcessInfo.buildCollector("process_info", "CPU and memory usage")
var processInfo* {.global.} = ProcessInfo.newCollector("process_info", "CPU and memory usage")
method collect*(collector: ProcessInfo): Metrics =
let timestamp = getTime().toMilliseconds()
@ -1083,15 +1086,13 @@ when defined(metrics) and defined(linux):
except CatchableError as e:
printError(e.msg)
processInfo.register(defaultRegistry)
####################
# Nim runtime info #
####################
when defined(metrics):
type NimRuntimeInfo = ref object of Gauge
var nimRuntimeInfo* {.global.} = NimRuntimeInfo.buildCollector("nim_runtime_info", "Nim runtime info")
var nimRuntimeInfo* {.global.} = NimRuntimeInfo.newCollector("nim_runtime_info", "Nim runtime info")
method collect*(collector: NimRuntimeInfo): Metrics =
let timestamp = getTime().toMilliseconds()
@ -1144,8 +1145,6 @@ when defined(metrics):
except CatchableError as e:
printError(e.msg)
nimRuntimeInfo.register(defaultRegistry)
declareGauge nim_gc_mem_bytes, "the number of bytes that are owned by a thread's GC", ["thread_id"]
declareGauge nim_gc_mem_occupied_bytes, "the number of bytes that are owned by a thread's GC and hold data", ["thread_id"]

View File

@ -1,4 +1,4 @@
# Copyright (c) 2019 Status Research & Development GmbH
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license: http://opensource.org/licenses/MIT
# * Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0
@ -339,6 +339,33 @@ suite "registry":
declareCounter duplicate_counter, "duplicate counter"
duplicate_counter.inc()
when defined(metrics):
type MyCustomCollector = ref object of Gauge
var
myCustomCollector = MyCustomCollector.newCollector("my_custom_collector", "help")
registry2 = newRegistry()
myCustomCollector2 = MyCustomCollector.newCollector("my_custom_collector2", "help2", registry = registry2)
method collect(collector: MyCustomCollector): Metrics =
let timestamp = getTime().toMilliseconds()
result[@[]] = @[
Metric(
name: "custom_metric",
value: 42,
timestamp: timestamp,
)
]
suite "custom collectors":
test "42":
check myCustomCollector.value == 42
test "custom registry":
let collectors = registry2.collect()
check collectors.len == 1
for collector, metrics in collectors:
check collector.value == 42
suite "system metrics":
test "change update interval":
when defined(metrics):

View File

@ -1,4 +1,4 @@
# Copyright (c) 2019 Status Research & Development GmbH
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -12,19 +12,20 @@
{.push raises: [Defect].}
import ./utils
import std/strutils,
./utils
when defined(miniupnpcUseSystemLibs):
{.passC: staticExec("pkg-config --cflags miniupnpc").}
{.passL: staticExec("pkg-config --libs miniupnpc").}
else:
import os
const includePath = currentSourcePath.parentDir().parentDir() / "vendor" / "miniupnp" / "miniupnpc"
const includePath = currentSourcePath.parentDir().parentDir().replace('\\', '/') & "/vendor/miniupnp/miniupnpc"
{.passC: "-I" & includePath.}
# We can't use the {.link.} pragma in here, because it would place the static
# library archive as the first object to be linked, which would lead to all
# its exported symbols being ignored. We move it into the last position with {.passL.}.
{.passL: includePath / "libminiupnpc.a".}
{.passL: includePath & "/libminiupnpc.a".}
when defined(windows):
import nativesockets # for that wsaStartup() call at the end
@ -517,7 +518,7 @@ proc UPNPIGD_IsConnected*(a1: ptr UPNPUrls; a2: ptr IGDdatas): cint {.
# custom wrappers #
###################
import stew/results, std/strutils
import stew/results
export results
type Miniupnp* = ref object

View File

@ -1,4 +1,4 @@
# Copyright (c) 2019 Status Research & Development GmbH
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -12,7 +12,7 @@
{.push raises: [Defect].}
import os
import os, strutils
when defined(windows):
import winlean
else:
@ -21,9 +21,9 @@ else:
when defined(libnatpmpUseSystemLibs):
{.passL: "-lnatpmp".}
else:
const includePath = currentSourcePath.parentDir().parentDir() / "vendor" / "libnatpmp-upstream"
const includePath = currentSourcePath.parentDir().parentDir().replace('\\', '/') & "/vendor/libnatpmp-upstream"
{.passC: "-I" & includePath.}
{.passL: includePath / "libnatpmp.a".}
{.passL: includePath & "/libnatpmp.a".}
when defined(windows):
import nativesockets # for that wsaStartup() call at the end

View File

@ -1,43 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
build_script:
- cd C:\projects\%APPVEYOR_PROJECT_SLUG%
- nimble install -y
test_script:
- nimble test
- IF "%PLATFORM%" == "x86" CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86
- IF "%PLATFORM%" == "x64" CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64
- IF "%PLATFORM%" == "x64" CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86_amd64
- nimble testvcc
deploy: off

View File

@ -1,27 +0,0 @@
language: c
# https://docs.travis-ci.com/user/caching/
cache:
directories:
- NimBinaries
git:
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
depth: 10
os:
- linux
- osx
install:
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
- export PATH=$PWD/Nim/bin:$PATH
script:
- nimble install -y
- nimble test

View File

@ -60,7 +60,7 @@
# with a recursive tree structure.
# On the other side, returning a `var array[N div 2, uint64]` is problematic at the moment.
# - Compile-time computation is possible while due to the previous issue
# an array backend would be required to use var openarray[uint64]
# an array backend would be required to use var openArray[uint64]
# i.e. pointers.
# - Note that while shift-right and left can easily be done an array of bytes
# this would have reduced performance compared to moving 64-bit words.

View File

@ -130,7 +130,7 @@ proc composeOutputs(test: TestSpec, stdout: string): TestOutputs =
if name == "stdout":
result[name] = stdout
else:
if not existsFile(name):
if not fileExists(name):
continue
result[name] = readFile(name)
removeFile(name)
@ -161,7 +161,7 @@ proc cmpOutputs(test: TestSpec, outputs: TestOutputs): TestStatus =
proc compile(test: TestSpec; backend: string): TestStatus =
## compile the test program for the requested backends
block:
if not existsFile(test.source):
if not fileExists(test.source):
logFailure(test, SourceFileNotFound)
result = FAILED
break

View File

@ -1,40 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
- npm install -g ganache-cli
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
build_script:
- cd C:\projects\%APPVEYOR_PROJECT_SLUG%
- nimble install -y
test_script:
- ps: Start-Process ganache-cli.cmd -ArgumentList "-s 0"
- nimble test
deploy: off

View File

@ -1,28 +0,0 @@
language: c
osx_image: xcode10.2
sudo: false
# https://docs.travis-ci.com/user/caching/
cache:
directories:
- NimBinaries
git:
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
depth: 10
os:
- linux
- osx
install:
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries
- export PATH=$PWD/Nim/bin:$PATH
script:
- ./ci-test.sh

View File

@ -42,8 +42,8 @@ contract NumberStorage {
}
]#
contract(NumberStorage):
proc setNumber(number: Uint256)
proc getNumber(): Uint256 {.view.}
proc setNumber(number: UInt256)
proc getNumber(): UInt256 {.view.}
const NumberStorageCode = "6060604052341561000f57600080fd5b60bb8061001d6000396000f30060606040526004361060485763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416633fb5c1cb8114604d578063f2c9ecd8146062575b600080fd5b3415605757600080fd5b60606004356084565b005b3415606c57600080fd5b60726089565b60405190815260200160405180910390f35b600055565b600054905600a165627a7a7230582023e722f35009f12d5698a4ab22fb9d55a6c0f479fc43875c65be46fbdd8db4310029"
@ -73,9 +73,9 @@ contract MetaCoin {
}
]#
contract(MetaCoin):
proc sendCoin(receiver: Address, amount: Uint256): Bool
proc getBalance(address: Address): Uint256 {.view.}
proc Transfer(fromAddr, toAddr: indexed[Address], value: Uint256) {.event.}
proc sendCoin(receiver: Address, amount: UInt256): Bool
proc getBalance(address: Address): UInt256 {.view.}
proc Transfer(fromAddr, toAddr: indexed[Address], value: UInt256) {.event.}
proc BlaBla(fromAddr: indexed[Address]) {.event.}
const MetaCoinCode = "608060405234801561001057600080fd5b5032600090815260208190526040902061271090556101c2806100346000396000f30060806040526004361061004b5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166390b98a118114610050578063f8b2cb4f14610095575b600080fd5b34801561005c57600080fd5b5061008173ffffffffffffffffffffffffffffffffffffffff600435166024356100d5565b604080519115158252519081900360200190f35b3480156100a157600080fd5b506100c373ffffffffffffffffffffffffffffffffffffffff6004351661016e565b60408051918252519081900360200190f35b336000908152602081905260408120548211156100f457506000610168565b336000818152602081815260408083208054879003905573ffffffffffffffffffffffffffffffffffffffff871680845292819020805487019055805186815290519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35060015b92915050565b73ffffffffffffffffffffffffffffffffffffffff16600090815260208190526040902054905600a165627a7a72305820000313ec0ebbff4ffefbe79d615d0ab019d8566100c40eb95a4eee617a87d1090029"
@ -149,7 +149,7 @@ suite "Contracts":
var notificationsReceived = 0
let s = await ns.subscribe(Transfer) do (
fromAddr, toAddr: Address, value: Uint256)
fromAddr, toAddr: Address, value: UInt256)
{.raises: [Defect], gcsafe.}:
try:
echo "onTransfer: ", fromAddr, " transferred ", value, " to ", toAddr

View File

@ -21,8 +21,8 @@ contract LoggerContract {
}
]#
contract(LoggerContract):
proc MyEvent(sender: Address, number: Uint256) {.event.}
proc invoke(value: Uint256)
proc MyEvent(sender: Address, number: UInt256) {.event.}
proc invoke(value: UInt256)
const LoggerContractCode = "6080604052348015600f57600080fd5b5060bc8061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80632b30d2b814602d575b600080fd5b604760048036036020811015604157600080fd5b50356049565b005b604080513381526020810183905281517fdf50c7bb3b25f812aedef81bc334454040e7b27e27de95a79451d663013b7e17929181900390910190a15056fea265627a7a723058202ed7f5086297d2a49fbe359f4e489a007b69eb5077f5c76328bffdb63f164b4b64736f6c63430005090032"
@ -67,7 +67,7 @@ suite "Logs":
var notificationsReceived = 0
let s = await ns.subscribe(MyEvent, %*{"fromBlock": "0x0"}) do (
sender: Address, value: Uint256)
sender: Address, value: UInt256)
{.raises: [Defect], gcsafe.}:
try:
echo "onEvent: ", sender, " value ", value

View File

@ -19,8 +19,8 @@ contract NumberStorage {
}
]#
contract(NumberStorage):
proc setNumber(number: Uint256)
proc getNumber(): Uint256 {.view.}
proc setNumber(number: UInt256)
proc getNumber(): UInt256 {.view.}
const NumberStorageCode = "6060604052341561000f57600080fd5b60bb8061001d6000396000f30060606040526004361060485763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416633fb5c1cb8114604d578063f2c9ecd8146062575b600080fd5b3415605757600080fd5b60606004356084565b005b3415606c57600080fd5b60726089565b60405190815260200160405180910390f35b600055565b600054905600a165627a7a7230582023e722f35009f12d5698a4ab22fb9d55a6c0f479fc43875c65be46fbdd8db4310029"

View File

@ -49,7 +49,7 @@ type
web3: Web3
data: string
to: Address
value: Uint256
value: UInt256
ContractCall*[T] = ref object of ContractCallBase
@ -405,7 +405,7 @@ macro contract*(cname: untyped, body: untyped): untyped =
`encoder`
return initContractCall[`output`](
`senderName`.web3,
($keccak_256.digest(`signature`))[0..<8].toLower & `encodedParams`,
($keccak256.digest(`signature`))[0..<8].toLower & `encodedParams`,
`senderName`.contractAddress)
result.add procDef
@ -681,4 +681,4 @@ proc subscribe*(s: Sender, t: typedesc, cb: proc): Future[Subscription] {.inline
subscribe(s, t, newJObject(), cb, SubscriptionErrorHandler nil)
proc `$`*(b: Bool): string =
$(Stint[256](b))
$(StInt[256](b))

View File

@ -6,7 +6,7 @@ description = "This is the humble begginings of library similar to web3.[js|py
license = "MIT or Apache License 2.0"
### Dependencies
requires "nim >= 0.18.0"
requires "nim >= 1.2.0"
requires "chronicles"
requires "chronos"
requires "eth"
@ -22,7 +22,11 @@ proc test(args, path: string) =
if not dirExists "build":
mkDir "build"
exec "nim " & getEnv("TEST_LANG", "c") & " " & getEnv("NIMFLAGS") & " " & args &
" --outdir:build -r --hints:off --warnings:off --skipParentCfg " & path
" --outdir:build -r --skipParentCfg" &
" --warning[ObservableStores]:off --warning[GcUnsafe2]:off" &
" --styleCheck:usages --styleCheck:hint" &
" --hint[XDeclaredButNotUsed]:off --hint[Processing]:off " &
path
### tasks

View File

@ -27,7 +27,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var ref UInt256) =
new result
result[] = hexStr.parse(StUint[256], 16) # TODO: Handle errors
proc bytesFromJson(n: JsonNode, argName: string, result: var openarray[byte]) =
proc bytesFromJson(n: JsonNode, argName: string, result: var openArray[byte]) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if hexStr.len != result.len * 2 + 2: # including "0x"
@ -98,7 +98,7 @@ proc `%`*(v: Address): JsonNode =
proc `%`*(v: TypedTransaction): JsonNode =
result = %("0x" & distinctBase(v).toHex)
proc writeHexValue(w: JsonWriter, v: openarray[byte]) =
proc writeHexValue(w: JsonWriter, v: openArray[byte]) =
w.stream.write "\"0x"
w.stream.writeHex v
w.stream.write "\""
@ -140,7 +140,7 @@ proc `$`*(v: DynamicBytes): string {.inline.} =
"0x" & toHex(v)
proc `%`*(x: EthSend): JsonNode =
result = newJobject()
result = newJObject()
result["from"] = %x.source
if x.to.isSome:
result["to"] = %x.to.unsafeGet
@ -156,7 +156,7 @@ proc `%`*(x: EthSend): JsonNode =
result["nonce"] = %x.nonce.unsafeGet
proc `%`*(x: EthCall): JsonNode =
result = newJobject()
result = newJObject()
result["to"] = %x.to
if x.source.isSome:
result["source"] = %x.source.unsafeGet
@ -173,7 +173,7 @@ proc `%`*(x: byte): JsonNode =
%x.int
proc `%`*(x: FilterOptions): JsonNode =
result = newJobject()
result = newJObject()
if x.fromBlock.isSome:
result["fromBlock"] = %x.fromBlock.unsafeGet
if x.toBlock.isSome:

View File

@ -7,13 +7,13 @@ import
type
EncodeResult* = tuple[dynamic: bool, data: string]
func encode*[bits: static[int]](x: Stuint[bits]): EncodeResult =
## Encodes a `Stuint` to a textual representation for use in the JsonRPC
func encode*[bits: static[int]](x: StUint[bits]): EncodeResult =
## Encodes a `StUint` to a textual representation for use in the JsonRPC
## `sendTransaction` call.
(dynamic: false, data: '0'.repeat((256 - bits) div 4) & x.dumpHex)
func encode*[bits: static[int]](x: Stint[bits]): EncodeResult =
## Encodes a `Stint` to a textual representation for use in the JsonRPC
func encode*[bits: static[int]](x: StInt[bits]): EncodeResult =
## Encodes a `StInt` to a textual representation for use in the JsonRPC
## `sendTransaction` call.
(dynamic: false,
data:
@ -23,17 +23,17 @@ func encode*[bits: static[int]](x: Stint[bits]): EncodeResult =
'0'.repeat((256 - bits) div 4) & x.dumpHex
)
func decode*(input: string, offset: int, to: var Stuint): int =
func decode*(input: string, offset: int, to: var StUint): int =
let meaningfulLen = to.bits div 8 * 2
to = type(to).fromHex(input[offset .. offset + meaningfulLen - 1])
meaningfulLen
func decode*[N](input: string, offset: int, to: var Stint[N]): int =
func decode*[N](input: string, offset: int, to: var StInt[N]): int =
let meaningfulLen = N div 8 * 2
fromHex(input[offset .. offset + meaningfulLen], to)
meaningfulLen
func fixedEncode(a: openarray[byte]): EncodeResult =
func fixedEncode(a: openArray[byte]): EncodeResult =
var padding = a.len mod 32
if padding != 0: padding = 32 - padding
result = (dynamic: false, data: "00".repeat(padding) & byteutils.toHex(a))
@ -41,7 +41,7 @@ func fixedEncode(a: openarray[byte]): EncodeResult =
func encode*[N](b: FixedBytes[N]): EncodeResult = fixedEncode(array[N, byte](b))
func encode*(b: Address): EncodeResult = fixedEncode(array[20, byte](b))
func decodeFixed(input: string, offset: int, to: var openarray[byte]): int =
func decodeFixed(input: string, offset: int, to: var openArray[byte]): int =
let meaningfulLen = to.len * 2
var padding = to.len mod 32
if padding != 0: padding = (32 - padding) * 2
@ -55,7 +55,7 @@ func decode*[N](input: string, offset: int, to: var FixedBytes[N]): int {.inline
func decode*(input: string, offset: int, to: var Address): int {.inline.} =
decodeFixed(input, offset, array[20, byte](to))
func encodeDynamic(v: openarray[byte]): EncodeResult =
func encodeDynamic(v: openArray[byte]): EncodeResult =
result.dynamic = true
result.data = v.len.toHex(64).toLower
for y in v:
@ -86,18 +86,18 @@ macro makeTypeEnum(): untyped =
identInt = newIdentNode("Int" & $i)
if ceil(log2(i.float)) == floor(log2(i.float)):
lastpow2 = i
if i notin {256, 125}: # Int/Uint256/128 are already defined in stint. No need to repeat.
if i notin {256, 125}: # Int/UInt256/128 are already defined in stint. No need to repeat.
result.add quote do:
type
`identUint`* = Stuint[`lastpow2`]
`identInt`* = Stint[`lastpow2`]
`identUint`* = StUint[`lastpow2`]
`identInt`* = StInt[`lastpow2`]
let
identUint = ident("Uint")
identInt = ident("Int")
identBool = ident("Bool")
result.add quote do:
type
`identUint`* = Uint256
`identUint`* = UInt256
`identInt`* = Int256
`identBool`* = distinct Int256
@ -139,7 +139,7 @@ macro makeTypeEnum(): untyped =
result.add quote do:
type
`identFixed`* = distinct Int128
`identUfixed`* = distinct Uint128
`identUfixed`* = distinct UInt128
for i in 1..256:
let
identBytes = ident("Bytes" & $i)
@ -191,7 +191,7 @@ func encode*(x: seq[Encodable]): EncodeResult =
result.data &= data
func decode*[T](input: string, to: seq[T]): seq[T] =
var count = input[0..64].decode(Stuint)
var count = input[0..64].decode(StUint)
result = newSeq[T](count)
for i in 0..count:
result[i] = input[i*64 .. (i+1)*64].decode(T)

View File

@ -42,7 +42,7 @@ type
# https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.8/src/engine/specification.md#transitionconfigurationv1
TransitionConfigurationV1* = object
terminalTotalDifficulty*: Uint256
terminalTotalDifficulty*: UInt256
terminalBlockHash*: BlockHash
terminalBlockNumber*: Quantity

View File

@ -38,7 +38,7 @@ type
to*: Option[Address] # (optional when creating new contract) the address the transaction is directed to.
gas*: Option[Quantity] # (optional, default: 90000) integer of the gas provided for the transaction execution. It will return unused gas.
gasPrice*: Option[int] # (optional, default: To-Be-Determined) integer of the gasPrice used for each paid gas.
value*: Option[Uint256] # (optional) integer of the value sent with this transaction.
value*: Option[UInt256] # (optional) integer of the value sent with this transaction.
data*: string # the compiled code of a contract OR the hash of the invoked method signature and encoded parameters. For details see Ethereum Contract ABI.
nonce*: Option[Nonce] # (optional) integer of a nonce. This allows to overwrite your own pending transactions that use the same nonce
@ -291,4 +291,4 @@ template len*(data: DynamicBytes): int =
len(distinctBase data)
func `$`*[minLen, maxLen](data: DynamicBytes[minLen, maxLen]): string =
"0x" & byteutils.toHex(distinctbase(data))
"0x" & byteutils.toHex(distinctBase(data))

View File

@ -137,7 +137,7 @@ proc parseExt*[T: BChar](data: openArray[T], output: var seq[AppExt]): bool =
ext.params[^1].name = system.move(param.name)
ext.params[^1].value = system.move(param.value)
if lex.tok notin {tkSemCol, tkComma, tkEof}:
if lex.tok notin {tkSemcol, tkComma, tkEof}:
return false
output.setLen(output.len + 1)

View File

@ -149,12 +149,10 @@ proc handleTlsConnCb(
maxVersion = tlsHttpServer.maxVersion,
flags = tlsHttpServer.tlsFlags)
var stream: AsyncStream
try:
stream = AsyncStream(
let stream = AsyncStream(
reader: tlsStream.reader,
writer: tlsStream.writer)
try:
let httpServer = HttpServer(server)
let request = await httpServer.parseRequest(stream)
@ -164,9 +162,7 @@ proc handleTlsConnCb(
finally:
await stream.closeWait()
proc accept*(server: HttpServer): Future[HttpRequest]
{.async, raises: [Defect, HttpError].} =
proc accept*(server: HttpServer): Future[HttpRequest] {.async.} =
if not isNil(server.handler):
raise newException(HttpError,
"Callback already registered - cannot mix callback and accepts styles!")

View File

@ -153,7 +153,7 @@ proc handleClose*(
code = StatusFulfilled
reason = ""
case payload.len:
case payLoad.len:
of 0:
code = StatusNoStatus
of 1:

View File

@ -91,7 +91,7 @@ nim_needs_rebuilding() {
fi
# Delete old Nim binaries, to put a limit on how much storage we use.
for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* 2>/dev/null | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
if [[ -e "${F}" ]]; then
rm "${F}"
fi