mirror of https://github.com/status-im/nim-eth.git
More shared code extracted out of RLPx
This commit is contained in:
parent
f761889518
commit
76bb2cde5c
@@ -23,6 +23,48 @@ template networkState*(connection: Peer, Protocol: type): untyped =
 proc initProtocolState*[T](state: T, x: Peer|EthereumNode) {.gcsafe.} = discard

-proc initFuture[T](loc: var Future[T]) =
-  loc = newFuture[T]()
+proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} =
+  var f = Future[Option[MsgType]](future)
+  if not f.finished:
+    if msg != nil:
+      f.complete some(cast[ptr MsgType](msg)[])
+    else:
+      f.complete none(MsgType)
+  else:
+    # This future was already resolved, but let's do some sanity checks
+    # here. The only reasonable explanation is that the request should
+    # have timed out.
+    if msg != nil:
+      if f.read.isSome:
+        doAssert false, "trying to resolve a request twice"
+      else:
+        doAssert false, "trying to resolve a timed out request with a value"
+    else:
+      try:
+        if not f.read.isSome:
+          doAssert false, "a request timed out twice"
+      # This can except when the future still completes with an error.
+      # E.g. the `sendMsg` fails because of an already closed transport or a
+      # broken pipe
+      except TransportOsError:
+        # E.g. broken pipe
+        trace "TransportOsError during request", err = getCurrentExceptionMsg()
+      except TransportError:
+        trace "Transport got closed during request"
+      except:
+        debug "Exception in requestResolver()",
+          exc = getCurrentException().name,
+          err = getCurrentExceptionMsg()
+        raise
+
+proc linkSendFailureToReqFuture[S, R](sendFut: Future[S], resFut: Future[R]) =
+  sendFut.addCallback() do (arg: pointer):
+    if not sendFut.error.isNil:
+      resFut.fail(sendFut.error)
+
+proc messagePrinter[MsgType](msg: pointer): string {.gcsafe.} =
+  result = ""
+  # TODO: uncommenting the line below increases the compile-time
+  # tremendously (for reasons not yet known)
+  # result = $(cast[ptr MsgType](msg)[])
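The relocated `requestResolver` completes a request's `Future[Option[MsgType]]` with `some(reply)` when a response arrives, and with `none(MsgType)` when the dispatcher passes `nil` to signal a timeout. A minimal standalone sketch of that resolution scheme, assuming chronos and std/options are available (`DummyReply` and `resolve` are illustrative names, not part of this commit):

```nim
import std/options, chronos

type DummyReply = object
  value: int

# Mirrors the not-yet-finished branch of requestResolver[MsgType]:
# a nil msg pointer means "timed out", non-nil carries the reply.
proc resolve(future: FutureBase, msg: pointer) =
  var f = Future[Option[DummyReply]](future)
  if not f.finished:
    if msg != nil:
      f.complete some(cast[ptr DummyReply](msg)[])
    else:
      f.complete none(DummyReply)

when isMainModule:
  var fut = newFuture[Option[DummyReply]]("demo")
  var reply = DummyReply(value: 42)
  resolve(fut, addr reply)
  doAssert fut.read.get.value == 42   # resolved with the reply
```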
@@ -1,6 +1,6 @@
 import
   macros,
-  std_shims/macros_shim, chronos/timer
+  std_shims/macros_shim, chronos

 type
   MessageKind* = enum
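Widening the import from `chronos/timer` to the full `chronos` module is presumably needed because this file now defines `initFuture`: `10.seconds` only requires the timer module, while `Future` and `newFuture` live in chronos proper. A quick sketch of what each piece supplies, assuming chronos is installed (`import chronos` effectively re-exports the timer API):

```nim
import chronos   # brings in the timer module plus Future/newFuture

const defaultReqTimeout = 10.seconds    # this much worked with chronos/timer

proc initFuture*[T](loc: var Future[T]) =
  loc = newFuture[T]()                  # this needs the full chronos module

var f: Future[int]
initFuture(f)
doAssert not f.finished
```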
@@ -18,6 +18,10 @@ type
     recIdent*: NimNode
     recBody*: NimNode
     userHandler*: NimNode
+    protocol*: P2PProtocol
+
+    sendProcPeerParam*: NimNode
+    sendProcMsgParams*: seq[NimNode]

   Request* = ref object
     queries*: seq[Message]
@@ -69,12 +73,18 @@ type
     # Bound symbols to the back-end run-time types and procs
     PeerType*: NimNode
     NetworkType*: NimNode
+    SerializationFormat*: NimNode
+
     registerProtocol*: NimNode
     setEventHandlers*: NimNode

   BackendFactory* = proc (p: P2PProtocol): Backend
+
+  P2PBackendError* = object of CatchableError
+  InvalidMsgError* = object of P2PBackendError
+
+  ProtocolInfoBase* = object

 const
   defaultReqTimeout = 10.seconds
@@ -296,24 +306,108 @@ proc createSendProc*(msg: Message, procType = nnkProcDef): NimNode =
   # TODO: file an issue:
   # macros.newProc and macros.params doesn't work with nnkMacroDef

-  let pragmas = if procType == nnkProcDef: newTree(nnkPragma, ident"gcsafe")
-                else: newEmptyNode()
+  # createSendProc must be called only once
+  assert msg.sendProcPeerParam == nil
+
+  let
+    pragmas = if procType == nnkProcDef: newTree(nnkPragma, ident"gcsafe")
+              else: newEmptyNode()

   result = newNimNode(procType).add(
     msg.identWithExportMarker, ## name
     newEmptyNode(),
     newEmptyNode(),
-    msg.procDef.params.copy, ## params
+    copy msg.procDef.params, ## params
     pragmas,
     newEmptyNode(),
     newStmtList()) ## body

+  for param, paramType in result.typedParams():
+    if msg.sendProcPeerParam == nil:
+      msg.sendProcPeerParam = param
+    else:
+      msg.sendProcMsgParams.add param
+
   if msg.kind in {msgHandshake, msgRequest}:
     result[3].add msg.timeoutParam

   result[3][0] = if procType == nnkMacroDef: ident "untyped"
                  else: newTree(nnkBracketExpr, ident("Future"), msg.recIdent)

+const tracingEnabled = defined(p2pdump)
+
+when tracingEnabled:
+  proc logSentMsgFields(peer: NimNode,
+                        protocolInfo: NimNode,
+                        msgName: string,
+                        fields: openarray[NimNode]): NimNode =
+    ## This generates the tracing code inserted in the message sending procs
+    ## `fields` contains all the params that were serialized in the message
+    var tracer = ident("tracer")
+
+    result = quote do:
+      var `tracer` = init StringJsonWriter
+      beginRecord(`tracer`)
+
+    for f in fields:
+      result.add newCall(bindSym"writeField", tracer, newLit($f), f)
+
+    result.add quote do:
+      endRecord(`tracer`)
+      logMsgEventImpl("outgoing_msg", `peer`,
+                      `protocolInfo`, `msgName`, getOutput(`tracer`))
+
+proc initFuture*[T](loc: var Future[T]) =
+  loc = newFuture[T]()
+
+proc createSendProcBody*(msg: Message,
+                         preludeGenerator: proc(stream: NimNode): NimNode,
+                         sendCallGenerator: proc (peer, bytes: NimNode): NimNode): NimNode =
+  let
+    outputStream = ident "outputStream"
+    msgBytes = ident "msgBytes"
+    writer = ident "writer"
+    writeField = ident "writeField"
+    resultIdent = ident "result"
+
+    initFuture = bindSym "initFuture"
+    recipient = msg.sendProcPeerParam
+    msgRecName = msg.recIdent
+    Format = msg.protocol.backend.SerializationFormat
+
+    prelude = if preludeGenerator.isNil: newStmtList()
+              else: preludeGenerator(outputStream)
+    appendParams = newStmtList()
+
+    initResultFuture = if msg.kind != msgRequest: newStmtList()
+                       else: newCall(initFuture, resultIdent)
+
+    sendCall = sendCallGenerator(recipient, msgBytes)
+
+    tracing = when tracingEnabled: logSentMsgFields(recipient,
+                                                    newLit(msg.protocol.name),
+                                                    $msg.ident,
+                                                    msg.sendProcMsgParams)
+              else: newStmtList()
+
+  for param in msg.sendProcMsgParams:
+    appendParams.add newCall(writeField, writer, newLit($param), param)
+
+  result = quote do:
+    mixin init, WriterType, beginRecord, endRecord, getOutput
+
+    `initResultFuture`
+    var `outputStream` = init OutputStream
+    `prelude`
+    var writer = init(WriterType(`Format`), `outputStream`)
+    var recordStartMemo = beginRecord(writer, `msgRecName`)
+    `appendParams`
+    `tracing`
+    endRecord(writer, recordStartMemo)
+    let `msgBytes` = getOutput(`outputStream`)
+    `sendCall`

 proc appendAllParams*(node: NimNode, procDef: NimNode, skipFirst = 0): NimNode =
   result = node
   for p, _ in procDef.typedParams(skip = skipFirst):
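`createSendProc` assembles the sender by filling the seven child slots of an `nnkProcDef` node (name, pattern, generic params, formal params, pragmas, reserved, body). A self-contained toy of the same AST-building technique, using only the standard `macros` module (`makeDouble`/`double` are illustrative names, not part of this commit):

```nim
import macros

# Builds:  proc double*(x: int): int {.gcsafe.} = x * 2
macro makeDouble(): untyped =
  result = newNimNode(nnkProcDef).add(
    postfix(ident"double", "*"),           # name, with export marker
    newEmptyNode(),                        # term-rewriting pattern (unused)
    newEmptyNode(),                        # generic params (none)
    newTree(nnkFormalParams,
      ident"int",                          # return type
      newIdentDefs(ident"x", ident"int")), # one parameter: x: int
    newTree(nnkPragma, ident"gcsafe"),     # pragmas
    newEmptyNode(),                        # reserved slot
    newStmtList(infix(ident"x", "*", newLit(2))))  # body

makeDouble()
doAssert double(3) == 6
```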
@@ -62,26 +62,6 @@ when tracingEnabled:
       Msg.type.name,
       StringJsonWriter.encode(msg))

-  proc logSentMsgFields(peer: NimNode,
-                        protocolInfo: NimNode,
-                        msgName: string,
-                        fields: openarray[NimNode]): NimNode =
-    ## This generates the tracing code inserted in the message sending procs
-    ## `fields` contains all the params that were serialized in the message
-    var tracer = ident("tracer")
-
-    result = quote do:
-      var `tracer` = init StringJsonWriter
-      beginRecord(`tracer`)
-
-    for f in fields:
-      result.add newCall(bindSym"writeField", tracer, newLit($f), f)
-
-    result.add quote do:
-      endRecord(`tracer`)
-      logMsgEventImpl("outgoing_msg", `peer`,
-                      `protocolInfo`, `msgName`, getOutput(`tracer`))
-
   template logSentMsg(peer: Peer, msg: auto) =
     logMsgEvent("outgoing_msg", peer, msg)

@@ -105,7 +85,7 @@ when tracingEnabled:

 else:
   template initTracing(baseProtocol: ProtocolInfo,
                        userProtocols: seq[ProtocolInfo])= discard
   template logSentMsg(peer: Peer, msg: auto) = discard
   template logReceivedMsg(peer: Peer, msg: auto) = discard
   template logConnectedPeer(peer: Peer) = discard
@@ -101,7 +101,7 @@ type
     name*: string

     # Private fields:
-    thunk*: MessageHandler
+    thunk*: ThunkProc
     printer*: MessageContentPrinter
     requestResolver*: RequestResolver
     nextMsgResolver*: NextMsgResolver

@@ -134,7 +134,7 @@ type

   # Private types:
   MessageHandlerDecorator* = proc(msgId: int, n: NimNode): NimNode
-  MessageHandler* = proc(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.}
+  ThunkProc* = proc(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.}
   MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
   RequestResolver* = proc(msg: pointer, future: FutureBase) {.gcsafe.}
   NextMsgResolver* = proc(msgData: Rlp, future: FutureBase) {.gcsafe.}
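`ThunkProc` is simply a clearer name for the dispatch-thunk signature formerly called `MessageHandler`. A toy sketch of how a value of that shape can be registered and invoked, with stand-in `Peer`/`Rlp` types (illustrative only; the real types are the ones defined in this module):

```nim
import chronos

# Stand-in types so the ThunkProc shape can be shown in isolation.
type
  Peer = ref object
  Rlp = object
  ThunkProc = proc(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.}

proc helloThunk(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.} =
  result = newFuture[void]("helloThunk")
  result.complete()

var messageThunks: seq[ThunkProc]
messageThunks.add helloThunk                  # registered per message id
discard messageThunks[0](Peer(), 0, Rlp())    # dispatched by msg id
```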
@@ -53,6 +53,8 @@ proc disconnectAndRaise(peer: Peer,
   await peer.disconnect(r)
   raisePeerDisconnected(msg, r)

+include p2p_backends_helpers
+
 # Dispatcher
 #
@@ -166,53 +168,13 @@ proc cmp*(lhs, rhs: ProtocolInfo): int =
       return int16(lhs.name[i]) - int16(rhs.name[i])
   return 0

-proc messagePrinter[MsgType](msg: pointer): string {.gcsafe.} =
-  result = ""
-  # TODO: uncommenting the line below increases the compile-time
-  # tremendously (for reasons not yet known)
-  # result = $(cast[ptr MsgType](msg)[])
-
 proc nextMsgResolver[MsgType](msgData: Rlp, future: FutureBase) {.gcsafe.} =
   var reader = msgData
   Future[MsgType](future).complete reader.readRecordType(MsgType, MsgType.rlpFieldsCount > 1)

-proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} =
-  var f = Future[Option[MsgType]](future)
-  if not f.finished:
-    if msg != nil:
-      f.complete some(cast[ptr MsgType](msg)[])
-    else:
-      f.complete none(MsgType)
-  else:
-    # This future was already resolved, but let's do some sanity checks
-    # here. The only reasonable explanation is that the request should
-    # have timed out.
-    if msg != nil:
-      if f.read.isSome:
-        doAssert false, "trying to resolve a request twice"
-      else:
-        doAssert false, "trying to resolve a timed out request with a value"
-    else:
-      try:
-        if not f.read.isSome:
-          doAssert false, "a request timed out twice"
-      # This can except when the future still completes with an error.
-      # E.g. the `sendMsg` fails because of an already closed transport or a
-      # broken pipe
-      except TransportOsError:
-        # E.g. broken pipe
-        trace "TransportOsError during request", err = getCurrentExceptionMsg()
-      except TransportError:
-        trace "Transport got closed during request"
-      except:
-        debug "Exception in requestResolver()",
-          exc = getCurrentException().name,
-          err = getCurrentExceptionMsg()
-        raise
-
 proc registerMsg(protocol: ProtocolInfo,
                  id: int, name: string,
-                 thunk: MessageHandler,
+                 thunk: ThunkProc,
                  printer: MessageContentPrinter,
                  requestResolver: RequestResolver,
                  nextMsgResolver: NextMsgResolver) =
@@ -257,12 +219,6 @@ proc supports*(peer: Peer, Protocol: type): bool {.inline.} =
 template perPeerMsgId(peer: Peer, MsgType: type): int =
   perPeerMsgIdImpl(peer, MsgType.msgProtocol.protocolInfo, MsgType.msgId)

-proc writeMsgId(p: ProtocolInfo, msgId: int, peer: Peer,
-                rlpOut: var RlpWriter) =
-  let baseMsgId = peer.dispatcher.protocolOffsets[p.index]
-  doAssert baseMsgId != -1
-  rlpOut.append(baseMsgId + msgId)
-
 proc invokeThunk*(peer: Peer, msgId: int, msgData: var Rlp): Future[void] =
   template invalidIdError: untyped =
     raise newException(UnsupportedMessageError,
@@ -275,11 +231,6 @@ proc invokeThunk*(peer: Peer, msgId: int, msgData: var Rlp): Future[void] =
   return thunk(peer, msgId, msgData)

-proc linkSendFailureToReqFuture[S, R](sendFut: Future[S], resFut: Future[R]) =
-  sendFut.addCallback() do (arg: pointer):
-    if not sendFut.error.isNil:
-      resFut.fail(sendFut.error)
-
 template compressMsg(peer: Peer, data: Bytes): Bytes =
   when useSnappy:
     if peer.snappyEnabled:
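`linkSendFailureToReqFuture` (now in the shared helpers) forwards a failed send to the pending request future, so a request fails immediately instead of hanging until its timeout when the underlying write already broke. A minimal usage sketch, assuming chronos (the `poll()` call is needed because chronos runs callbacks on the event loop, not inline):

```nim
import chronos

proc linkSendFailureToReqFuture[S, R](sendFut: Future[S], resFut: Future[R]) =
  sendFut.addCallback() do (arg: pointer):
    if not sendFut.error.isNil:
      resFut.fail(sendFut.error)

when isMainModule:
  var sendFut = newFuture[void]("send")
  var reqFut  = newFuture[int]("request")
  linkSendFailureToReqFuture(sendFut, reqFut)
  sendFut.fail(newException(IOError, "broken pipe"))
  poll()                       # run the scheduled callback
  doAssert reqFut.failed       # the request fails with the send error
```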
@@ -499,8 +450,6 @@ proc waitSingleMsg(peer: Peer, MsgType: type): Future[MsgType] {.async.} =
       warn "Dropped RLPX message",
            msg = peer.dispatcher.messages[nextMsgId].name

-include p2p_backends_helpers
-
 proc nextMsg*(peer: Peer, MsgType: type): Future[MsgType] =
   ## This procs awaits a specific RLPx message.
   ## Any messages received while waiting will be dispatched to their
@@ -619,7 +568,6 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
     nextMsg = bindSym "nextMsg"
     initProtocol = bindSym"initProtocol"
     registerMsg = bindSym "registerMsg"
-    writeMsgId = bindSym "writeMsgId"
     perPeerMsgId = bindSym "perPeerMsgId"
     perPeerMsgIdImpl = bindSym "perPeerMsgIdImpl"
     linkSendFailureToReqFuture = bindSym "linkSendFailureToReqFuture"