Use asyncraises in p2p (#675)

* Use asyncraises in p2p

* Fix capture error in async proc

* Remove gcsafe from async procs
andri lim 2024-02-14 15:59:13 +07:00 committed by GitHub
parent ce834e1287
commit b9c40e1380
6 changed files with 46 additions and 42 deletions
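Note: the mechanism used throughout this commit is chronos' asyncraises support, where an async proc declares the exact exceptions it may raise via `{.async: (raises: [...]).}` and the compiler enforces that list across await boundaries. A minimal sketch of the annotation, using a hypothetical proc that is not part of this commit:

import chronos

# Hypothetical example: `double` may raise ValueError and nothing else;
# raising anything outside the declared list is a compile-time error.
proc double(x: int): Future[int] {.async: (raises: [ValueError]).} =
  if x < 0:
    raise newException(ValueError, "negative input")
  return x * 2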

View File

@@ -122,14 +122,17 @@ proc newEthereumNode*(
     result.addCapability(cap)
 
 proc processIncoming(server: StreamServer,
-                     remote: StreamTransport): Future[void] {.async, gcsafe.} =
-  var node = getUserData[EthereumNode](server)
-  let peer = await node.rlpxAccept(remote)
-  if not peer.isNil:
-    trace "Connection established (incoming)", peer
-    if node.peerPool != nil:
-      node.peerPool.connectingNodes.excl(peer.remote)
-      node.peerPool.addPeer(peer)
+                     remote: StreamTransport): Future[void] {.async: (raises: []).} =
+  try:
+    var node = getUserData[EthereumNode](server)
+    let peer = await node.rlpxAccept(remote)
+    if not peer.isNil:
+      trace "Connection established (incoming)", peer
+      if node.peerPool != nil:
+        node.peerPool.connectingNodes.excl(peer.remote)
+        node.peerPool.addPeer(peer)
+  except CatchableError as exc:
+    error "processIncoming", msg=exc.msg
 
 proc listeningAddress*(node: EthereumNode): ENode =
   node.toENode()
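Note: processIncoming runs as a transport callback, so it now declares an empty raises list and keeps every failure local; the try/except CatchableError wrapper is what lets the body satisfy `raises: []`. The same shape as a sketch, with hypothetical names:

import chronos, chronicles

proc onEvent() {.async: (raises: []).} =
  try:
    await sleepAsync(10.milliseconds)
  except CatchableError as exc:
    # CancelledError derives from CatchableError in chronos, so this
    # handler also covers cancellation, as `raises: []` demands.
    error "onEvent", msg = exc.msg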

View File

@@ -120,8 +120,9 @@ proc timeKeyPong(n: Node): TimeKey =
 proc timeKeyPing(n: Node): TimeKey =
   timeKey(n.id, n.ip, cmdPing)
 
-proc lastPingReceived(k: KademliaProtocol, n: Node): Time =
-  k.pingPongTime.getOrDefault(n.timeKeyPing, 0'i64).fromUnix
+when false:
+  proc lastPingReceived(k: KademliaProtocol, n: Node): Time =
+    k.pingPongTime.getOrDefault(n.timeKeyPing, 0'i64).fromUnix
 
 proc lastPongReceived(k: KademliaProtocol, n: Node): Time =
   k.pingPongTime.getOrDefault(n.timeKeyPong, 0'i64).fromUnix
 
@@ -182,9 +183,10 @@ proc updateLastPongReceived(k: KademliaProtocol, n: Node, t: Time) =
   k.removeTooOldPingPongTime()
   k.pingPongTime[n.timeKeyPong] = t.toUnix
 
-# checkBond checks if the given node has a recent enough endpoint proof.
-proc checkBond(k: KademliaProtocol, n: Node): bool =
-  getTime() - k.lastPongReceived(n) < BOND_EXPIRATION
+when false:
+  # checkBond checks if the given node has a recent enough endpoint proof.
+  proc checkBond(k: KademliaProtocol, n: Node): bool =
+    getTime() - k.lastPongReceived(n) < BOND_EXPIRATION
 
 proc newKBucket(istart, iend: NodeId): KBucket =
   result.new()
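Note: lastPingReceived and checkBond lost their callers in this commit, so instead of deleting them the diff parks them under `when false:`, which is evaluated at compile time; the body still has to parse but is never semantically checked or emitted. A tiny sketch:

# Code under `when false:` must parse but is not compiled in,
# which makes it a cheap way to retire dead code without deleting it.
when false:
  proc retiredHelper(x: int): int =
    x + 1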

View File

@@ -1,3 +1,14 @@
+# nim-eth
+# Copyright (c) 2018-2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+{.push raises: [].}
+
 let protocolManager = ProtocolManager()
 
 # The variables above are immutable RTTI information. We need to tell
@@ -64,3 +75,5 @@ proc initProtocolStates(peer: Peer, protocols: openArray[ProtocolInfo])
     if peerStateInit != nil:
       peer.protocolStates[protocol.index] = peerStateInit(peer)
+
+{.pop.}
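Note: the `{.push raises: [].}` / `{.pop.}` pair makes "no exceptions" the default for every proc declared between them, while individual procs can still widen the set with their own annotation. A minimal sketch of the pattern:

import std/strutils

{.push raises: [].}  # procs below may not raise unless annotated

proc sum(a, b: int): int =
  a + b  # fine: cannot raise

proc parse(s: string): int {.raises: [ValueError].} =
  parseInt(s)  # per-proc annotation overrides the pushed default

{.pop.}  # restore the previous default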

View File

@@ -149,7 +149,6 @@ let
 const
   protocolCounter = CacheCounter"protocolCounter"
 
-template Opt(T): auto = newTree(nnkBracketExpr, Option, T)
 template Fut(T): auto = newTree(nnkBracketExpr, Future, T)
 
 proc initFuture*[T](loc: var Future[T]) =
@@ -330,10 +329,6 @@ proc init*(T: type P2PProtocol, backendFactory: BackendFactory,
   if not result.backend.afterProtocolInit.isNil:
     result.backend.afterProtocolInit(result)
 
-proc isFuture(t: NimNode): bool =
-  t.kind == nnkBracketExpr and eqIdent(t[0], "Future")
-
 proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = -1) =
   ## This procs adds a set of common helpers available in all messages handlers
   ## (e.g. `perProtocolMsgId`, `peer.state`, etc).
@@ -362,22 +357,22 @@ proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = -1) =
       param[^2] = chooseFieldType(param[^2])
 
   prelude.add quote do:
-    type `currentProtocolSym` = `protocolNameIdent`
+    type `currentProtocolSym` {.used.} = `protocolNameIdent`
 
   if msgId >= 0 and p.isRlpx:
     prelude.add quote do:
-      const `perProtocolMsgIdVar` = `msgId`
+      const `perProtocolMsgIdVar` {.used.} = `msgId`
 
   # Define local accessors for the peer and the network protocol states
   # inside each user message handler proc (e.g. peer.state.foo = bar)
   if PeerStateType != nil:
     prelude.add quote do:
-      template state(`peerVar`: `PeerType`): `PeerStateType` =
+      template state(`peerVar`: `PeerType`): `PeerStateType` {.used.} =
         `PeerStateType`(`getState`(`peerVar`, `protocolInfo`))
 
   if NetworkStateType != nil:
     prelude.add quote do:
-      template networkState(`peerVar`: `PeerType`): `NetworkStateType` =
+      template networkState(`peerVar`: `PeerType`): `NetworkStateType` {.used.} =
         `NetworkStateType`(`getNetworkState`(`peerVar`.network, `protocolInfo`))
@@ -395,7 +390,6 @@ proc addTimeoutParam(procDef: NimNode, defaultValue: int64) =
   var
     Duration = bindSym"Duration"
     milliseconds = bindSym"milliseconds"
-    lastParam = procDef.params[^1]
 
   procDef.params.add newTree(nnkIdentDefs,
     timeoutVar,
@@ -696,8 +690,6 @@ proc useStandardBody*(sendProc: SendProc,
     postSerialization = if postSerializationStep.isNil: newStmtList()
                         else: postSerializationStep(outputStream)
 
-    appendParams = newStmtList()
-
     tracing = when not tracingEnabled:
                 newStmtList()
               else:
@@ -979,7 +971,7 @@ proc genCode*(p: P2PProtocol): NimNode =
   regBody.add newCall(p.backend.registerProtocol, protocolVar)
 
   result.add quote do:
-    proc `protocolReg`() {.raises: [RlpError, Defect].} =
+    proc `protocolReg`() {.raises: [RlpError].} =
       let `protocolVar` = `protocolInit`
       `regBody`
     `protocolReg`()
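Note: the `{.used.}` additions are about warnings, not behaviour: these declarations are macro-generated into every message handler, and a given handler may not reference all of them. Dropping `Defect` from the raises list follows newer Nim, which no longer tracks defects in raises effects. What `{.used.}` does, sketched with illustrative names:

# {.used.} marks a declaration as intentionally possibly-unused,
# silencing XDeclaredButNotUsed hints in macro-generated code.
const perProtocolMsgId {.used.} = 42

template state(s: string): string {.used.} =
  "state: " & s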

View File

@@ -155,7 +155,7 @@ type
   MessageHandlerDecorator* = proc(msgId: int, n: NimNode): NimNode
 
   ThunkProc* = proc(x: Peer, msgId: int, data: Rlp): Future[void]
-    {.gcsafe, raises: [RlpError].}
+    {.gcsafe, async: (raises: [RlpError, CatchableError]).}
 
   MessageContentPrinter* = proc(msg: pointer): string
     {.gcsafe, raises: [].}
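Note: ThunkProc is a proc type, and chronos accepts the same annotation there, so every thunk stored in the dispatcher must declare a compatible exception list. A sketch with hypothetical names, assuming chronos v4:

import chronos

type
  # Illustrative callback type: implementations must match its raises list.
  Handler = proc(id: int): Future[void]
    {.gcsafe, async: (raises: [CatchableError]).}

proc checkId(id: int): Future[void]
    {.async: (raises: [CatchableError]).} =
  if id < 0:
    raise newException(ValueError, "negative id")  # a CatchableError subtype

let h: Handler = checkId  # accepted: the raises lists line up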

View File

@@ -176,7 +176,7 @@ proc messagePrinter[MsgType](msg: pointer): string {.gcsafe.} =
   # result = $(cast[ptr MsgType](msg)[])
 
 proc disconnect*(peer: Peer, reason: DisconnectionReason,
-                 notifyOtherPeer = false) {.gcsafe, async.}
+                 notifyOtherPeer = false) {.async: (raises:[CatchableError]).}
 
 template raisePeerDisconnected(msg: string, r: DisconnectionReason) =
   var e = newException(PeerDisconnected, msg)
@@ -215,9 +215,6 @@ proc handshakeImpl[T](peer: Peer,
 # Dispatcher
 #
 
-proc hash(d: Dispatcher): int =
-  hash(d.protocolOffsets)
-
 proc `==`(lhs, rhs: Dispatcher): bool =
   lhs.activeProtocols == rhs.activeProtocols
@@ -365,8 +362,8 @@ proc supports*(peer: Peer, Protocol: type): bool =
 template perPeerMsgId(peer: Peer, MsgType: type): int =
   perPeerMsgIdImpl(peer, MsgType.msgProtocol.protocolInfo, MsgType.msgId)
 
-proc invokeThunk*(peer: Peer, msgId: int, msgData: var Rlp): Future[void]
-    {.raises: [UnsupportedMessageError, RlpError].} =
+proc invokeThunk*(peer: Peer, msgId: int, msgData: Rlp): Future[void]
+    {.async: (raises: [CatchableError, rlp.RlpError]).} =
   template invalidIdError: untyped =
     raise newException(UnsupportedMessageError,
       "RLPx message with an invalid id " & $msgId &
@@ -379,7 +376,7 @@ proc invokeThunk*(peer: Peer, msgId: int, msgData: var Rlp): Future[void]
   let thunk = peer.dispatcher.messages[msgId].thunk
   if thunk == nil: invalidIdError()
 
-  return thunk(peer, msgId, msgData)
+  await thunk(peer, msgId, msgData)
 
 template compressMsg(peer: Peer, data: seq[byte]): seq[byte] =
   when useSnappy:
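Note: invokeThunk used to return the thunk's Future unawaited; now that it is async with its own raises annotation, it awaits the thunk so the checker can relate the thunk's declared exceptions to invokeThunk's list. The same shape in miniature, names hypothetical:

import chronos

proc inner(): Future[int] {.async: (raises: [ValueError]).} =
  return 1

proc outer(): Future[int] {.async: (raises: [ValueError]).} =
  # Awaiting (rather than returning the raw Future) lets the compiler
  # check inner's raises list against outer's declared list.
  return await inner()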
@@ -389,7 +386,7 @@ template compressMsg(peer: Peer, data: seq[byte]): seq[byte] =
   else:
     data
 
-proc sendMsg*(peer: Peer, data: seq[byte]) {.gcsafe, async.} =
+proc sendMsg*(peer: Peer, data: seq[byte]) {.async.} =
   var cipherText = encryptMsg(peer.compressMsg(data), peer.secretsState)
   try:
     var res = await peer.transport.write(cipherText)
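Note: the sendMsg hunk is the "Remove gcsafe from async procs" part of the commit message; the annotation is presumably redundant on async proc definitions, where gcsafe-ness is checked anyway. Sketch:

import chronos

# {.async.} alone is enough here; the explicit {.gcsafe.} that the
# commit removes added nothing on a proc definition like this.
proc ping() {.async.} =
  await sleepAsync(1.milliseconds)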
@@ -861,10 +858,8 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend =
     msgName = $msgIdent
     msgRecName = msg.recName
     responseMsgId = if msg.response != nil: msg.response.id else: -1
-    ResponseRecord = if msg.response != nil: msg.response.recName else: nil
     hasReqId = msg.hasReqId
     protocol = msg.protocol
-    userPragmas = msg.procDef.pragma
 
     # variables used in the sending procs
     peerOrResponder = ident"peerOrResponder"
@@ -922,7 +917,6 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend =
       # The received RLP data is deserialized to a local variable of
       # the message-specific type. This is done field by field here:
-      let msgNameLit = newLit(msgName)
       readParams.add quote do:
         `receivedMsg`.`param` = `checkedRlpRead`(`peerVar`, `receivedRlp`, `paramType`)
@@ -954,7 +948,7 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend =
     proc `thunkName`(`peerVar`: `Peer`, _: int, data: Rlp)
         # Fun error if you just use `RlpError` instead of `rlp.RlpError`:
         # "Error: type expected, but got symbol 'RlpError' of kind 'EnumField'"
-        {.async, gcsafe, raises: [rlp.RlpError].} =
+        {.async: (raises: [rlp.RlpError, CatchableError]).} =
       var `receivedRlp` = data
       var `receivedMsg` {.noinit.}: `msgRecName`
       `readParamsPrelude`
@@ -1000,7 +994,7 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend =
   let initWriter = quote do:
     var `rlpWriter` = `initRlpWriter`()
-    const `perProtocolMsgIdVar` = `msgId`
+    const `perProtocolMsgIdVar` {.used.} = `msgId`
     let `perPeerMsgIdVar` = `perPeerMsgIdValue`
     `append`(`rlpWriter`, `perPeerMsgIdVar`)
@@ -1088,7 +1082,7 @@ proc callDisconnectHandlers(peer: Peer, reason: DisconnectionReason):
       trace "Disconnection handler ended with an error", err = f.error.msg
 
 proc disconnect*(peer: Peer, reason: DisconnectionReason,
-                 notifyOtherPeer = false) {.async.} =
+                 notifyOtherPeer = false) {.async: (raises: [CatchableError]).} =
   if peer.connectionState notin {Disconnecting, Disconnected}:
     peer.connectionState = Disconnecting
     # Do this first so sub-protocols have time to clean up and stop sending
@@ -1391,7 +1385,7 @@ proc rlpxConnect*(node: EthereumNode, remote: Node):
 # TODO: rework rlpxAccept similar to rlpxConnect.
 proc rlpxAccept*(
-    node: EthereumNode, transport: StreamTransport): Future[Peer] {.async.} =
+    node: EthereumNode, transport: StreamTransport): Future[Peer] {.async: (raises: [CatchableError]).} =
   initTracing(devp2pInfo, node.protocols)
 
   let peer = Peer(transport: transport, network: node)