Snap sync simplify object inheritance (#1098)

* Reorg SnapPeerBase descriptor, notably start/stop flags

details:
  Instead of using three boolean flags startedFetch, stopped, and
  stopThisState a single enum type is used with values SyncRunningOk,
  SyncStopRequest, and SyncStopped.

* Restricting snap to eth66 and later

why:
  Id-tracked request/response wire protocol can handle overlapped
  responses when requests are sent in a row.

* Align function names with source code file names

why:
  Easier to reconcile when following the implemented logic.

* Update trace logging (want file locations)

why:
  The macros previously used hid the relevant file location (when
  `chroniclesLineNumbers` is turned on). They rather printed the file
  location of the template that was wrapping `trace`.

* Use KeyedQueue table instead of sequence

why:
  Quick access, easy configuration as LRU or FIFO with max entries
  (currently LRU.)

* Dissolve `SnapPeerEx` object extension into `SnapPeer`

why:
  It is logically cleaner and more obvious not to inherit from
  `SnapPeerBase` but to specify opaque field object references of the
  merged `SnapPeer` object. These can then be locally inherited.

* Dissolve `SnapSyncEx` object extension into `SnapSync`

why:
  It is logically cleaner and more obvious not to inherit from
  `SnapSyncEx` but to specify opaque field object references of
  the `SnapPeer` object. These can then be locally inherited.

  Also, in the re-factored code here the interface descriptor
  `SnapSyncCtx` inherited `SnapSyncEx`, which was sub-optimal (OO
  inheritance makes it easier to work with callback functions.)
This commit is contained in:
Jordan Hrycaj 2022-05-23 17:53:19 +01:00 committed by GitHub
parent a69b16abff
commit ba940a5ce7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 981 additions and 926 deletions

View File

@ -137,7 +137,7 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
# Early-initialise "--snap-sync" before starting any network connections. # Early-initialise "--snap-sync" before starting any network connections.
if ProtocolFlag.Eth in protocols and conf.snapSync: if ProtocolFlag.Eth in protocols and conf.snapSync:
SnapSyncCtx.new(nimbus.ethNode).start SnapSyncCtx.new(nimbus.ethNode, conf.maxPeers).start
# Connect directly to the static nodes # Connect directly to the static nodes
let staticPeers = conf.getStaticPeers() let staticPeers = conf.getStaticPeers()

View File

@ -15,10 +15,13 @@ import
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
eth/p2p/[private/p2p_types, peer_pool], eth/p2p/[private/p2p_types, peer_pool],
stew/byteutils, stew/byteutils,
"."/[protocol, trace_helper] "."/[protocol, types]
{.push raises:[Defect].} {.push raises:[Defect].}
logScope:
topics = "fast sync"
const const
minPeersToStartSync* = 2 # Wait for consensus of at least this minPeersToStartSync* = 2 # Wait for consensus of at least this
# number of peers before syncing # number of peers before syncing
@ -210,16 +213,16 @@ proc getBestBlockNumber(p: Peer): Future[BlockNumber] {.async.} =
skip: 0, skip: 0,
reverse: true) reverse: true)
tracePacket ">> Sending eth.GetBlockHeaders (0x03)", peer=p, trace trEthSendSending & "GetBlockHeaders (0x03)", peer=p,
startBlock=request.startBlock.hash.toHex, max=request.maxResults startBlock=request.startBlock.hash.toHex, max=request.maxResults
let latestBlock = await p.getBlockHeaders(request) let latestBlock = await p.getBlockHeaders(request)
if latestBlock.isSome: if latestBlock.isSome:
if latestBlock.get.headers.len > 0: if latestBlock.get.headers.len > 0:
result = latestBlock.get.headers[0].blockNumber result = latestBlock.get.headers[0].blockNumber
tracePacket "<< Got reply eth.BlockHeaders (0x04)", peer=p, trace trEthRecvGot & "BlockHeaders (0x04)", peer=p,
count=latestBlock.get.headers.len, count=latestBlock.get.headers.len,
blockNumber=(if latestBlock.get.headers.len > 0: $result else: "missing") blockNumber=(if latestBlock.get.headers.len > 0: $result else: "missing")
proc obtainBlocksFromPeer(syncCtx: FastSyncCtx, peer: Peer) {.async.} = proc obtainBlocksFromPeer(syncCtx: FastSyncCtx, peer: Peer) {.async.} =
# Update our best block number # Update our best block number
@ -249,25 +252,26 @@ proc obtainBlocksFromPeer(syncCtx: FastSyncCtx, peer: Peer) {.async.} =
var dataReceived = false var dataReceived = false
try: try:
tracePacket ">> Sending eth.GetBlockHeaders (0x03)", peer, trace trEthSendSending & "GetBlockHeaders (0x03)", peer,
startBlock=request.startBlock.number, max=request.maxResults, startBlock=request.startBlock.number, max=request.maxResults,
step=traceStep(request) step=traceStep(request)
let results = await peer.getBlockHeaders(request) let results = await peer.getBlockHeaders(request)
if results.isSome: if results.isSome:
tracePacket "<< Got reply eth.BlockHeaders (0x04)", peer, trace trEthRecvGot & "BlockHeaders (0x04)", peer,
count=results.get.headers.len, requested=request.maxResults count=results.get.headers.len, requested=request.maxResults
shallowCopy(workItem.headers, results.get.headers) shallowCopy(workItem.headers, results.get.headers)
var bodies = newSeqOfCap[BlockBody](workItem.headers.len) var bodies = newSeqOfCap[BlockBody](workItem.headers.len)
var hashes = newSeqOfCap[KeccakHash](maxBodiesFetch) var hashes = newSeqOfCap[KeccakHash](maxBodiesFetch)
template fetchBodies() = template fetchBodies() =
tracePacket ">> Sending eth.GetBlockBodies (0x05)", peer, trace trEthSendSending & "GetBlockBodies (0x05)", peer,
hashes=hashes.len hashes=hashes.len
let b = await peer.getBlockBodies(hashes) let b = await peer.getBlockBodies(hashes)
if b.isNone: if b.isNone:
raise newException(CatchableError, "Was not able to get the block bodies") raise newException(
CatchableError, "Was not able to get the block bodies")
let bodiesLen = b.get.blocks.len let bodiesLen = b.get.blocks.len
tracePacket "<< Got reply eth.BlockBodies (0x06)", peer, trace trEthRecvGot & "BlockBodies (0x06)", peer,
count=bodiesLen, requested=hashes.len count=bodiesLen, requested=hashes.len
if bodiesLen == 0: if bodiesLen == 0:
raise newException(CatchableError, "Zero block bodies received for request") raise newException(CatchableError, "Zero block bodies received for request")
@ -342,15 +346,15 @@ proc peersAgreeOnChain(a, b: Peer): Future[bool] {.async.} =
skip: 0, skip: 0,
reverse: true) reverse: true)
tracePacket ">> Sending eth.GetBlockHeaders (0x03)", peer=a, trace trEthSendSending & "GetBlockHeaders (0x03)", peer=a,
startBlock=request.startBlock.hash.toHex, max=request.maxResults startBlock=request.startBlock.hash.toHex, max=request.maxResults
let latestBlock = await a.getBlockHeaders(request) let latestBlock = await a.getBlockHeaders(request)
result = latestBlock.isSome and latestBlock.get.headers.len > 0 result = latestBlock.isSome and latestBlock.get.headers.len > 0
if tracePackets and latestBlock.isSome: if latestBlock.isSome:
let blockNumber = if result: $latestBlock.get.headers[0].blockNumber let blockNumber = if result: $latestBlock.get.headers[0].blockNumber
else: "missing" else: "missing"
tracePacket "<< Got reply eth.BlockHeaders (0x04)", peer=a, trace trEthRecvGot & "BlockHeaders (0x04)", peer=a,
count=latestBlock.get.headers.len, blockNumber count=latestBlock.get.headers.len, blockNumber
proc randomTrustedPeer(ctx: FastSyncCtx): Peer = proc randomTrustedPeer(ctx: FastSyncCtx): Peer =
@ -362,7 +366,7 @@ proc randomTrustedPeer(ctx: FastSyncCtx): Peer =
inc i inc i
proc startSyncWithPeer(ctx: FastSyncCtx, peer: Peer) {.async.} = proc startSyncWithPeer(ctx: FastSyncCtx, peer: Peer) {.async.} =
trace "start sync", peer, trustedPeers = ctx.trustedPeers.len trace "Start sync", peer, trustedPeers = ctx.trustedPeers.len
if ctx.trustedPeers.len >= minPeersToStartSync: if ctx.trustedPeers.len >= minPeersToStartSync:
# We have enough trusted peers. Validate new peer against trusted # We have enough trusted peers. Validate new peer against trusted
if await peersAgreeOnChain(peer, ctx.randomTrustedPeer()): if await peersAgreeOnChain(peer, ctx.randomTrustedPeer()):

View File

@ -40,15 +40,11 @@ import
chronos, chronos,
eth/[common/eth_types, p2p, p2p/private/p2p_types, p2p/blockchain_utils], eth/[common/eth_types, p2p, p2p/private/p2p_types, p2p/blockchain_utils],
stew/byteutils, stew/byteutils,
".."/trace_helper, ../types,
./pickeled_eth_tracers ./trace_config
export logScope:
tracePackets, tracePacket, topics = "datax"
traceGossips, traceGossip,
traceTimeouts, traceTimeout,
traceNetworkErrors, traceNetworkError,
tracePacketErrors, tracePacketError
type type
NewBlockHashesAnnounce* = object NewBlockHashesAnnounce* = object
@ -80,6 +76,25 @@ const
ethVersion* = 66 ethVersion* = 66
prettyEthProtoName* = "[eth/" & $ethVersion & "]" prettyEthProtoName* = "[eth/" & $ethVersion & "]"
# Pickeled tracer texts
trEthRecvReceived* =
"<< " & prettyEthProtoName & " Received "
trEthRecvGot* =
"<< " & prettyEthProtoName & " Got "
trEthRecvProtocolViolation* =
"<< " & prettyEthProtoName & " Protocol violation, "
trEthRecvError* =
"<< " & prettyEthProtoName & " Error "
trEthRecvTimeoutWaiting* =
"<< " & prettyEthProtoName & " Timeout waiting "
trEthSendSending* =
">> " & prettyEthProtoName & " Sending "
trEthSendReplying* =
">> " & prettyEthProtoName & " Replying "
trEthSendDelaying* =
">> " & prettyEthProtoName & " Delaying "
trEthSendDiscarding* =
"<< " & prettyEthProtoName & " Discarding "
p2pProtocol eth66(version = ethVersion, p2pProtocol eth66(version = ethVersion,
rlpxName = "eth", rlpxName = "eth",
@ -96,8 +111,8 @@ p2pProtocol eth66(version = ethVersion,
forkHash: chainForkId.crc.toBytesBE, forkHash: chainForkId.crc.toBytesBE,
forkNext: chainForkId.nextFork.toBlockNumber) forkNext: chainForkId.nextFork.toBlockNumber)
traceSendSending "Status (0x00) " & prettyEthProtoName, trace trEthSendSending & "Status (0x00)", peer,
peer, td=bestBlock.difficulty, td=bestBlock.difficulty,
bestHash=bestBlock.blockHash.toHex, bestHash=bestBlock.blockHash.toHex,
networkId=network.networkId, networkId=network.networkId,
genesis=chain.genesisHash.toHex, genesis=chain.genesisHash.toHex,
@ -111,7 +126,7 @@ p2pProtocol eth66(version = ethVersion,
forkId, forkId,
timeout = chronos.seconds(10)) timeout = chronos.seconds(10))
if traceHandshakes: when trEthTraceHandshakesOk:
trace "Handshake: Local and remote networkId", trace "Handshake: Local and remote networkId",
local=network.networkId, remote=m.networkId local=network.networkId, remote=m.networkId
trace "Handshake: Local and remote genesisHash", trace "Handshake: Local and remote genesisHash",
@ -123,12 +138,14 @@ p2pProtocol eth66(version = ethVersion,
if m.networkId != network.networkId: if m.networkId != network.networkId:
trace "Peer for a different network (networkId)", peer, trace "Peer for a different network (networkId)", peer,
expectNetworkId=network.networkId, gotNetworkId=m.networkId expectNetworkId=network.networkId, gotNetworkId=m.networkId
raise newException(UselessPeerError, "Eth handshake for different network") raise newException(
UselessPeerError, "Eth handshake for different network")
if m.genesisHash != chain.genesisHash: if m.genesisHash != chain.genesisHash:
trace "Peer for a different network (genesisHash)", peer, trace "Peer for a different network (genesisHash)", peer,
expectGenesis=chain.genesisHash.toHex, gotGenesis=m.genesisHash.toHex expectGenesis=chain.genesisHash.toHex, gotGenesis=m.genesisHash.toHex
raise newException(UselessPeerError, "Eth handshake for different network") raise newException(
UselessPeerError, "Eth handshake for different network")
trace "Peer matches our network", peer trace "Peer matches our network", peer
peer.state.initialized = true peer.state.initialized = true
@ -144,42 +161,37 @@ p2pProtocol eth66(version = ethVersion,
bestHash: Hash256, bestHash: Hash256,
genesisHash: Hash256, genesisHash: Hash256,
forkId: ForkId) = forkId: ForkId) =
traceRecvReceived "Status (0x00)", trace trEthRecvReceived & "Status (0x00)", peer,
peer, networkId, totalDifficulty, bestHash, genesisHash, networkId, totalDifficulty, bestHash, genesisHash,
forkHash=forkId.forkHash.toHex, forkNext=forkId.forkNext forkHash=forkId.forkHash.toHex, forkNext=forkId.forkNext
# User message 0x01: NewBlockHashes. # User message 0x01: NewBlockHashes.
proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) = proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) =
traceSendGossipDiscarding "NewBlockHashes (0x01)", when trEthTraceGossipOk:
peer, hashes=hashes.len trace trEthSendDiscarding & "NewBlockHashes (0x01)", peer,
hashes=hashes.len
discard discard
# User message 0x02: Transactions. # User message 0x02: Transactions.
proc transactions(peer: Peer, transactions: openArray[Transaction]) = proc transactions(peer: Peer, transactions: openArray[Transaction]) =
traceSendGossipDiscarding "Transactions (0x02)", when trEthTraceGossipOk:
peer, transactions=transactions.len trace trEthSendDiscarding & "Transactions (0x02)", peer,
transactions=transactions.len
discard discard
requestResponse: requestResponse:
# User message 0x03: GetBlockHeaders. # User message 0x03: GetBlockHeaders.
proc getBlockHeaders(peer: Peer, request: BlocksRequest) = proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
if tracePackets: when trEthTracePacketsOk:
if request.maxResults == 1 and request.startBlock.isHash: let
traceRecvReceived "GetBlockHeaders/Hash (0x03)", startBlock =
peer, blockHash=($request.startBlock.hash), count=1 if request.startBlock.isHash: request.startBlock.hash.toHex
elif request.maxResults == 1: else: '#' & $request.startBlock.number
traceRecvReceived "GetBlockHeaders (0x03)", step =
peer, `block`=request.startBlock.number, count=1 if request.maxResults == 1: "n/a"
elif request.startBlock.isHash: else: $request.traceStep
traceRecvReceived "GetBlockHeaders/Hash (0x03)", trace trEthRecvReceived & "GetBlockHeaders (0x03)", peer,
peer, firstBlockHash=($request.startBlock.hash), startBlock, count=request.maxResults, step
count=request.maxResults,
step=traceStep(request)
else:
traceRecvReceived "GetBlockHeaders (0x03)",
peer, firstBlock=request.startBlock.number,
count=request.maxResults,
step=traceStep(request)
if request.maxResults > uint64(maxHeadersFetch): if request.maxResults > uint64(maxHeadersFetch):
debug "GetBlockHeaders (0x03) requested too many headers", debug "GetBlockHeaders (0x03) requested too many headers",
@ -189,11 +201,11 @@ p2pProtocol eth66(version = ethVersion,
let headers = peer.network.chain.getBlockHeaders(request) let headers = peer.network.chain.getBlockHeaders(request)
if headers.len > 0: if headers.len > 0:
traceSendReplying "with BlockHeaders (0x04)", trace trEthSendReplying & "with BlockHeaders (0x04)", peer,
peer, sent=headers.len, requested=request.maxResults sent=headers.len, requested=request.maxResults
else: else:
traceSendReplying "EMPTY BlockHeaders (0x04)", trace trEthSendReplying & "EMPTY BlockHeaders (0x04)", peer,
peer, sent=0, requested=request.maxResults sent=0, requested=request.maxResults
await response.send(headers) await response.send(headers)
@ -203,8 +215,8 @@ p2pProtocol eth66(version = ethVersion,
requestResponse: requestResponse:
# User message 0x05: GetBlockBodies. # User message 0x05: GetBlockBodies.
proc getBlockBodies(peer: Peer, hashes: openArray[Hash256]) = proc getBlockBodies(peer: Peer, hashes: openArray[Hash256]) =
traceRecvReceived "GetBlockBodies (0x05)", trace trEthRecvReceived & "GetBlockBodies (0x05)", peer,
peer, hashes=hashes.len hashes=hashes.len
if hashes.len > maxBodiesFetch: if hashes.len > maxBodiesFetch:
debug "GetBlockBodies (0x05) requested too many bodies", debug "GetBlockBodies (0x05) requested too many bodies",
peer, requested=hashes.len, max=maxBodiesFetch peer, requested=hashes.len, max=maxBodiesFetch
@ -213,11 +225,11 @@ p2pProtocol eth66(version = ethVersion,
let bodies = peer.network.chain.getBlockBodies(hashes) let bodies = peer.network.chain.getBlockBodies(hashes)
if bodies.len > 0: if bodies.len > 0:
traceSendReplying "with BlockBodies (0x06)", trace trEthSendReplying & "with BlockBodies (0x06)", peer,
peer, sent=bodies.len, requested=hashes.len sent=bodies.len, requested=hashes.len
else: else:
traceSendReplying "EMPTY BlockBodies (0x06)", trace trEthSendReplying & "EMPTY BlockBodies (0x06)", peer,
peer, sent=0, requested=hashes.len sent=0, requested=hashes.len
await response.send(bodies) await response.send(bodies)
@ -228,26 +240,28 @@ p2pProtocol eth66(version = ethVersion,
proc newBlock(peer: Peer, bh: EthBlock, totalDifficulty: DifficultyInt) = proc newBlock(peer: Peer, bh: EthBlock, totalDifficulty: DifficultyInt) =
# (Note, needs to use `EthBlock` instead of its alias `NewBlockAnnounce` # (Note, needs to use `EthBlock` instead of its alias `NewBlockAnnounce`
# because either `p2pProtocol` or RLPx doesn't work with an alias.) # because either `p2pProtocol` or RLPx doesn't work with an alias.)
traceSendGossipDiscarding "NewBlock (0x07)", when trEthTraceGossipOk:
peer, totalDifficulty, trace trEthSendDiscarding & "NewBlock (0x07)", peer,
blockNumber = bh.header.blockNumber, totalDifficulty,
blockDifficulty = bh.header.difficulty blockNumber = bh.header.blockNumber,
blockDifficulty = bh.header.difficulty
discard discard
# User message 0x08: NewPooledTransactionHashes. # User message 0x08: NewPooledTransactionHashes.
proc newPooledTransactionHashes(peer: Peer, txHashes: openArray[Hash256]) = proc newPooledTransactionHashes(peer: Peer, txHashes: openArray[Hash256]) =
traceSendGossipDiscarding "NewPooledTransactionHashes (0x08)", when trEthTraceGossipOk:
peer, hashes=txHashes.len trace trEthSendDiscarding & "NewPooledTransactionHashes (0x08)", peer,
hashes=txHashes.len
discard discard
requestResponse: requestResponse:
# User message 0x09: GetPooledTransactions. # User message 0x09: GetPooledTransactions.
proc getPooledTransactions(peer: Peer, txHashes: openArray[Hash256]) = proc getPooledTransactions(peer: Peer, txHashes: openArray[Hash256]) =
traceRecvReceived "GetPooledTransactions (0x09)", trace trEthRecvReceived & "GetPooledTransactions (0x09)", peer,
peer, hashes=txHashes.len hashes=txHashes.len
traceSendReplying "EMPTY PooledTransactions (0x10)", trace trEthSendReplying & "EMPTY PooledTransactions (0x10)", peer,
peer, sent=0, requested=txHashes.len sent=0, requested=txHashes.len
await response.send([]) await response.send([])
# User message 0x0a: PooledTransactions. # User message 0x0a: PooledTransactions.
@ -257,7 +271,7 @@ p2pProtocol eth66(version = ethVersion,
# User message 0x0d: GetNodeData. # User message 0x0d: GetNodeData.
proc getNodeData(peer: Peer, nodeHashes: openArray[Hash256]) = proc getNodeData(peer: Peer, nodeHashes: openArray[Hash256]) =
traceRecvReceived "GetNodeData (0x0d)", peer, trace trEthRecvReceived & "GetNodeData (0x0d)", peer,
hashes=nodeHashes.len hashes=nodeHashes.len
var data: seq[Blob] var data: seq[Blob]
@ -267,10 +281,10 @@ p2pProtocol eth66(version = ethVersion,
data = peer.network.chain.getStorageNodes(nodeHashes) data = peer.network.chain.getStorageNodes(nodeHashes)
if data.len > 0: if data.len > 0:
traceSendReplying "with NodeData (0x0e)", peer, trace trEthSendReplying & "with NodeData (0x0e)", peer,
sent=data.len, requested=nodeHashes.len sent=data.len, requested=nodeHashes.len
else: else:
traceSendReplying "EMPTY NodeData (0x0e)", peer, trace trEthSendReplying & "EMPTY NodeData (0x0e)", peer,
sent=0, requested=nodeHashes.len sent=0, requested=nodeHashes.len
await peer.nodeData(data) await peer.nodeData(data)
@ -282,16 +296,16 @@ p2pProtocol eth66(version = ethVersion,
# know if this is a valid reply ("Got reply") or something else. # know if this is a valid reply ("Got reply") or something else.
peer.state.onNodeData(peer, data) peer.state.onNodeData(peer, data)
else: else:
traceSendDiscarding "NodeData (0x0e)", peer, trace trEthSendDiscarding & "NodeData (0x0e)", peer,
bytes=data.len bytes=data.len
requestResponse: requestResponse:
# User message 0x0f: GetReceipts. # User message 0x0f: GetReceipts.
proc getReceipts(peer: Peer, hashes: openArray[Hash256]) = proc getReceipts(peer: Peer, hashes: openArray[Hash256]) =
traceRecvReceived "GetReceipts (0x0f)", peer, trace trEthRecvReceived & "GetReceipts (0x0f)", peer,
hashes=hashes.len hashes=hashes.len
traceSendReplying "EMPTY Receipts (0x10)", peer, trace trEthSendReplying & "EMPTY Receipts (0x10)", peer,
sent=0, requested=hashes.len sent=0, requested=hashes.len
await response.send([]) await response.send([])
# TODO: implement `getReceipts` and reactivate this code # TODO: implement `getReceipts` and reactivate this code

View File

@ -1,52 +0,0 @@
# Nimbus - Rapidly converge on and track the canonical chain head of each peer
#
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
template traceRecvReceived*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Received " & msg,
`args`
template traceRecvGot*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Got " & msg,
`args`
template traceRecvProtocolViolation*(msg: static[string], args: varargs[untyped]) =
tracePacketError "<< " & prettyEthProtoName & " Protocol violation, " & msg,
`args`
template traceRecvError*(msg: static[string], args: varargs[untyped]) =
traceNetworkError "<< " & prettyEthProtoName & " Error " & msg,
`args`
template traceRecvTimeoutWaiting*(msg: static[string], args: varargs[untyped]) =
traceTimeout "<< " & prettyEthProtoName & " Timeout waiting " & msg,
`args`
template traceSendSending*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettyEthProtoName & " Sending " & msg,
`args`
template traceSendReplying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettyEthProtoName & " Replying " & msg,
`args`
template traceSendDelaying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">>" & prettyEthProtoName & " Delaying " & msg,
`args`
template traceSendGossipDiscarding*(msg: static[string], args: varargs[untyped]) =
traceGossip "<< " & prettyEthProtoName & " Discarding " & msg,
`args`
template traceSendDiscarding*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettyEthProtoName & " Discarding " & msg,
`args`
# End

View File

@ -1,40 +0,0 @@
# Nimbus - Rapidly converge on and track the canonical chain head of each peer
#
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
template traceRecvReceived*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettySnapProtoName & " Received " & msg,
`args`
template traceRecvGot*(msg: static[string], args: varargs[untyped]) =
tracePacket "<< " & prettySnapProtoName & " Got " & msg,
`args`
template traceRecvProtocolViolation*(msg: static[string], args: varargs[untyped]) =
tracePacketError "<< " & prettySnapProtoName & " Protocol violation, " & msg,
`args`
template traceRecvError*(msg: static[string], args: varargs[untyped]) =
traceNetworkError "<< " & prettySnapProtoName & " Error " & msg,
`args`
template traceRecvTimeoutWaiting*(msg: static[string], args: varargs[untyped]) =
traceTimeout "<< " & prettySnapProtoName & " Timeout waiting " & msg,
`args`
template traceSendSending*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettySnapProtoName & " Sending " & msg,
`args`
template traceSendReplying*(msg: static[string], args: varargs[untyped]) =
tracePacket ">> " & prettySnapProtoName & " Replying " & msg,
`args`
# End

View File

@ -203,9 +203,12 @@ import
eth/[common/eth_types, p2p, p2p/private/p2p_types], eth/[common/eth_types, p2p, p2p/private/p2p_types],
nimcrypto/hash, nimcrypto/hash,
stew/byteutils, stew/byteutils,
".."/[snap/path_desc, trace_helper],
../../constants, ../../constants,
./pickeled_snap_tracers ../snap/path_desc,
./trace_config
logScope:
topics = "datax"
type type
SnapAccount* = object SnapAccount* = object
@ -224,6 +227,23 @@ const
snapVersion* = 1 snapVersion* = 1
prettySnapProtoName* = "[snap/" & $snapVersion & "]" prettySnapProtoName* = "[snap/" & $snapVersion & "]"
# Pickeled tracer texts
trSnapRecvReceived* =
"<< " & prettySnapProtoName & " Received "
trSnapRecvGot* =
"<< " & prettySnapProtoName & " Got "
trSnapRecvProtocolViolation* =
"<< " & prettySnapProtoName & " Protocol violation, "
trSnapRecvError* =
"<< " & prettySnapProtoName & " Error "
trSnapRecvTimeoutWaiting* =
"<< " & prettySnapProtoName & " Timeout waiting "
trSnapSendSending* =
">> " & prettySnapProtoName & " Sending "
trSnapSendReplying* =
">> " & prettySnapProtoName & " Replying "
# The `snap` protocol represents `Account` differently from the regular RLP # The `snap` protocol represents `Account` differently from the regular RLP
# serialisation used in `eth` protocol as well as the canonical Merkle hash # serialisation used in `eth` protocol as well as the canonical Merkle hash
# over all accounts. In `snap`, empty storage hash and empty code hash are # over all accounts. In `snap`, empty storage hash and empty code hash are
@ -297,11 +317,12 @@ p2pProtocol snap1(version = 1,
# Next line differs from spec to match Geth. # Next line differs from spec to match Geth.
origin: LeafPath, limit: LeafPath, origin: LeafPath, limit: LeafPath,
responseBytes: uint64) = responseBytes: uint64) =
traceRecvReceived "GetAccountRange (0x00)", peer, trace trSnapRecvReceived & "GetAccountRange (0x00)", peer,
# traceRecvReceived "GetAccountRange (0x00)", peer,
accountRange=pathRange(origin, limit), accountRange=pathRange(origin, limit),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
traceSendReplying "EMPTY AccountRange (0x01)", peer, sent=0 trace trSnapSendReplying & "EMPTY AccountRange (0x01)", peer, sent=0
await response.send(@[], @[]) await response.send(@[], @[])
# User message 0x01: AccountRange. # User message 0x01: AccountRange.
@ -316,7 +337,7 @@ p2pProtocol snap1(version = 1,
# Next line differs from spec to match Geth. # Next line differs from spec to match Geth.
origin: openArray[byte], limit: openArray[byte], origin: openArray[byte], limit: openArray[byte],
responseBytes: uint64) = responseBytes: uint64) =
if tracePackets: when trSnapTracePacketsOk:
var definiteFullRange = ((origin.len == 32 or origin.len == 0) and var definiteFullRange = ((origin.len == 32 or origin.len == 0) and
(limit.len == 32 or limit.len == 0)) (limit.len == 32 or limit.len == 0))
if definiteFullRange: if definiteFullRange:
@ -337,12 +358,12 @@ p2pProtocol snap1(version = 1,
if definiteFullRange: if definiteFullRange:
# Fetching storage for multiple accounts. # Fetching storage for multiple accounts.
traceRecvReceived "GetStorageRanges/A (0x02)", peer, trace trSnapRecvReceived & "GetStorageRanges/A (0x02)", peer,
accountPaths=accounts.len, accountPaths=accounts.len,
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
elif accounts.len == 1: elif accounts.len == 1:
# Fetching partial storage for one account, aka. "large contract". # Fetching partial storage for one account, aka. "large contract".
traceRecvReceived "GetStorageRanges/S (0x02)", peer, trace trSnapRecvReceived & "GetStorageRanges/S (0x02)", peer,
accountPaths=1, accountPaths=1,
storageRange=(describe(origin) & '-' & describe(limit)), storageRange=(describe(origin) & '-' & describe(limit)),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
@ -350,12 +371,12 @@ p2pProtocol snap1(version = 1,
# This branch is separated because these shouldn't occur. It's not # This branch is separated because these shouldn't occur. It's not
# really specified what happens when there are multiple accounts and # really specified what happens when there are multiple accounts and
# non-default path range. # non-default path range.
traceRecvReceived "GetStorageRanges/AS?? (0x02)", peer, trace trSnapRecvReceived & "GetStorageRanges/AS?? (0x02)", peer,
accountPaths=accounts.len, accountPaths=accounts.len,
storageRange=(describe(origin) & '-' & describe(limit)), storageRange=(describe(origin) & '-' & describe(limit)),
stateRoot=($rootHash), responseBytes stateRoot=($rootHash), responseBytes
traceSendReplying "EMPTY StorageRanges (0x03)", peer, sent=0 trace trSnapSendReplying & "EMPTY StorageRanges (0x03)", peer, sent=0
await response.send(@[], @[]) await response.send(@[], @[])
# User message 0x03: StorageRanges. # User message 0x03: StorageRanges.
@ -367,10 +388,10 @@ p2pProtocol snap1(version = 1,
requestResponse: requestResponse:
proc getByteCodes(peer: Peer, nodeHashes: openArray[Hash256], proc getByteCodes(peer: Peer, nodeHashes: openArray[Hash256],
responseBytes: uint64) = responseBytes: uint64) =
traceRecvReceived "GetByteCodes (0x04)", peer, trace trSnapRecvReceived & "GetByteCodes (0x04)", peer,
hashes=nodeHashes.len, responseBytes hashes=nodeHashes.len, responseBytes
traceSendReplying "EMPTY ByteCodes (0x05)", peer, sent=0 trace trSnapSendReplying & "EMPTY ByteCodes (0x05)", peer, sent=0
await response.send(@[]) await response.send(@[])
# User message 0x05: ByteCodes. # User message 0x05: ByteCodes.
@ -380,10 +401,10 @@ p2pProtocol snap1(version = 1,
requestResponse: requestResponse:
proc getTrieNodes(peer: Peer, rootHash: Hash256, proc getTrieNodes(peer: Peer, rootHash: Hash256,
paths: openArray[InteriorPath], responseBytes: uint64) = paths: openArray[InteriorPath], responseBytes: uint64) =
traceRecvReceived "GetTrieNodes (0x06)", peer, trace trSnapRecvReceived & "GetTrieNodes (0x06)", peer,
nodePaths=paths.len, stateRoot=($rootHash), responseBytes nodePaths=paths.len, stateRoot=($rootHash), responseBytes
traceSendReplying "EMPTY TrieNodes (0x07)", peer, sent=0 trace trSnapSendReplying & "EMPTY TrieNodes (0x07)", peer, sent=0
await response.send(@[]) await response.send(@[])
# User message 0x07: TrieNodes. # User message 0x07: TrieNodes.

View File

@ -0,0 +1,30 @@
# Nimbus - Ethereum Wire Protocol, version eth/65
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
const
# Some static noisy settings for `eth` debugging
trEthTracePacketsOk* = true
## `trace` log each sync network message.
trEthTraceGossipOk* = true
## `trace` log each sync network message.
trEthTraceHandshakesOk* = true
## `trace` log each network handshake message.
trEthTraceIndividualNodesOk* = true
## `trace` log each trie node, account, storage, receipt, etc.
# Some static noisy settings for `snap` debugging
trSnapTracePacketsOk* = true
## `trace` log each sync network message.
# The files and lines clutter differently when sync tracing is enabled.
# publicLogScope: chroniclesLineNumbers=false
# End

View File

@ -10,102 +10,149 @@
# except according to those terms. # except according to those terms.
import import
std/[hashes, strutils],
chronicles, chronicles,
chronos, chronos,
eth/[common/eth_types, p2p, rlp], eth/[common/eth_types, p2p, p2p/peer_pool, p2p/private/p2p_types],
eth/p2p/[peer_pool, private/p2p_types, rlpx], stew/keyed_queue,
stint, "."/[protocol, types],
./protocol, ./snap/[base_desc, collect]
./snap/[base_desc, chain_head_tracker, get_nodedata, types],
./snap/pie/[sync_desc, peer_desc]
{.push raises: [Defect].} {.push raises: [Defect].}
logScope:
topics = "snap sync"
type type
SnapSyncCtx* = ref object of SnapSyncEx SnapSyncCtx* = ref object of SnapSync
peerPool: PeerPool peerTab: KeyedQueue[Peer,SnapPeer] ## LRU cache
tabSize: int ## maximal number of entries
pool: PeerPool ## for starting the system, debugging
# debugging
lastDump: seq[string]
lastlen: int
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc fetchPeerDesc(ns: SnapSyncCtx, peer: Peer): SnapPeerEx = proc nsCtx(sp: SnapPeer): SnapSyncCtx =
## Find matching peer and remove descriptor from list sp.ns.SnapSyncCtx
for i in 0 ..< ns.syncPeers.len:
if ns.syncPeers[i].peer == peer:
result = ns.syncPeers[i].ex
ns.syncPeers.delete(i)
return
proc new(T: type SnapPeerEx; ns: SnapSyncCtx; peer: Peer): T = proc hash(peer: Peer): Hash =
T( ## Needed for `peerTab` table key comparison
ns: ns, hash(peer.remote.id)
peer: peer,
stopped: false, # ------------------------------------------------------------------------------
# Initial state: hunt forward, maximum uncertainty range. # Private debugging helpers
syncMode: SyncHuntForward, # ------------------------------------------------------------------------------
huntLow: 0.toBlockNumber,
huntHigh: high(BlockNumber), proc dumpPeers(sn: SnapSyncCtx; force = false) =
huntStep: 0, if sn.lastLen != sn.peerTab.len or force:
bestBlockNumber: 0.toBlockNumber) sn.lastLen = sn.peerTab.len
let poolSize = sn.pool.len
if sn.peerTab.len == 0:
trace "*** Empty peer list", poolSize
else:
var n = sn.peerTab.len - 1
for sp in sn.peerTab.prevValues:
trace "*** Peer list entry",
n, poolSize, peer=sp, hunt=sp.hunt.pp
n.dec
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc syncPeerLoop(sp: SnapPeerEx) {.async.} = proc syncPeerLoop(sp: SnapPeer) {.async.} =
# This basic loop just runs the head-hunter for each peer. # This basic loop just runs the head-hunter for each peer.
while not sp.stopped: var cache = ""
await sp.peerHuntCanonical() while sp.ctrl.runState != SyncStopped:
if sp.stopped:
# Do something, work a bit
await sp.collectBlockHeaders()
if sp.ctrl.runState == SyncStopped:
trace "Ignoring stopped peer", peer=sp
return return
let delayMs = if sp.syncMode == SyncLocked: 1000 else: 50
# Rotate LRU connection table so the most used entry is at the list end
# TODO: Update implementation of lruFetch() using re-link, only
discard sp.nsCtx.peerTab.lruFetch(sp.peer)
let delayMs = if sp.hunt.syncMode == SyncLocked: 1000 else: 50
await sleepAsync(chronos.milliseconds(delayMs)) await sleepAsync(chronos.milliseconds(delayMs))
proc syncPeerStart(sp: SnapPeerEx) = proc syncPeerStart(sp: SnapPeer) =
asyncSpawn sp.syncPeerLoop() asyncSpawn sp.syncPeerLoop()
proc syncPeerStop(sp: SnapPeerEx) = proc syncPeerStop(sp: SnapPeer) =
sp.stopped = true sp.ctrl.runState = SyncStopped
# TODO: Cancel running `SnapPeerEx` instances. We need clean cancellation # TODO: Cancel running `SnapPeer` instances. We need clean cancellation
# for this. Doing so reliably will be addressed at a later time. # for this. Doing so reliably will be addressed at a later time.
proc onPeerConnected(ns: SnapSyncCtx, peer: Peer) = proc onPeerConnected(ns: SnapSyncCtx, peer: Peer) =
trace "Snap: Peer connected", peer trace "Peer connected", peer
let sp = SnapPeerEx.new(ns, peer) let sp = SnapPeer.new(ns, peer, SyncHuntForward, SyncRunningOk)
sp.setupGetNodeData() sp.collectDataSetup()
if peer.state(eth).initialized: if peer.state(eth).initialized:
# We know the hash but not the block number. # We know the hash but not the block number.
sp.bestBlockHash = peer.state(eth).bestBlockHash.BlockHash sp.hunt.bestHash = peer.state(eth).bestBlockHash.BlockHash
# TODO: Temporarily disabled because it's useful to test the head hunter. # TODO: Temporarily disabled because it's useful to test the head hunter.
# sp.syncMode = SyncOnlyHash # sp.syncMode = SyncOnlyHash
else: else:
trace "Snap: state(eth) not initialized!" trace "State(eth) not initialized!"
ns.syncPeers.add(sp) # Manage connection table, check for existing entry
if ns.peerTab.hasKey(peer):
trace "Peer exists already!", peer
return
# Check for table overflow. An overflow should not happen if the table is
# as large as the peer connection table.
if ns.tabSize <= ns.peerTab.len:
let leastPeer = ns.peerTab.shift.value.data
leastPeer.syncPeerStop
trace "Peer table full, deleted least used",
leastPeer, poolSize=ns.pool.len, tabLen=ns.peerTab.len, tabMax=ns.tabSize
# Add peer entry
discard ns.peerTab.append(sp.peer,sp)
trace "Starting peer",
peer, poolSize=ns.pool.len, tabLen=ns.peerTab.len, tabMax=ns.tabSize
# Debugging, peer table dump after adding gentry
#ns.dumpPeers(true)
sp.syncPeerStart() sp.syncPeerStart()
proc onPeerDisconnected(ns: SnapSyncCtx, peer: Peer) = proc onPeerDisconnected(ns: SnapSyncCtx, peer: Peer) =
trace "Snap: Peer disconnected", peer trace "Peer disconnected", peer
let sp = ns.fetchPeerDesc(peer) # Debugging, peer table dump before removing entry
if sp.isNil: #ns.dumpPeers(true)
debug "Snap: Disconnected from unregistered peer", peer
let rc = ns.peerTab.delete(peer)
if rc.isOk:
rc.value.data.syncPeerStop()
else: else:
sp.syncPeerStop() debug "Disconnected from unregistered peer", peer
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc new*(T: type SnapSyncCtx; ethNode: EthereumNode): T = proc new*(T: type SnapSyncCtx; ethNode: EthereumNode; maxPeers: int): T =
## Constructor ## Constructor
new result new result
result.peerPool = ethNode.peerPool let size = max(1,2*maxPeers) # allow double argument size
result.peerTab.init(size)
result.tabSize = size
result.pool = ethNode.peerPool
proc start*(ctx: SnapSyncCtx) = proc start*(ctx: SnapSyncCtx) =
## Set up syncing. This call should come early. ## Set up syncing. This call should come early.
@ -117,7 +164,7 @@ proc start*(ctx: SnapSyncCtx) =
proc(p: Peer) {.gcsafe.} = proc(p: Peer) {.gcsafe.} =
ctx.onPeerDisconnected(p)) ctx.onPeerDisconnected(p))
po.setProtocol eth po.setProtocol eth
ctx.peerPool.addObserver(ctx, po) ctx.pool.addObserver(ctx, po)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -12,7 +12,8 @@
import import
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
stew/[byteutils, keyed_queue, results], stew/[byteutils, keyed_queue, results],
./types ../../constants,
../types
{.push raises: [Defect].} {.push raises: [Defect].}
@ -21,28 +22,13 @@ const
## Internal size of LRU cache (for debugging) ## Internal size of LRU cache (for debugging)
type type
SnapStat* = distinct int SnapPeerStat* = distinct uint
SnapPeerStatsOk = object SnapPeerFetchBase* = ref object of RootObj
reorgDetected*: SnapStat ## Stub object, to be inherited
getBlockHeaders*: SnapStat
getNodeData*: SnapStat
SnapPeerStatsMinor = object SnapPeerRequestsBase* = ref object of RootObj
timeoutBlockHeaders*: SnapStat ## Stub object, to be inherited
unexpectedBlockHash*: SnapStat
SnapPeerStatsMajor = object
networkErrors*: SnapStat
excessBlockHeaders*: SnapStat
wrongBlockHeader*: SnapStat
SnapPeerStats* = object
## Statistics counters for events associated with this peer.
## These may be used to recognise errors and select good peers.
ok*: SnapPeerStatsOk
minor*: SnapPeerStatsMinor
major*: SnapPeerStatsMajor
SnapPeerMode* = enum SnapPeerMode* = enum
## The current state of tracking the peer's canonical chain head. ## The current state of tracking the peer's canonical chain head.
@ -54,63 +40,120 @@ type
SyncHuntRange SyncHuntRange
SyncHuntRangeFinal SyncHuntRangeFinal
SnapPeerBase* = ref object of RootObj SnapPeerRunState* = enum
## Peer state tracking. SyncRunningOk
ns*: SnapSyncBase ## Opaque object reference SyncStopRequest
peer*: Peer ## eth p2pProtocol SyncStopped
stopped*: bool
pendingGetBlockHeaders*:bool
stats*: SnapPeerStats
# Peer canonical chain head ("best block") search state. SnapPeerStats* = tuple
syncMode*: SnapPeerMode ## Statistics counters for events associated with this peer.
bestBlockNumber*: BlockNumber ## These may be used to recognise errors and select good peers.
bestBlockHash*: BlockHash ok: tuple[
huntLow*: BlockNumber ## Recent highest known present block. reorgDetected: SnapPeerStat,
huntHigh*: BlockNumber ## Recent lowest known absent block. getBlockHeaders: SnapPeerStat,
huntStep*: typeof(BlocksRequest.skip) # aka uint getNodeData: SnapPeerStat]
minor: tuple[
timeoutBlockHeaders: SnapPeerStat,
unexpectedBlockHash: SnapPeerStat]
major: tuple[
networkErrors: SnapPeerStat,
excessBlockHeaders: SnapPeerStat,
wrongBlockHeader: SnapPeerStat]
# State root to fetch state for. SnapPeerHunt* = tuple
# This changes during sync and is slightly different for each peer. ## Peer canonical chain head ("best block") search state.
syncStateRoot*: Option[TrieHash] syncMode: SnapPeerMode ## Action mode
startedFetch*: bool lowNumber: BlockNumber ## Recent lowest known block number.
stopThisState*: bool highNumber: BlockNumber ## Recent highest known block number.
bestNumber: BlockNumber
bestHash: BlockHash
step: uint
SnapSyncBase* = ref object of RootObj SnapPeerCtrl* = tuple
## Shared state among all peers of a snap syncing node. ## Control and state settings
seenBlock: KeyedQueue[array[32,byte],BlockNumber] stateRoot: Option[TrieHash]
## Temporary for pretty debugging, BlockHash keyed lru cache ## State root to fetch state for. This changes during sync and is
syncPeers*: seq[SnapPeerBase] ## slightly different for each peer.
## Peer state tracking runState: SnapPeerRunState
# -------
SnapSyncSeenBlocks = KeyedQueue[array[32,byte],BlockNumber]
## Temporary for pretty debugging, `BlockHash` keyed lru cache
SnapSyncFetchBase* = ref object of RootObj
## Stub object, to be inherited
# -------
SnapPeer* = ref object
## Non-inheritable peer state tracking descriptor.
ns*: SnapSync ## Snap descriptor object back reference
peer*: Peer ## Reference to eth p2pProtocol entry
stats*: SnapPeerStats ## Statistics counters
hunt*: SnapPeerHunt ## Peer chain head search state
ctrl*: SnapPeerCtrl ## Control and state settings
requests*: SnapPeerRequestsBase ## Opaque object reference
fetchState*: SnapPeerFetchBase ## Opaque object reference
SnapSync* = ref object of RootObj
## Shared state among all peers of a snap syncing node. Will be
## amended/inherited into `SnapSyncCtx` by the `snap` module.
seenBlock: SnapSyncSeenBlocks ## Temporary, debugging, prettyfied logs
sharedFetch*: SnapSyncFetchBase ## Opaque object reference
# ------------------------------------------------------------------------------
# Public Constructor
# ------------------------------------------------------------------------------
proc new*(
T: type SnapPeer;
ns: SnapSync;
peer: Peer;
syncMode: SnapPeerMode;
runState: SnapPeerRunState): T =
## Initial state, maximum uncertainty range.
T(ns: ns,
peer: peer,
ctrl: (
stateRoot: none(TrieHash),
runState: runState),
hunt: (
syncMode: syncMode,
lowNumber: 0.toBlockNumber.BlockNumber,
highNumber: high(BlockNumber).BlockNumber, # maximum uncertainty range.
bestNumber: 0.toBlockNumber.BlockNumber,
bestHash: ZERO_HASH256.BlockHash, # whatever
step: 0u))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc `$`*(sp: SnapPeerBase): string = proc `$`*(sp: SnapPeer): string =
$sp.peer $sp.peer
proc inc(stat: var SnapStat) {.borrow.} proc inc(stat: var SnapPeerStat) {.borrow.}
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, debugging helpers (will go away eventually) # Public functions, debugging helpers (will go away eventually)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pp*(sn: SnapSyncBase; bh: BlockHash): string = proc pp*(sn: SnapSync; bh: BlockHash): string =
## Pretty printer for debugging ## Pretty printer for debugging
let rc = sn.seenBlock.lruFetch(bh.untie.data) let rc = sn.seenBlock.lruFetch(bh.untie.data)
if rc.isOk: if rc.isOk:
return "#" & $rc.value return "#" & $rc.value
$bh.untie.data.toHex $bh.untie.data.toHex
proc pp*(sn: SnapSyncBase; bh: BlockHash; bn: BlockNumber): string = proc pp*(sn: SnapSync; bh: BlockHash; bn: BlockNumber): string =
## Pretty printer for debugging ## Pretty printer for debugging
let rc = sn.seenBlock.lruFetch(bh.untie.data) let rc = sn.seenBlock.lruFetch(bh.untie.data)
if rc.isOk: if rc.isOk:
return "#" & $rc.value return "#" & $rc.value
"#" & $sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax) "#" & $sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax)
proc pp*(sn: SnapSyncBase; bhn: HashOrNum): string = proc pp*(sn: SnapSync; bhn: HashOrNum): string =
if not bhn.isHash: if not bhn.isHash:
return "num(#" & $bhn.number & ")" return "num(#" & $bhn.number & ")"
let rc = sn.seenBlock.lruFetch(bhn.hash.data) let rc = sn.seenBlock.lruFetch(bhn.hash.data)
@ -118,11 +161,30 @@ proc pp*(sn: SnapSyncBase; bhn: HashOrNum): string =
return "hash(#" & $rc.value & ")" return "hash(#" & $rc.value & ")"
return "hash(" & $bhn.hash.data.toHex & ")" return "hash(" & $bhn.hash.data.toHex & ")"
proc seen*(sn: SnapSyncBase; bh: BlockHash; bn: BlockNumber) = proc seen*(sn: SnapSync; bh: BlockHash; bn: BlockNumber) =
## Register for pretty printing ## Register for pretty printing
if not sn.seenBlock.lruFetch(bh.untie.data).isOk: if not sn.seenBlock.lruFetch(bh.untie.data).isOk:
discard sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax) discard sn.seenBlock.lruAppend(bh.untie.data, bn, seenBlocksMax)
# -----------
import
../../../tests/replay/pp_light
proc pp*(bh: BlockHash): string =
bh.Hash256.pp
proc pp*(bn: BlockNumber): string =
if bn == high(BlockNumber): "#max"
else: "#" & $bn
proc pp*(sp: SnapPeerHunt): string =
result &= "(mode=" & $sp.syncMode
result &= ",num=(" & sp.lowNumber.pp & "," & sp.highNumber.pp & ")"
result &= ",best=(" & sp.bestNumber.pp & "," & sp.bestHash.pp & ")"
result &= ",step=" & $sp.step
result &= ")"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -64,11 +64,14 @@ import
chronos, chronos,
eth/[common/eth_types, p2p, p2p/private/p2p_types], eth/[common/eth_types, p2p, p2p/private/p2p_types],
../../p2p/chain/chain_desc, ../../p2p/chain/chain_desc,
".."/[protocol, protocol/pickeled_eth_tracers, trace_helper], ".."/[protocol, types],
"."/[base_desc, pie/peer_desc, pie/slicer, types] "."/[base_desc, peer/fetch, peer/reply_data]
{.push raises: [Defect].} {.push raises: [Defect].}
logScope:
topics = "snap collect"
const const
syncLockedMinimumReply = 8 syncLockedMinimumReply = 8
## Minimum number of headers we assume any peers will send if they have ## Minimum number of headers we assume any peers will send if they have
@ -112,126 +115,134 @@ static:
doAssert syncHuntForwardExpandShift >= 1 and syncHuntForwardExpandShift <= 8 doAssert syncHuntForwardExpandShift >= 1 and syncHuntForwardExpandShift <= 8
doAssert syncHuntBackwardExpandShift >= 1 and syncHuntBackwardExpandShift <= 8 doAssert syncHuntBackwardExpandShift >= 1 and syncHuntBackwardExpandShift <= 8
# Make sure that request/response wire protocol messages are id-tracked and
# would not overlap (no multi-protocol legacy support)
doAssert 66 <= protocol.ethVersion
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private logging helpers # Private logging helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc traceSyncLocked(sp: SnapPeerEx, bestNumber: BlockNumber, proc traceSyncLocked(sp: SnapPeer, number: BlockNumber, hash: BlockHash) =
bestHash: BlockHash) =
## Trace messages when peer canonical head is confirmed or updated. ## Trace messages when peer canonical head is confirmed or updated.
let bestBlock = sp.ns.pp(bestHash,bestNumber) let
if sp.syncMode != SyncLocked: bestBlock = sp.ns.pp(hash, number)
debug "Snap: Now tracking chain head of peer", peer=sp, bestBlock peer = $sp
elif bestNumber > sp.bestBlockNumber: if sp.hunt.syncMode != SyncLocked:
if bestNumber == sp.bestBlockNumber + 1: debug "Now tracking chain head of peer", peer, bestBlock
debug "Snap: Peer chain head advanced one block", peer=sp, elif number > sp.hunt.bestNumber:
if number == sp.hunt.bestNumber + 1:
debug "Peer chain head advanced one block", peer,
advance=1, bestBlock advance=1, bestBlock
else: else:
debug "Snap: Peer chain head advanced some blocks", peer=sp, debug "Peer chain head advanced some blocks", peer,
advance=(sp.bestBlockNumber - bestNumber), bestBlock advance=(sp.hunt.bestNumber - number), bestBlock
elif bestNumber < sp.bestBlockNumber or bestHash != sp.bestBlockHash: elif number < sp.hunt.bestNumber or hash != sp.hunt.bestHash:
debug "Snap: Peer chain head reorg detected", peer=sp, debug "Peer chain head reorg detected", peer,
advance=(sp.bestBlockNumber - bestNumber), bestBlock advance=(sp.hunt.bestNumber - number), bestBlock
# proc peerSyncChainTrace(sp: SnapPeerEx) = # proc peerSyncChainTrace(sp: SnapPeer) =
# ## To be called after `peerSyncChainRequest` has updated state. # ## To be called after `peerSyncChainRequest` has updated state.
# case sp.syncMode: # case sp.hunt.syncMode:
# of SyncLocked: # of SyncLocked:
# trace "Snap: SyncLocked", # trace "SyncLocked",
# bestBlock=sp.bestBlockNumber, bestBlockHash=($sp.bestBlockHash) # bestBlock = sp.ns.pp(sp.hunt.bestHash, sp.hunt.bestNumber)
# of SyncOnlyHash: # of SyncOnlyHash:
# trace "Snap: OnlyHash", bestBlockHash=($sp.bestBlockHash) # trace "OnlyHash",
# bestBlock = sp.ns.pp(sp.hunt.bestHash, sp.hunt.bestNumber)
# of SyncHuntForward: # of SyncHuntForward:
# template highMax(n: BlockNumber): string = # template highMax(n: BlockNumber): string =
# if n == high(BlockNumber): "max" else: $n # if n == high(BlockNumber): "max" else: $n
# trace "Snap: HuntForward", # trace "HuntForward",
# low=sp.huntLow, high=highMax(sp.huntHigh), step=sp.huntStep # low=sp.hunt.lowNumber, high=highMax(sp.hunt.highNumber),
# step=sp.hunt.step
# of SyncHuntBackward: # of SyncHuntBackward:
# trace "Snap: HuntBackward", # trace "HuntBackward",
# low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep # low=sp.hunt.lowNumber, high=sp.hunt.highNumber, step=sp.hunt.step
# of SyncHuntRange: # of SyncHuntRange:
# trace "Snap: HuntRange", # trace "HuntRange",
# low=sp.huntLow, high=sp.huntHigh, step=sp.huntStep # low=sp.hunt.lowNumber, high=sp.hunt.highNumber, step=sp.hunt.step
# of SyncHuntRangeFinal: # of SyncHuntRangeFinal:
# trace "Snap: HuntRangeFinal", # trace "HuntRangeFinal",
# low=sp.huntLow, high=sp.huntHigh, step=1 # low=sp.hunt.lowNumber, high=sp.hunt.highNumber, step=1
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc clearSyncStateRoot(sp: SnapPeerEx) = proc setSyncLocked(sp: SnapPeer, number: BlockNumber, hash: BlockHash) =
if sp.syncStateRoot.isSome:
debug "Snap: Stopping state sync from this peer", peer=sp
sp.syncStateRoot = none(TrieHash)
proc setSyncStateRoot(sp: SnapPeerEx, blockNumber: BlockNumber,
blockHash: BlockHash, stateRoot: TrieHash) =
let thisBlock = sp.ns.pp(blockHash,blockNumber)
if sp.syncStateRoot.isNone:
debug "Snap: Starting state sync from this peer", peer=sp,
thisBlock, stateRoot
elif sp.syncStateRoot.unsafeGet != stateRoot:
trace "Snap: Adjusting state sync root from this peer", peer=sp,
thisBlock, stateRoot
sp.syncStateRoot = some(stateRoot)
if not sp.startedFetch:
sp.startedFetch = true
trace "Snap: Starting to download block state", peer=sp,
thisBlock, stateRoot
asyncSpawn sp.stateFetch()
proc setSyncLocked(sp: SnapPeerEx, bestNumber: BlockNumber,
bestHash: BlockHash) =
## Actions to take when peer canonical head is confirmed or updated. ## Actions to take when peer canonical head is confirmed or updated.
sp.traceSyncLocked(bestNumber, bestHash) sp.traceSyncLocked(number, hash)
sp.bestBlockNumber = bestNumber sp.hunt.bestNumber = number
sp.bestBlockHash = bestHash sp.hunt.bestHash = hash
sp.syncMode = SyncLocked sp.hunt.syncMode = SyncLocked
proc setHuntBackward(sp: SnapPeerEx, lowestAbsent: BlockNumber) = proc clearSyncStateRoot(sp: SnapPeer) =
if sp.ctrl.stateRoot.isSome:
debug "Stopping state sync from this peer", peer=sp
sp.ctrl.stateRoot = none(TrieHash)
proc lockSyncStateRoot(sp: SnapPeer, number: BlockNumber, hash: BlockHash,
stateRoot: TrieHash) =
sp.setSyncLocked(number, hash)
let thisBlock = sp.ns.pp(hash, number)
if sp.ctrl.stateRoot.isNone:
debug "Starting state sync from this peer", peer=sp,
thisBlock, stateRoot
elif sp.ctrl.stateRoot.unsafeGet != stateRoot:
trace "Adjusting state sync root from this peer", peer=sp,
thisBlock, stateRoot
sp.ctrl.stateRoot = some(stateRoot)
if sp.ctrl.runState != SyncRunningOK:
sp.ctrl.runState = SyncRunningOK
trace "Starting to download block state", peer=sp,
thisBlock, stateRoot
asyncSpawn sp.fetch()
proc setHuntBackward(sp: SnapPeer, lowestAbsent: BlockNumber) =
## Start exponential search mode backward due to new uncertainty. ## Start exponential search mode backward due to new uncertainty.
sp.syncMode = SyncHuntBackward sp.hunt.syncMode = SyncHuntBackward
sp.huntStep = 0 sp.hunt.step = 0
# Block zero is always present. # Block zero is always present.
sp.huntLow = 0.toBlockNumber sp.hunt.lowNumber = 0.toBlockNumber
# Zero `lowestAbsent` is never correct, but an incorrect peer could send it. # Zero `lowestAbsent` is never correct, but an incorrect peer could send it.
sp.huntHigh = if lowestAbsent > 0: lowestAbsent else: 1.toBlockNumber sp.hunt.highNumber = if lowestAbsent > 0: lowestAbsent else: 1.toBlockNumber
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc setHuntForward(sp: SnapPeerEx, highestPresent: BlockNumber) = proc setHuntForward(sp: SnapPeer, highestPresent: BlockNumber) =
## Start exponential search mode forward due to new uncertainty. ## Start exponential search mode forward due to new uncertainty.
sp.syncMode = SyncHuntForward sp.hunt.syncMode = SyncHuntForward
sp.huntStep = 0 sp.hunt.step = 0
sp.huntLow = highestPresent sp.hunt.lowNumber = highestPresent
sp.huntHigh = high(BlockNumber) sp.hunt.highNumber = high(BlockNumber)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc updateHuntAbsent(sp: SnapPeerEx, lowestAbsent: BlockNumber) = proc updateHuntAbsent(sp: SnapPeer, lowestAbsent: BlockNumber) =
## Converge uncertainty range backward. ## Converge uncertainty range backward.
if lowestAbsent < sp.huntHigh: if lowestAbsent < sp.hunt.highNumber:
sp.huntHigh = lowestAbsent sp.hunt.highNumber = lowestAbsent
# If uncertainty range has moved outside the search window, change to hunt # If uncertainty range has moved outside the search window, change to hunt
# backward to block zero. Note that empty uncertainty range is allowed # backward to block zero. Note that empty uncertainty range is allowed
# (empty range is `huntLow + 1 == huntHigh`). # (empty range is `hunt.lowNumber + 1 == hunt.highNumber`).
if sp.huntHigh <= sp.huntLow: if sp.hunt.highNumber <= sp.hunt.lowNumber:
sp.setHuntBackward(lowestAbsent) sp.setHuntBackward(lowestAbsent)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc updateHuntPresent(sp: SnapPeerEx, highestPresent: BlockNumber) = proc updateHuntPresent(sp: SnapPeer, highestPresent: BlockNumber) =
## Converge uncertainty range forward. ## Converge uncertainty range forward.
if highestPresent > sp.huntLow: if highestPresent > sp.hunt.lowNumber:
sp.huntLow = highestPresent sp.hunt.lowNumber = highestPresent
# If uncertainty range has moved outside the search window, change to hunt # If uncertainty range has moved outside the search window, change to hunt
# forward to no upper limit. Note that empty uncertainty range is allowed # forward to no upper limit. Note that empty uncertainty range is allowed
# (empty range is `huntLow + 1 == huntHigh`). # (empty range is `hunt.lowNumber + 1 == hunt.highNumber`).
if sp.huntLow >= sp.huntHigh: if sp.hunt.lowNumber >= sp.hunt.highNumber:
sp.setHuntForward(highestPresent) sp.setHuntForward(highestPresent)
sp.clearSyncStateRoot() sp.clearSyncStateRoot()
proc peerSyncChainEmptyReply(sp: SnapPeerEx, request: BlocksRequest) = proc peerSyncChainEmptyReply(sp: SnapPeer, request: BlocksRequest) =
## Handle empty `GetBlockHeaders` reply. This means `request.startBlock` is ## Handle empty `GetBlockHeaders` reply. This means `request.startBlock` is
## absent on the peer. If it was `SyncLocked` there must have been a reorg ## absent on the peer. If it was `SyncLocked` there must have been a reorg
## and the previous canonical chain head has disappeared. If hunting, this ## and the previous canonical chain head has disappeared. If hunting, this
@ -240,26 +251,25 @@ proc peerSyncChainEmptyReply(sp: SnapPeerEx, request: BlocksRequest) =
# Treat empty response to a request starting from block 1 as equivalent to # Treat empty response to a request starting from block 1 as equivalent to
# length 1 starting from block 0 in `peerSyncChainNonEmptyReply`. We treat # length 1 starting from block 0 in `peerSyncChainNonEmptyReply`. We treat
# every peer as if it would send genesis for block 0, without asking for it. # every peer as if it would send genesis for block 0, without asking for it.
if request.skip == 0 and not request.reverse and if request.skip == 0 and
not request.reverse and
not request.startBlock.isHash and not request.startBlock.isHash and
request.startBlock.number == 1.toBlockNumber: request.startBlock.number == 1.toBlockNumber:
sp.setSyncLocked(0.toBlockNumber, sp.lockSyncStateRoot(0.toBlockNumber,
sp.peer.network.chain.genesisHash.BlockHash) sp.peer.network.chain.genesisHash.BlockHash,
sp.setSyncStateRoot(0.toBlockNumber, sp.peer.network.chain.Chain.genesisStateRoot.TrieHash)
sp.peer.network.chain.genesisHash.BlockHash,
sp.peer.network.chain.Chain.genesisStateRoot.TrieHash)
return return
if sp.syncMode == SyncLocked or sp.syncMode == SyncOnlyHash: if sp.hunt.syncMode == SyncLocked or sp.hunt.syncMode == SyncOnlyHash:
inc sp.stats.ok.reorgDetected inc sp.stats.ok.reorgDetected
trace "Snap: Peer reorg detected, best block disappeared", peer=sp, trace "Peer reorg detected, best block disappeared", peer=sp,
startBlock=request.startBlock startBlock=request.startBlock
let lowestAbsent = request.startBlock.number let lowestAbsent = request.startBlock.number
case sp.syncMode: case sp.hunt.syncMode:
of SyncLocked: of SyncLocked:
# If this message doesn't change our knowledge, ignore it. # If this message doesn't change our knowledge, ignore it.
if lowestAbsent > sp.bestBlockNumber: if lowestAbsent > sp.hunt.bestNumber:
return return
# Due to a reorg, peer's canonical head has lower block number, outside # Due to a reorg, peer's canonical head has lower block number, outside
# our tracking window. Sync lock is no longer valid. Switch to hunt # our tracking window. Sync lock is no longer valid. Switch to hunt
@ -275,13 +285,13 @@ proc peerSyncChainEmptyReply(sp: SnapPeerEx, request: BlocksRequest) =
# Update best block number. It is invalid except when `SyncLocked`, but # Update best block number. It is invalid except when `SyncLocked`, but
# still useful as a hint of what we knew recently, for example in displays. # still useful as a hint of what we knew recently, for example in displays.
if lowestAbsent <= sp.bestBlockNumber: if lowestAbsent <= sp.hunt.bestNumber:
sp.bestBlockNumber = if lowestAbsent == 0.toBlockNumber: lowestAbsent sp.hunt.bestNumber = if lowestAbsent == 0.toBlockNumber: lowestAbsent
else: lowestAbsent - 1.toBlockNumber else: lowestAbsent - 1.toBlockNumber
sp.bestBlockHash = default(typeof(sp.bestBlockHash)) sp.hunt.bestHash = default(typeof(sp.hunt.bestHash))
sp.ns.seen(sp.bestBlockHash,sp.bestBlockNumber) sp.ns.seen(sp.hunt.bestHash,sp.hunt.bestNumber)
proc peerSyncChainNonEmptyReply(sp: SnapPeerEx, request: BlocksRequest, proc peerSyncChainNonEmptyReply(sp: SnapPeer, request: BlocksRequest,
headers: openArray[BlockHeader]) = headers: openArray[BlockHeader]) =
## Handle non-empty `GetBlockHeaders` reply. This means `request.startBlock` ## Handle non-empty `GetBlockHeaders` reply. This means `request.startBlock`
## is present on the peer and in its canonical chain (unless the request was ## is present on the peer and in its canonical chain (unless the request was
@ -302,10 +312,9 @@ proc peerSyncChainNonEmptyReply(sp: SnapPeerEx, request: BlocksRequest,
if len < syncLockedMinimumReply and if len < syncLockedMinimumReply and
request.skip == 0 and not request.reverse and request.skip == 0 and not request.reverse and
len.uint < request.maxResults: len.uint < request.maxResults:
let blockHash = headers[highestIndex].blockHash.BlockHash sp.lockSyncStateRoot(headers[highestIndex].blockNumber,
sp.setSyncLocked(headers[highestIndex].blockNumber, blockHash) headers[highestIndex].blockHash.BlockHash,
sp.setSyncStateRoot(headers[highestIndex].blockNumber, blockHash, headers[highestIndex].stateRoot.TrieHash)
headers[highestIndex].stateRoot.TrieHash)
return return
# Be careful, this number is from externally supplied data and arithmetic # Be careful, this number is from externally supplied data and arithmetic
@ -316,10 +325,10 @@ proc peerSyncChainNonEmptyReply(sp: SnapPeerEx, request: BlocksRequest,
# tells us headers up to some number, but it doesn't tell us if there are # tells us headers up to some number, but it doesn't tell us if there are
# more after it in the peer's canonical chain. We have to request more # more after it in the peer's canonical chain. We have to request more
# headers to find out. # headers to find out.
case sp.syncMode: case sp.hunt.syncMode:
of SyncLocked: of SyncLocked:
# If this message doesn't change our knowledge, ignore it. # If this message doesn't change our knowledge, ignore it.
if highestPresent <= sp.bestBlockNumber: if highestPresent <= sp.hunt.bestNumber:
return return
# Sync lock is no longer valid as we don't have confirmed canonical head. # Sync lock is no longer valid as we don't have confirmed canonical head.
# Switch to hunt forward to find the new canonical head. # Switch to hunt forward to find the new canonical head.
@ -333,44 +342,38 @@ proc peerSyncChainNonEmptyReply(sp: SnapPeerEx, request: BlocksRequest,
# Update best block number. It is invalid except when `SyncLocked`, but # Update best block number. It is invalid except when `SyncLocked`, but
# still useful as a hint of what we knew recently, for example in displays. # still useful as a hint of what we knew recently, for example in displays.
if highestPresent > sp.bestBlockNumber: if highestPresent > sp.hunt.bestNumber:
sp.bestBlockNumber = highestPresent sp.hunt.bestNumber = highestPresent
sp.bestBlockHash = headers[highestIndex].blockHash.BlockHash sp.hunt.bestHash = headers[highestIndex].blockHash.BlockHash
sp.ns.seen(sp.bestBlockHash,sp.bestBlockNumber) sp.ns.seen(sp.hunt.bestHash,sp.hunt.bestNumber)
proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) = proc peerSyncChainRequest(sp: SnapPeer): BlocksRequest =
## Choose `GetBlockHeaders` parameters when hunting or following the canonical ## Choose `GetBlockHeaders` parameters when hunting or following the canonical
## chain of a peer. ## chain of a peer.
request = BlocksRequest( if sp.hunt.syncMode == SyncLocked:
startBlock: HashOrNum(isHash: false),
skip: 0,
reverse: false
)
if sp.syncMode == SyncLocked:
# Stable and locked. This is just checking for changes including reorgs. # Stable and locked. This is just checking for changes including reorgs.
# `sp.bestBlockNumber` was recently the head of the peer's canonical # `sp.hunt.bestNumber` was recently the head of the peer's canonical
# chain. We must include this block number to detect when the canonical # chain. We must include this block number to detect when the canonical
# chain gets shorter versus no change. # chain gets shorter versus no change.
request.startBlock.number = result.startBlock.number =
if sp.bestBlockNumber <= syncLockedQueryOverlap: if sp.hunt.bestNumber <= syncLockedQueryOverlap:
# Every peer should send genesis for block 0, so don't ask for it. # Every peer should send genesis for block 0, so don't ask for it.
# `peerSyncChainEmptyReply` has logic to handle this reply as if it # `peerSyncChainEmptyReply` has logic to handle this reply as if it
# was for block 0. Aside from saving bytes, this is more robust if # was for block 0. Aside from saving bytes, this is more robust if
# some client doesn't do genesis reply correctly. # some client doesn't do genesis reply correctly.
1.toBlockNumber 1.toBlockNumber
else: else:
min(sp.bestBlockNumber - syncLockedQueryOverlap.toBlockNumber, min(sp.hunt.bestNumber - syncLockedQueryOverlap.toBlockNumber,
high(BlockNumber) - (syncLockedQuerySize - 1).toBlockNumber) high(BlockNumber) - (syncLockedQuerySize - 1).toBlockNumber)
request.maxResults = syncLockedQuerySize result.maxResults = syncLockedQuerySize
return return
if sp.syncMode == SyncOnlyHash: if sp.hunt.syncMode == SyncOnlyHash:
# We only have the hash of the recent head of the peer's canonical chain. # We only have the hash of the recent head of the peer's canonical chain.
# Like `SyncLocked`, query more than one item to detect when the # Like `SyncLocked`, query more than one item to detect when the
# canonical chain gets shorter, no change or longer. # canonical chain gets shorter, no change or longer.
request.startBlock = HashOrNum(isHash: true, hash: sp.bestBlockHash.untie) result.startBlock = sp.hunt.bestHash.toHashOrNum
request.maxResults = syncLockedQuerySize result.maxResults = syncLockedQuerySize
return return
# Searching for the peers's canonical head. An ascending query is always # Searching for the peers's canonical head. An ascending query is always
@ -386,29 +389,30 @@ proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) =
# Guaranteeing O(log N) time convergence in all scenarios requires some # Guaranteeing O(log N) time convergence in all scenarios requires some
# properties to be true in both exponential search (expanding) and # properties to be true in both exponential search (expanding) and
# quasi-binary search (converging in a range). The most important is that # quasi-binary search (converging in a range). The most important is that
# the gap to `startBlock` after `huntLow` and also before `huntHigh` are # the gap to `startBlock` after `hunt.lowNumber` and also before
# proportional to the query step, where the query step is `huntStep` # `hunt.highNumber` are proportional to the query step, where the query step
# exponentially expanding each round, or `maxStep` approximately evenly # is `hunt.step` exponentially expanding each round, or `maxStep`
# distributed in the range. # approximately evenly distributed in the range.
# #
# `huntLow+1` must not be used consistently as the start, even with a large # `hunt.lowNumber+1` must not be used consistently as the start, even with a
# enough query step size, as that will sometimes take O(N) to converge in # large enough query step size, as that will sometimes take O(N) to converge
# both the exponential and quasi-binary searches. (Ending at `huntHigh-1` # in both the exponential and quasi-binary searches. (Ending at
# is fine if `syncHuntQuerySize > 1`. This asymmetry is due to ascending # `hunt.highNumber-1` is fine if `syncHuntQuerySize > 1`. This asymmetry is
# queries (see earlier comment), and non-empty truncated query reply being # due to ascending queries (see earlier comment), and non-empty truncated
# proof of presence before the truncation point, but not proof of absence # query reply being proof of presence before the truncation point, but not
# after it. A reply can be truncated just because the peer decides to.) # proof of absence after it. A reply can be truncated just because the peer
# decides to.)
# #
# The proportional gap requirement is why we divide by query size here, # The proportional gap requirement is why we divide by query size here,
# instead of stretching to fit more strictly with `(range-1)/(size-1)`. # instead of stretching to fit more strictly with `(range-1)/(size-1)`.
const syncHuntFinalSize = max(2, syncHuntQuerySize) const syncHuntFinalSize = max(2, syncHuntQuerySize)
var maxStep: typeof(request.skip) var maxStep = 0u
let fullRangeClamped = let fullRangeClamped =
if sp.huntHigh <= sp.huntLow: typeof(maxStep)(0) if sp.hunt.highNumber <= sp.hunt.lowNumber: 0u
else: min(high(typeof(maxStep)).toBlockNumber, else: min(high(uint).toBlockNumber,
sp.huntHigh - sp.huntLow).truncate(typeof(maxStep)) - 1 sp.hunt.highNumber - sp.hunt.lowNumber).truncate(uint) - 1
if fullRangeClamped >= syncHuntFinalSize: # `SyncHuntRangeFinal` condition. if fullRangeClamped >= syncHuntFinalSize: # `SyncHuntRangeFinal` condition.
maxStep = if syncHuntQuerySize == 1: maxStep = if syncHuntQuerySize == 1:
@ -420,28 +424,28 @@ proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) =
doAssert syncHuntFinalSize >= syncHuntQuerySize doAssert syncHuntFinalSize >= syncHuntQuerySize
doAssert maxStep >= 1 # Ensured by the above assertion. doAssert maxStep >= 1 # Ensured by the above assertion.
# Check for exponential search (expanding). Iterate `huntStep`. O(log N) # Check for exponential search (expanding). Iterate `hunt.step`. O(log N)
# requires `startBlock` to be offset from `huntLow`/`huntHigh`. # requires `startBlock` to be offset from `hunt.lowNumber`/`hunt.highNumber`.
if sp.syncMode in {SyncHuntForward, SyncHuntBackward} and if sp.hunt.syncMode in {SyncHuntForward, SyncHuntBackward} and
fullRangeClamped >= syncHuntFinalSize: fullRangeClamped >= syncHuntFinalSize:
let forward = sp.syncMode == SyncHuntForward let forward = sp.hunt.syncMode == SyncHuntForward
let expandShift = if forward: syncHuntForwardExpandShift let expandShift = if forward: syncHuntForwardExpandShift
else: syncHuntBackwardExpandShift else: syncHuntBackwardExpandShift
# Switches to range search when this condition is no longer true. # Switches to range search when this condition is no longer true.
if sp.huntStep < maxStep shr expandShift: if sp.hunt.step < maxStep shr expandShift:
# The `if` above means the next line cannot overflow. # The `if` above means the next line cannot overflow.
sp.huntStep = if sp.huntStep > 0: sp.huntStep shl expandShift else: 1 sp.hunt.step = if sp.hunt.step > 0: sp.hunt.step shl expandShift else: 1
# Satisfy the O(log N) convergence conditions. # Satisfy the O(log N) convergence conditions.
request.startBlock.number = result.startBlock.number =
if forward: sp.huntLow + sp.huntStep.toBlockNumber if forward: sp.hunt.lowNumber + sp.hunt.step.toBlockNumber
else: sp.huntHigh - (sp.huntStep * syncHuntQuerySize).toBlockNumber else: sp.hunt.highNumber - (sp.hunt.step * syncHuntQuerySize).toBlockNumber
request.maxResults = syncHuntQuerySize result.maxResults = syncHuntQuerySize
request.skip = sp.huntStep - 1 result.skip = sp.hunt.step - 1
return return
# For tracing/display. # For tracing/display.
sp.huntStep = maxStep sp.hunt.step = maxStep
sp.syncMode = SyncHuntRange sp.hunt.syncMode = SyncHuntRange
if maxStep > 0: if maxStep > 0:
# Quasi-binary search (converging in a range). O(log N) requires # Quasi-binary search (converging in a range). O(log N) requires
# `startBlock` to satisfy the constraints described above, with the # `startBlock` to satisfy the constraints described above, with the
@ -451,9 +455,9 @@ proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) =
var offset = fullRangeClamped - maxStep * (syncHuntQuerySize-1) var offset = fullRangeClamped - maxStep * (syncHuntQuerySize-1)
# Rounding must bias towards end to ensure `offset >= 1` after this. # Rounding must bias towards end to ensure `offset >= 1` after this.
offset -= offset shr 1 offset -= offset shr 1
request.startBlock.number = sp.huntLow + offset.toBlockNumber result.startBlock.number = sp.hunt.lowNumber + offset.toBlockNumber
request.maxResults = syncHuntQuerySize result.maxResults = syncHuntQuerySize
request.skip = maxStep - 1 result.skip = maxStep - 1
else: else:
# Small range, final step. At `fullRange == 0` we must query at least one # Small range, final step. At `fullRange == 0` we must query at least one
# block before and after the range to confirm the canonical head boundary, # block before and after the range to confirm the canonical head boundary,
@ -469,18 +473,18 @@ proc peerSyncChainRequest(sp: SnapPeerEx, request: var BlocksRequest) =
before = max(before + afterSoftMax, extra) - afterSoftMax before = max(before + afterSoftMax, extra) - afterSoftMax
before = min(before, beforeHardMax) before = min(before, beforeHardMax)
# See `SyncLocked` case. # See `SyncLocked` case.
request.startBlock.number = result.startBlock.number =
if sp.bestBlockNumber <= before.toBlockNumber: 1.toBlockNumber if sp.hunt.bestNumber <= before.toBlockNumber: 1.toBlockNumber
else: min(sp.bestBlockNumber - before.toBlockNumber, else: min(sp.hunt.bestNumber - before.toBlockNumber,
high(BlockNumber) - (syncHuntFinalSize - 1).toBlockNumber) high(BlockNumber) - (syncHuntFinalSize - 1).toBlockNumber)
request.maxResults = syncHuntFinalSize result.maxResults = syncHuntFinalSize
sp.syncMode = SyncHuntRangeFinal sp.hunt.syncMode = SyncHuntRangeFinal
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc peerHuntCanonical*(sp: SnapPeerEx) {.async.} = proc collectBlockHeaders*(sp: SnapPeer) {.async.} =
## Query a peer to update our knowledge of its canonical chain and its best ## Query a peer to update our knowledge of its canonical chain and its best
## block, which is its canonical chain head. This can be called at any time ## block, which is its canonical chain head. This can be called at any time
## after a peer has negotiated the connection. ## after a peer has negotiated the connection.
@ -492,52 +496,41 @@ proc peerHuntCanonical*(sp: SnapPeerEx) {.async.} =
## All replies to this query are part of the peer's canonical chain at the ## All replies to this query are part of the peer's canonical chain at the
## time the peer sends them. ## time the peer sends them.
# If we send multiple `GetBlockHeaders` requests, the replies can be out of let request = sp.peerSyncChainRequest
# order, and prior to eth/66 there is no request-id. We'll avoid this
# problem by never sending overlapping `GetBlockHeaders` to the same peer.
if sp.pendingGetBlockHeaders:
#trace ">| Blocked overlapping eth.GetBlockHeaders (0x03)", peer=sp
await sleepAsync(chronos.milliseconds(500))
return
sp.pendingGetBlockHeaders = true
var request {.noinit.}: BlocksRequest trace trEthSendSending & "GetBlockHeaders", peer=sp,
sp.peerSyncChainRequest(request) count=request.maxResults,
traceSendSending "GetBlockHeaders", peer=sp, count=request.maxResults,
startBlock=sp.ns.pp(request.startBlock), step=request.traceStep startBlock=sp.ns.pp(request.startBlock), step=request.traceStep
inc sp.stats.ok.getBlockHeaders inc sp.stats.ok.getBlockHeaders
var reply: typeof await sp.peer.getBlockHeaders(request) var reply: Option[protocol.blockHeadersObj]
try: try:
reply = await sp.peer.getBlockHeaders(request) reply = await sp.peer.getBlockHeaders(request)
except CatchableError as e: except CatchableError as e:
traceRecvError "waiting for reply to GetBlockHeaders", trace trEthRecvError & "waiting for reply to GetBlockHeaders", peer=sp,
peer=sp, error=e.msg error=e.msg
inc sp.stats.major.networkErrors inc sp.stats.major.networkErrors
sp.stopped = true sp.ctrl.runState = SyncStopped
return return
if reply.isNone: if reply.isNone:
traceRecvTimeoutWaiting "for reply to GetBlockHeaders", peer=sp trace trEthRecvTimeoutWaiting & "for reply to GetBlockHeaders", peer=sp
# TODO: Should disconnect? # TODO: Should disconnect?
inc sp.stats.minor.timeoutBlockHeaders inc sp.stats.minor.timeoutBlockHeaders
return return
let nHeaders = reply.get.headers.len let nHeaders = reply.get.headers.len
if nHeaders == 0: if nHeaders == 0:
traceRecvGot "EMPTY reply BlockHeaders", peer=sp, got=0, trace trEthRecvGot & "EMPTY reply BlockHeaders", peer=sp,
requested=request.maxResults got=0, requested=request.maxResults
else: else:
traceRecvGot "reply BlockHeaders", peer=sp, got=nHeaders, trace trEthRecvGot & "reply BlockHeaders", peer=sp,
requested=request.maxResults, got=nHeaders, requested=request.maxResults,
firstBlock=reply.get.headers[0].blockNumber, firstBlock=reply.get.headers[0].blockNumber,
lastBlock=reply.get.headers[^1].blockNumber lastBlock=reply.get.headers[^1].blockNumber
sp.pendingGetBlockHeaders = false
if request.maxResults.int < nHeaders: if request.maxResults.int < nHeaders:
traceRecvProtocolViolation "excess headers in BlockHeaders", trace trEthRecvProtocolViolation & "excess headers in BlockHeaders",
peer=sp, got=nHeaders, requested=request.maxResults peer=sp, got=nHeaders, requested=request.maxResults
# TODO: Should disconnect. # TODO: Should disconnect.
inc sp.stats.major.excessBlockHeaders inc sp.stats.major.excessBlockHeaders
@ -549,6 +542,9 @@ proc peerHuntCanonical*(sp: SnapPeerEx) {.async.} =
else: else:
sp.peerSyncChainEmptyReply(request) sp.peerSyncChainEmptyReply(request)
proc collectDataSetup*(sp: SnapPeer) =
sp.replyDataSetup
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -15,31 +15,34 @@ import
chronicles, chronicles,
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
stint, stint,
../path_desc, ".."/[base_desc, path_desc],
"."/[peer_desc, sync_desc] ./sync_fetch_xdesc
{.push raises: [Defect].} {.push raises: [Defect].}
proc hasSlice*(sp: SnapPeerEx): bool = logScope:
## Return `true` iff `getSlice` would return a free slice to work on. topics = "snap peer common"
if sp.nsx.sharedFetch.isNil:
sp.nsx.sharedFetch = SharedFetchState.new
result = 0 < sp.nsx.sharedFetch.leafRanges.len
trace "Snap: hasSlice", peer=sp, hasSlice=result
proc getSlice*(sp: SnapPeerEx, leafLow, leafHigh: var LeafPath): bool = proc hasSlice*(sp: SnapPeer): bool =
## Return `true` iff `getSlice` would return a free slice to work on.
if sp.ns.sharedFetchEx.isNil:
sp.ns.sharedFetchEx = SnapSyncFetchEx.new
result = 0 < sp.ns.sharedFetchEx.leafRanges.len
trace "hasSlice", peer=sp, hasSlice=result
proc getSlice*(sp: SnapPeer, leafLow, leafHigh: var LeafPath): bool =
## Claim a free slice to work on. If a slice was available, it's claimed, ## Claim a free slice to work on. If a slice was available, it's claimed,
## `leadLow` and `leafHigh` are set to the slice range and `true` is ## `leadLow` and `leafHigh` are set to the slice range and `true` is
## returned. Otherwise `false` is returned. ## returned. Otherwise `false` is returned.
if sp.nsx.sharedFetch.isNil: if sp.ns.sharedFetchEx.isNil:
sp.nsx.sharedFetch = SharedFetchState.new sp.ns.sharedFetchEx = SnapSyncFetchEx.new
let sharedFetch = sp.nsx.sharedFetch let sharedFetch = sp.ns.sharedFetchEx
template ranges: auto = sharedFetch.leafRanges template ranges: auto = sharedFetch.leafRanges
const leafMaxFetchRange = (high(LeafPath) - low(LeafPath)) div 1000 const leafMaxFetchRange = (high(LeafPath) - low(LeafPath)) div 1000
if ranges.len == 0: if ranges.len == 0:
trace "Snap: getSlice", leafRange="none" trace "GetSlice", leafRange="none"
return false return false
leafLow = ranges[0].leafLow leafLow = ranges[0].leafLow
if ranges[0].leafHigh - ranges[0].leafLow <= leafMaxFetchRange: if ranges[0].leafHigh - ranges[0].leafLow <= leafMaxFetchRange:
@ -48,16 +51,16 @@ proc getSlice*(sp: SnapPeerEx, leafLow, leafHigh: var LeafPath): bool =
else: else:
leafHigh = leafLow + leafMaxFetchRange leafHigh = leafLow + leafMaxFetchRange
ranges[0].leafLow = leafHigh + 1 ranges[0].leafLow = leafHigh + 1
trace "Snap: getSlice", peer=sp, leafRange=pathRange(leafLow, leafHigh) trace "GetSlice", peer=sp, leafRange=pathRange(leafLow, leafHigh)
return true return true
proc putSlice*(sp: SnapPeerEx, leafLow, leafHigh: LeafPath) = proc putSlice*(sp: SnapPeer, leafLow, leafHigh: LeafPath) =
## Return a slice to the free list, merging with the rest of the list. ## Return a slice to the free list, merging with the rest of the list.
let sharedFetch = sp.nsx.sharedFetch let sharedFetch = sp.ns.sharedFetchEx
template ranges: auto = sharedFetch.leafRanges template ranges: auto = sharedFetch.leafRanges
trace "Snap: putSlice", leafRange=pathRange(leafLow, leafHigh), peer=sp trace "PutSlice", leafRange=pathRange(leafLow, leafHigh), peer=sp
var i = 0 var i = 0
while i < ranges.len and leafLow > ranges[i].leafHigh: while i < ranges.len and leafLow > ranges[i].leafHigh:
inc i inc i
@ -79,25 +82,25 @@ proc putSlice*(sp: SnapPeerEx, leafLow, leafHigh: LeafPath) =
if leafHigh > ranges[i].leafHigh: if leafHigh > ranges[i].leafHigh:
ranges[i].leafHigh = leafHigh ranges[i].leafHigh = leafHigh
template getSlice*(sp: SnapPeerEx, leafRange: var LeafRange): bool = template getSlice*(sp: SnapPeer, leafRange: var LeafRange): bool =
sp.getSlice(leafRange.leafLow, leafRange.leafHigh) sp.getSlice(leafRange.leafLow, leafRange.leafHigh)
template putSlice*(sp: SnapPeerEx, leafRange: LeafRange) = template putSlice*(sp: SnapPeer, leafRange: LeafRange) =
sp.putSlice(leafRange.leafLow, leafRange.leafHigh) sp.putSlice(leafRange.leafLow, leafRange.leafHigh)
proc countSlice*(sp: SnapPeerEx, leafLow, leafHigh: LeafPath, which: bool) = proc countSlice*(sp: SnapPeer, leafLow, leafHigh: LeafPath, which: bool) =
doAssert leafLow <= leafHigh doAssert leafLow <= leafHigh
sp.nsx.sharedFetch.countRange += leafHigh - leafLow + 1 sp.ns.sharedFetchEx.countRange += leafHigh - leafLow + 1
sp.nsx.sharedFetch.countRangeStarted = true sp.ns.sharedFetchEx.countRangeStarted = true
if which: if which:
sp.nsx.sharedFetch.countRangeSnap += leafHigh - leafLow + 1 sp.ns.sharedFetchEx.countRangeSnap += leafHigh - leafLow + 1
sp.nsx.sharedFetch.countRangeSnapStarted = true sp.ns.sharedFetchEx.countRangeSnapStarted = true
else: else:
sp.nsx.sharedFetch.countRangeTrie += leafHigh - leafLow + 1 sp.ns.sharedFetchEx.countRangeTrie += leafHigh - leafLow + 1
sp.nsx.sharedFetch.countRangeTrieStarted = true sp.ns.sharedFetchEx.countRangeTrieStarted = true
template countSlice*(sp: SnapPeerEx, leafRange: LeafRange, which: bool) = template countSlice*(sp: SnapPeer, leafRange: LeafRange, which: bool) =
sp.countSlice(leafRange.leafLow, leafRange.leafHigh, which) sp.countSlice(leafRange.leafLow, leafRange.leafHigh, which)
proc countAccounts*(sp: SnapPeerEx, len: int) = proc countAccounts*(sp: SnapPeer, len: int) =
sp.nsx.sharedFetch.countAccounts += len sp.ns.sharedFetchEx.countAccounts += len

View File

@ -9,51 +9,55 @@
# at your option. This file may not be copied, modified, or distributed # at your option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
{.push raises: [Defect].}
import import
std/[sets, random], std/[sets, random],
chronos, chronos,
nimcrypto/keccak, nimcrypto/keccak,
stint, stint,
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
".."/[path_desc, base_desc, types], ../../types,
"."/[common, fetch_trie, fetch_snap, peer_desc] ".."/[path_desc, base_desc],
"."/[common, fetch_trie, fetch_snap]
{.push raises: [Defect].}
logScope:
topics = "snap peer fetch"
# Note: To test disabling snap (or trie), modify `peerSupportsGetNodeData` or # Note: To test disabling snap (or trie), modify `peerSupportsGetNodeData` or
# `peerSupportsSnap` where those are defined. # `fetchSnapOk` where those are defined.
proc stateFetch*(sp: SnapPeerEx) {.async.} = proc fetch*(sp: SnapPeer) {.async.} =
var stateRoot = sp.syncStateRoot.get var stateRoot = sp.ctrl.stateRoot.get
trace "Snap: Syncing from stateRoot", peer=sp, stateRoot trace "Syncing from stateRoot", peer=sp, stateRoot
while true: while true:
if not sp.peerSupportsGetNodeData() and not sp.peerSupportsSnap(): if not sp.fetchTrieOk and not sp.fetchSnapOk:
trace "Snap: Cannot sync more from this peer", peer=sp trace "No more sync available from this peer", peer=sp
return return
if not sp.hasSlice(): if not sp.hasSlice():
trace "Snap: Nothing more to sync from this peer", peer=sp trace "Nothing more to sync from this peer", peer=sp
while not sp.hasSlice(): while not sp.hasSlice():
await sleepAsync(5.seconds) # TODO: Use an event trigger instead. await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
if sp.syncStateRoot.isNone: if sp.ctrl.stateRoot.isNone:
trace "Snap: No current state root for this peer", peer=sp trace "No current state root for this peer", peer=sp
while sp.syncStateRoot.isNone and while sp.ctrl.stateRoot.isNone and
(sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and (sp.fetchTrieOk or sp.fetchSnapOk) and
sp.hasSlice(): sp.hasSlice():
await sleepAsync(5.seconds) # TODO: Use an event trigger instead. await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
continue continue
if stateRoot != sp.syncStateRoot.get: if stateRoot != sp.ctrl.stateRoot.get:
trace "Snap: Syncing from new stateRoot", peer=sp, stateRoot trace "Syncing from new stateRoot", peer=sp, stateRoot
stateRoot = sp.syncStateRoot.get stateRoot = sp.ctrl.stateRoot.get
sp.stopThisState = false sp.ctrl.runState = SyncRunningOK
if sp.stopThisState: if sp.ctrl.runState == SyncStopRequest:
trace "Snap: Pausing sync until we get a new state root", peer=sp trace "Pausing sync until we get a new state root", peer=sp
while sp.syncStateRoot.isSome and stateRoot == sp.syncStateRoot.get and while sp.ctrl.stateRoot.isSome and stateRoot == sp.ctrl.stateRoot.get and
(sp.peerSupportsGetNodeData() or sp.peerSupportsSnap()) and (sp.fetchTrieOk or sp.fetchSnapOk) and
sp.hasSlice(): sp.hasSlice():
await sleepAsync(5.seconds) # TODO: Use an event trigger instead. await sleepAsync(5.seconds) # TODO: Use an event trigger instead.
continue continue
@ -63,17 +67,18 @@ proc stateFetch*(sp: SnapPeerEx) {.async.} =
# Mix up different slice modes, because when connecting to static nodes one # Mix up different slice modes, because when connecting to static nodes one
# mode or the other tends to dominate, which makes the mix harder to test. # mode or the other tends to dominate, which makes the mix harder to test.
var allowSnap = true var allowSnap = true
if sp.peerSupportsSnap() and sp.peerSupportsGetNodeData(): if sp.fetchSnapOk and sp.fetchTrieOk:
if rand(99) < 50: if rand(99) < 50:
allowSnap = false allowSnap = false
if sp.peerSupportsSnap() and allowSnap: if sp.fetchSnapOk and allowSnap:
discard sp.getSlice(leafRange) discard sp.getSlice(leafRange)
trace "Snap: snap.GetAccountRange segment", peer=sp, trace "GetAccountRange segment", peer=sp,
leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot
await sp.snapFetch(stateRoot, leafRange) await sp.fetchSnap(stateRoot, leafRange)
elif sp.peerSupportsGetNodeData():
elif sp.fetchTrieOk:
discard sp.getSlice(leafRange) discard sp.getSlice(leafRange)
trace "Snap: eth.GetNodeData segment", peer=sp, trace "GetNodeData segment", peer=sp,
leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot leafRange=pathRange(leafRange.leafLow, leafRange.leafHigh), stateRoot
await sp.trieFetch(stateRoot, leafRange) await sp.fetchTrie(stateRoot, leafRange)

View File

@ -19,38 +19,42 @@
## different related tries (blocks at different times) together in a way that ## different related tries (blocks at different times) together in a way that
## eventually becomes a full trie for a single block. ## eventually becomes a full trie for a single block.
{.push raises: [Defect].}
import import
std/sets, std/sets,
chronos, chronos,
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
nimcrypto/keccak, nimcrypto/keccak,
stint, #stint,
"../.."/[protocol, protocol/pickeled_snap_tracers, trace_helper], "../.."/[protocol, types],
".."/[base_desc, path_desc, types], ".."/[base_desc, path_desc],
"."/[common, peer_desc] ./common
{.push raises: [Defect].}
logScope:
topics = "snap peer fetch"
const const
snapRequestBytesLimit = 2 * 1024 * 1024 snapRequestBytesLimit = 2 * 1024 * 1024
## Soft bytes limit to request in `snap` protocol calls. ## Soft bytes limit to request in `snap` protocol calls.
proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange) proc fetchSnap*(sp: SnapPeer, stateRoot: TrieHash, leafRange: LeafRange)
{.async.} = {.async.} =
## Fetch data using the `snap#` protocol
var origin = leafRange.leafLow var origin = leafRange.leafLow
var limit = leafRange.leafHigh var limit = leafRange.leafHigh
const responseBytes = 2 * 1024 * 1024 const responseBytes = 2 * 1024 * 1024
if sp.stopped: if sp.ctrl.runState == SyncStopped:
traceRecvError "peer already disconnected, not sending GetAccountRange", trace trSnapRecvError &
"peer already disconnected, not sending GetAccountRange",
peer=sp, accountRange=pathRange(origin, limit), peer=sp, accountRange=pathRange(origin, limit),
stateRoot=($stateRoot), bytesLimit=snapRequestBytesLimit stateRoot, bytesLimit=snapRequestBytesLimit
sp.putSlice(leafRange) sp.putSlice(leafRange)
if tracePackets: trace trSnapSendSending & "GetAccountRange", peer=sp,
traceSendSending "GetAccountRange", accountRange=pathRange(origin, limit),
accountRange=pathRange(origin, limit), stateRoot, bytesLimit=snapRequestBytesLimit
stateRoot=($stateRoot), bytesLimit=snapRequestBytesLimit, peer=sp
var var
reply: Option[accountRangeObj] reply: Option[accountRangeObj]
@ -58,16 +62,15 @@ proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange)
reply = await sp.peer.getAccountRange( reply = await sp.peer.getAccountRange(
stateRoot.untie, origin, limit, snapRequestBytesLimit) stateRoot.untie, origin, limit, snapRequestBytesLimit)
except CatchableError as e: except CatchableError as e:
traceRecvError "waiting for reply to GetAccountRange", trace trSnapRecvError & "waiting for reply to GetAccountRange", peer=sp,
peer=sp, error=e.msg error=e.msg
inc sp.stats.major.networkErrors inc sp.stats.major.networkErrors
sp.stopped = true sp.ctrl.runState = SyncStopped
sp.putSlice(leafRange) sp.putSlice(leafRange)
return return
if reply.isNone: if reply.isNone:
traceRecvTimeoutWaiting "for reply to GetAccountRange", trace trSnapRecvTimeoutWaiting & "for reply to GetAccountRange", peer=sp
peer=sp
sp.putSlice(leafRange) sp.putSlice(leafRange)
return return
@ -82,6 +85,7 @@ proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange)
template proof: auto = accountsAndProof.proof template proof: auto = accountsAndProof.proof
let len = accounts.len let len = accounts.len
let requestedRange = pathRange(origin, limit)
if len == 0: if len == 0:
# If there's no proof, this reply means the peer has no accounts available # If there's no proof, this reply means the peer has no accounts available
# in the range for this query. But if there's a proof, this reply means # in the range for this query. But if there's a proof, this reply means
@ -89,32 +93,31 @@ proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange)
# This makes all the difference to terminating the fetch. For now we'll # This makes all the difference to terminating the fetch. For now we'll
# trust the mere existence of the proof rather than verifying it. # trust the mere existence of the proof rather than verifying it.
if proof.len == 0: if proof.len == 0:
traceRecvGot "EMPTY reply AccountRange", peer=sp, trace trSnapRecvGot & "EMPTY reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange="-", got=len, proofLen=proof.len, gotRange="-", requestedRange, stateRoot
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot)
sp.putSlice(leafRange) sp.putSlice(leafRange)
# Don't keep retrying snap for this state. # Don't keep retrying snap for this state.
sp.stopThisState = true sp.ctrl.runState = SyncStopRequest
else: else:
traceRecvGot "END reply AccountRange", peer=sp, trace trSnapRecvGot & "END reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange=pathRange(origin, high(LeafPath)), got=len, proofLen=proof.len, gotRange=pathRange(origin, high(LeafPath)),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange, stateRoot
# Current slicer can't accept more result data than was requested, so # Current slicer can't accept more result data than was requested, so
# just leave the requested slice claimed and update statistics. # just leave the requested slice claimed and update statistics.
sp.countSlice(origin, limit, true) sp.countSlice(origin, limit, true)
return return
var lastPath = accounts[len-1].accHash var lastPath = accounts[len-1].accHash
traceRecvGot "reply AccountRange", peer=sp, trace trSnapRecvGot & "reply AccountRange", peer=sp,
got=len, proofLen=proof.len, gotRange=pathRange(origin, lastPath), got=len, proofLen=proof.len, gotRange=pathRange(origin, lastPath),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange, stateRoot
# Missing proof isn't allowed, unless `origin` is min path in which case # Missing proof isn't allowed, unless `origin` is min path in which case
# there might be no proof if the result spans the entire range. # there might be no proof if the result spans the entire range.
if proof.len == 0 and origin != low(LeafPath): if proof.len == 0 and origin != low(LeafPath):
traceRecvProtocolViolation "missing proof in AccountRange", trace trSnapRecvProtocolViolation & "missing proof in AccountRange",
peer=sp, got=len, proofLen=proof.len, gotRange=pathRange(origin,lastPath), peer=sp, got=len, proofLen=proof.len, gotRange=pathRange(origin,lastPath),
requestedRange=pathRange(origin, limit), stateRoot=($stateRoot) requestedRange, stateRoot
sp.putSlice(leafRange) sp.putSlice(leafRange)
return return
@ -134,5 +137,7 @@ proc snapFetch*(sp: SnapPeerEx, stateRoot: TrieHash, leafRange: LeafRange)
sp.countAccounts(keepAccounts) sp.countAccounts(keepAccounts)
proc peerSupportsSnap*(sp: SnapPeerEx): bool = proc fetchSnapOk*(sp: SnapPeer): bool =
not sp.stopped and sp.peer.supports(snap) ## Sort of getter: if `true`, fetching data using the `snap#` protocol
## is supported.
sp.ctrl.runState != SyncStopped and sp.peer.supports(snap)

View File

@ -26,12 +26,15 @@ import
std/[sets, tables, algorithm], std/[sets, tables, algorithm],
chronos, chronos,
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
../../trace_helper, "../.."/[protocol/trace_config, types],
".."/[base_desc, get_nodedata, path_desc, types, validate_trienode], ".."/[base_desc, path_desc],
"."/[common, peer_desc, sync_desc] "."/[common, reply_data, sync_fetch_xdesc, validate_trienode]
{.push raises: [Defect].} {.push raises: [Defect].}
logScope:
topics = "snap peer fetch"
const const
maxBatchGetNodeData = 384 maxBatchGetNodeData = 384
## Maximum number of node hashes to batch per `GetNodeData` request. ## Maximum number of node hashes to batch per `GetNodeData` request.
@ -40,61 +43,69 @@ const
## Maximum number of `GetNodeData` requests in parallel to a single peer. ## Maximum number of `GetNodeData` requests in parallel to a single peer.
type type
SingleNodeRequestEx = ref object of SingleNodeRequestBase SingleNodeRequest = ref object
hash: NodeHash hash: NodeHash
path: InteriorPath path: InteriorPath
future: Future[Blob] future: Future[Blob]
proc hash(n: SingleNodeRequestBase): NodeHash = FetchStateEx = ref object of SnapPeerFetchBase
n.SingleNodeRequestEx.hash ## Account fetching state on a single peer.
sp: SnapPeer
nodeGetQueue: seq[SingleNodeRequest]
nodeGetsInFlight: int
scheduledBatch: bool
progressPrefix: string
progressCount: int
nodesInFlight: int
getNodeDataErrors: int
leafRange: LeafRange
unwindAccounts: int64
unwindAccountBytes: int64
finish: Future[void]
proc path(n: SingleNodeRequestBase): InteriorPath = proc fetchStateEx(sp: SnapPeer): FetchStateEx =
n.SingleNodeRequestEx.path sp.fetchState.FetchStateEx
proc future(n: SingleNodeRequestBase): Future[Blob] = proc `fetchStateEx=`(sp: SnapPeer; value: FetchStateEx) =
n.SingleNodeRequestEx.future sp.fetchState = value
proc new(T: type FetchStateEx; peer: SnapPeer): T =
FetchStateEx(sp: peer)
# Forward declaration. # Forward declaration.
proc scheduleBatchGetNodeData(fetch: FetchState) {.gcsafe.} proc scheduleBatchGetNodeData(fetch: FetchStateEx) {.gcsafe.}
# --- # ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc wrapCallGetNodeData(fetch: FetchState, hashes: seq[NodeHash], proc wrapCallGetNodeData(fetch: FetchStateEx, hashes: seq[NodeHash],
futures: seq[Future[Blob]], futures: seq[Future[Blob]],
pathFrom, pathTo: InteriorPath) {.async.} = pathFrom, pathTo: InteriorPath) {.async.} =
inc fetch.nodeGetsInFlight inc fetch.nodeGetsInFlight
let reply = await fetch.sp.getNodeData(hashes, pathFrom, pathTo) let reply = await ReplyData.new(fetch.sp, hashes, pathFrom, pathTo)
# Timeout, packet and packet error trace messages are done in `get_nodedata`, # Timeout, packet and packet error trace messages are done in `get_nodedata`,
# where there is more context than here. Here we always received just valid # where there is more context than here. Here we always received just valid
# data with hashes already verified, or empty list of `nil`. # data with hashes already verified, or empty list of `nil`.
if reply.isNil: if reply.replyType == NoReplyData:
# Timeout or error. # Empty reply, timeout or error (i.e. `reply.isNil`).
fetch.sp.stopThisState = true # It means there are none of the nodes available.
for i in 0 ..< futures.len: fetch.sp.ctrl.runState = SyncStopRequest
futures[i].complete(@[])
elif reply.hashVerifiedData.len == 0:
# Empty reply, matched to request.
# It means there are none of the nodes available, but it's not an error.
fetch.sp.stopThisState = true
for i in 0 ..< futures.len: for i in 0 ..< futures.len:
futures[i].complete(@[]) futures[i].complete(@[])
else: else:
# Non-empty reply. # Non-empty reply.
for i in 0 ..< futures.len: for i in 0 ..< futures.len:
let index = reply.reverseMap(i) futures[i].complete(reply[i])
if index >= 0:
futures[i].complete(reply.hashVerifiedData[index])
else:
futures[i].complete(@[])
dec fetch.nodeGetsInFlight dec fetch.nodeGetsInFlight
# Receiving a reply may allow more requests to be sent. # Receiving a reply may allow more requests to be sent.
if fetch.nodeGetQueue.len > 0 and not fetch.scheduledBatch: if fetch.nodeGetQueue.len > 0 and not fetch.scheduledBatch:
fetch.scheduleBatchGetNodeData() fetch.scheduleBatchGetNodeData()
proc batchGetNodeData(fetch: FetchState) = proc batchGetNodeData(fetch: FetchStateEx) =
var i = fetch.nodeGetQueue.len var i = fetch.nodeGetQueue.len
if i == 0 or fetch.nodeGetsInFlight >= maxParallelGetNodeData: if i == 0 or fetch.nodeGetsInFlight >= maxParallelGetNodeData:
return return
@ -140,7 +151,7 @@ proc batchGetNodeData(fetch: FetchState) =
# internally (like SQLite by default), the left-to-right write order will # internally (like SQLite by default), the left-to-right write order will
# improve read performance when other peers sync reading this local node. # improve read performance when other peers sync reading this local node.
proc cmpSingleNodeRequest(x, y: SingleNodeRequestBase): int = proc cmpSingleNodeRequest(x, y: SingleNodeRequest): int =
# `x` and `y` are deliberately swapped to get descending order. See above. # `x` and `y` are deliberately swapped to get descending order. See above.
cmp(y.path, x.path) cmp(y.path, x.path)
sort(fetch.nodeGetQueue, cmpSingleNodeRequest) sort(fetch.nodeGetQueue, cmpSingleNodeRequest)
@ -148,7 +159,7 @@ proc batchGetNodeData(fetch: FetchState) =
trace "Trie: Sort length", sortLen=i trace "Trie: Sort length", sortLen=i
# If stopped, abort all waiting nodes, so they clean up. # If stopped, abort all waiting nodes, so they clean up.
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.ctrl.runState != SyncRunningOk:
while i > 0: while i > 0:
fetch.nodeGetQueue[i].future.complete(@[]) fetch.nodeGetQueue[i].future.complete(@[])
dec i dec i
@ -177,26 +188,27 @@ proc batchGetNodeData(fetch: FetchState) =
futures.setLen(0) futures.setLen(0)
fetch.nodeGetQueue.setLen(i) fetch.nodeGetQueue.setLen(i)
proc scheduleBatchGetNodeData(fetch: FetchState) = proc scheduleBatchGetNodeData(fetch: FetchStateEx) =
if not fetch.scheduledBatch: if not fetch.scheduledBatch:
fetch.scheduledBatch = true fetch.scheduledBatch = true
proc batchGetNodeData(arg: pointer) = proc batchGetNodeData(arg: pointer) =
let fetch = cast[FetchState](arg) let fetch = cast[FetchStateEx](arg)
fetch.scheduledBatch = false fetch.scheduledBatch = false
fetch.batchGetNodeData() fetch.batchGetNodeData()
# We rely on `callSoon` scheduling for the _end_ of the current run list, # We rely on `callSoon` scheduling for the _end_ of the current run list,
# after other async functions finish adding more single node requests. # after other async functions finish adding more single node requests.
callSoon(batchGetNodeData, cast[pointer](fetch)) callSoon(batchGetNodeData, cast[pointer](fetch))
proc getNodeData(fetch: FetchState, proc getNodeData(fetch: FetchStateEx,
hash: TrieHash, path: InteriorPath): Future[Blob] {.async.} = hash: TrieHash, path: InteriorPath): Future[Blob] {.async.} =
## Request _one_ item of trie node data asynchronously. This function ## Request _one_ item of trie node data asynchronously. This function
## batches requested into larger `eth.GetNodeData` requests efficiently. ## batches requested into larger `eth.GetNodeData` requests efficiently.
traceIndividualNode "> Fetching individual NodeData", peer=fetch.sp, when trEthTraceIndividualNodesOk:
depth=path.depth, path, hash=($hash) trace "> Fetching individual NodeData", peer=fetch.sp,
depth=path.depth, path, hash=($hash)
let future = newFuture[Blob]() let future = newFuture[Blob]()
fetch.nodeGetQueue.add SingleNodeRequestEx( fetch.nodeGetQueue.add SingleNodeRequest(
hash: hash.NodeHash, hash: hash.NodeHash,
path: path, path: path,
future: future) future: future)
@ -205,23 +217,24 @@ proc getNodeData(fetch: FetchState,
fetch.scheduleBatchGetNodeData() fetch.scheduleBatchGetNodeData()
let nodeBytes = await future let nodeBytes = await future
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.ctrl.runState != SyncRunningOk:
return nodebytes return nodebytes
if tracePackets: when trEthTracePacketsOk:
doAssert nodeBytes.len == 0 or nodeBytes.toNodeHash == hash doAssert nodeBytes.len == 0 or nodeBytes.toNodeHash == hash
if nodeBytes.len > 0: when trEthTraceIndividualNodesOk:
traceIndividualNode "< Received individual NodeData", peer=fetch.sp, if nodeBytes.len > 0:
depth=path.depth, path, hash=($hash), trace "< Received individual NodeData", peer=fetch.sp,
nodeLen=nodeBytes.len, nodeBytes depth=path.depth, path, hash=($hash),
else: nodeLen=nodeBytes.len, nodeBytes
traceIndividualNode "< Received EMPTY individual NodeData", peer=fetch.sp, else:
depth=path.depth, path, hash, trace "< Received EMPTY individual NodeData", peer=fetch.sp,
nodeLen=nodeBytes.len depth=path.depth, path, hash,
nodeLen=nodeBytes.len
return nodeBytes return nodeBytes
proc pathInRange(fetch: FetchState, path: InteriorPath): bool = proc pathInRange(fetch: FetchStateEx, path: InteriorPath): bool =
# TODO: This method is ugly and unnecessarily slow. # TODO: This method is ugly and unnecessarily slow.
var compare = fetch.leafRange.leafLow.toInteriorPath var compare = fetch.leafRange.leafLow.toInteriorPath
while compare.depth > path.depth: while compare.depth > path.depth:
@ -235,23 +248,23 @@ proc pathInRange(fetch: FetchState, path: InteriorPath): bool =
return false return false
return true return true
proc traverse(fetch: FetchState, hash: NodeHash, path: InteriorPath, proc traverse(fetch: FetchStateEx, hash: NodeHash, path: InteriorPath,
fromExtension: bool) {.async.} = fromExtension: bool) {.async.} =
template errorReturn() = template errorReturn() =
fetch.sp.stopThisState = true fetch.sp.ctrl.runState = SyncStopRequest
dec fetch.nodesInFlight dec fetch.nodesInFlight
if fetch.nodesInFlight == 0: if fetch.nodesInFlight == 0:
fetch.finish.complete() fetch.finish.complete()
return return
# If something triggered stop earlier, don't request, and clean up now. # If something triggered stop earlier, don't request, and clean up now.
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.ctrl.runState != SyncRunningOk:
errorReturn() errorReturn()
let nodeBytes = await fetch.getNodeData(hash.TrieHash, path) let nodeBytes = await fetch.getNodeData(hash.TrieHash, path)
# If something triggered stop, clean up now. # If something triggered stop, clean up now.
if fetch.sp.stopThisState or fetch.sp.stopped: if fetch.sp.ctrl.runState != SyncRunningOk:
errorReturn() errorReturn()
# Don't keep emitting error messages after one error. We'll allow 10. # Don't keep emitting error messages after one error. We'll allow 10.
if fetch.getNodeDataErrors >= 10: if fetch.getNodeDataErrors >= 10:
@ -296,14 +309,14 @@ proc traverse(fetch: FetchState, hash: NodeHash, path: InteriorPath,
template leafBytes: auto = leafPtr[2] template leafBytes: auto = leafPtr[2]
inc fetch.unwindAccounts inc fetch.unwindAccounts
fetch.unwindAccountBytes += leafBytes.len fetch.unwindAccountBytes += leafBytes.len
inc fetch.sp.nsx.sharedFetch.countAccounts inc fetch.sp.ns.sharedFetchEx.countAccounts
fetch.sp.nsx.sharedFetch.countAccountBytes += leafBytes.len fetch.sp.ns.sharedFetchEx.countAccountBytes += leafBytes.len
dec fetch.nodesInFlight dec fetch.nodesInFlight
if fetch.nodesInFlight == 0: if fetch.nodesInFlight == 0:
fetch.finish.complete() fetch.finish.complete()
proc probeGetNodeData(sp: SnapPeerEx, stateRoot: TrieHash): Future[bool] proc probeGetNodeData(sp: SnapPeer, stateRoot: TrieHash): Future[bool]
{.async.} = {.async.} =
# Before doing real trie traversal on this peer, send a probe request for # Before doing real trie traversal on this peer, send a probe request for
# `stateRoot` to see if it's worth pursuing at all. We will avoid reserving # `stateRoot` to see if it's worth pursuing at all. We will avoid reserving
@ -323,15 +336,19 @@ proc probeGetNodeData(sp: SnapPeerEx, stateRoot: TrieHash): Future[bool]
# send an empty reply. We don't want to cut off a peer for other purposes # send an empty reply. We don't want to cut off a peer for other purposes
# such as a source of blocks and transactions, just because it doesn't # such as a source of blocks and transactions, just because it doesn't
# reply to `GetNodeData`. # reply to `GetNodeData`.
let reply = await sp.getNodeData( let reply = await ReplyData.new(sp, @[stateRoot.NodeHash])
@[stateRoot.NodeHash], InteriorPath(), InteriorPath()) return reply.replyType == SingleEntryReply
return not reply.isNil and reply.hashVerifiedData.len == 1
proc trieFetch*(sp: SnapPeerEx, stateRoot: TrieHash, # ------------------------------------------------------------------------------
leafRange: LeafRange) {.async.} = # Public functions
if sp.fetchState.isNil: # ------------------------------------------------------------------------------
sp.fetchState = FetchState(sp: sp)
template fetch: auto = sp.fetchState proc fetchTrie*(sp: SnapPeer, stateRoot: TrieHash, leafRange: LeafRange)
{.async.} =
if sp.fetchStateEx.isNil:
sp.fetchStateEx = FetchStateEx.new(sp)
let fetch = sp.fetchStateEx
fetch.leafRange = leafRange fetch.leafRange = leafRange
fetch.finish = newFuture[void]() fetch.finish = newFuture[void]()
@ -344,10 +361,14 @@ proc trieFetch*(sp: SnapPeerEx, stateRoot: TrieHash,
if fetch.getNodeDataErrors == 0: if fetch.getNodeDataErrors == 0:
sp.countSlice(leafRange, false) sp.countSlice(leafRange, false)
else: else:
sp.nsx.sharedFetch.countAccounts -= fetch.unwindAccounts sp.ns.sharedFetchEx.countAccounts -= fetch.unwindAccounts
sp.nsx.sharedFetch.countAccountBytes -= fetch.unwindAccountBytes sp.ns.sharedFetchEx.countAccountBytes -= fetch.unwindAccountBytes
sp.putSlice(leafRange) sp.putSlice(leafRange)
proc peerSupportsGetNodeData*(sp: SnapPeerEx): bool = proc fetchTrieOk*(sp: SnapPeer): bool =
template fetch(sp): FetchState = sp.fetchState sp.ctrl.runState != SyncStopped and
not sp.stopped and (sp.fetch.isNil or sp.fetch.getNodeDataErrors == 0) (sp.fetchStateEx.isNil or sp.fetchStateEx.getNodeDataErrors == 0)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -60,40 +60,53 @@
## matching. Before this module was written, we tended to accept whatever came ## matching. Before this module was written, we tended to accept whatever came
## and assume a lot about replies. It often worked but wasn't robust enough. ## and assume a lot about replies. It often worked but wasn't robust enough.
{.push raises: [Defect].}
import import
std/[sequtils, sets, tables, hashes], std/[sequtils, sets, tables, hashes],
chronos, chronos,
eth/[common/eth_types, p2p], eth/[common/eth_types, p2p],
nimcrypto/keccak, nimcrypto/keccak,
stint, stint,
".."/[protocol, protocol/pickeled_eth_tracers], "../.."/[protocol, protocol/trace_config, types],
"."/[base_desc, path_desc, pie/peer_desc, timer_helper, types] ".."/[base_desc, path_desc, timer_helper]
{.push raises: [Defect].}
logScope:
topics = "snap reply"
type type
NodeDataRequest = ref object of NodeDataRequestBase ReplyData* = ref object
sp: SnapPeerEx ## Opaque object handle for reply message
hashes: seq[NodeHash] reverseMap: seq[int] ## for reading out the `hashVerifiedData[]`
future: Future[NodeDataReply] hashVerifiedData: seq[Blob]
timer: TimerCallback
pathRange: (InteriorPath, InteriorPath)
fullHashed: bool
NodeDataReply* = ref object ReplyDataType* = enum
reverseMap: seq[int] # Access with `reversMap(i)` instead. NoReplyData
hashVerifiedData*: seq[Blob] SingleEntryReply
MultipleEntriesReply
RequestData = ref object
sp: SnapPeer
hashes: seq[NodeHash]
future: Future[ReplyData]
timer: TimerCallback
pathRange: (InteriorPath, InteriorPath)
fullHashed: bool
proc ex(base: NodeDataRequestBase): NodeDataRequest = RequestDataQueue = ref object of SnapPeerRequestsBase
## to extended object version liveRequests: HashSet[RequestData]
base.NodeDataRequest empties: int
# `OrderedSet` was considered instead of `seq` here, but it has a slow
# implementation of `excl`, defeating the motivation for using it.
waitingOnEmpties: seq[RequestData]
beforeFirstHash: seq[RequestData]
beforeFullHash: HashSet[RequestData]
# We need to be able to lookup requests by the hash of reply data.
# `ptr NodeHash` is used here so the table doesn't require an independent
# copy of the hash. The hash is part of the request object.
itemHash: Table[ptr NodeHash, (RequestData,int)]
proc ex(pair: (NodeDataRequestBase,int)): (NodeDataRequest, int) = proc hash(request: RequestData): Hash =
## to extended object version
(pair[0].ex, pair[1])
proc hash(request: NodeDataRequest|NodeDataRequestBase): Hash =
hash(cast[pointer](request)) hash(cast[pointer](request))
proc hash(hash: ptr Hash256): Hash = proc hash(hash: ptr Hash256): Hash =
@ -102,67 +115,73 @@ proc hash(hash: ptr Hash256): Hash =
proc `==`(hash1, hash2: ptr Hash256): bool = proc `==`(hash1, hash2: ptr Hash256): bool =
hash1[] == hash2[] hash1[] == hash2[]
proc requestsEx(sp: SnapPeer): RequestDataQueue =
sp.requests.RequestDataQueue
proc `requestsEx=`(sp: SnapPeer; value: RequestDataQueue) =
sp.requests = value
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private logging helpers # Private logging helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
template pathRange(request: NodeDataRequest): string = template pathRange(request: RequestData): string =
pathRange(request.pathRange[0], request.pathRange[1]) pathRange(request.pathRange[0], request.pathRange[1])
proc traceGetNodeDataSending(request: NodeDataRequest) = proc traceGetNodeDataSending(request: RequestData) =
traceSendSending "GetNodeData", peer=request.sp, trace trEthSendSending & "GetNodeData", peer=request.sp,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceGetNodeDataDelaying(request: NodeDataRequest) = proc traceGetNodeDataDelaying(request: RequestData) =
traceSendDelaying "GetNodeData", peer=request.sp, trace trEthSendDelaying & "GetNodeData", peer=request.sp,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceGetNodeDataSendError(request: NodeDataRequest, proc traceGetNodeDataSendError(request: RequestData,
e: ref CatchableError) = e: ref CatchableError) =
traceRecvError "sending GetNodeData", peer=request.sp, trace trEthRecvError & "sending GetNodeData", peer=request.sp,
error=e.msg, hashes=request.hashes.len, pathRange=request.pathRange error=e.msg, hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyError(request: NodeDataRequest, proc traceReplyDataError(request: RequestData,
e: ref CatchableError) = e: ref CatchableError) =
traceRecvError "waiting for reply to GetNodeData", trace trEthRecvError & "waiting for reply to GetNodeData",
peer=request.sp, error=e.msg, peer=request.sp, error=e.msg,
hashes=request.hashes.len, pathRange=request.pathRange hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyTimeout(request: NodeDataRequest) = proc traceReplyDataTimeout(request: RequestData) =
traceRecvTimeoutWaiting "for reply to GetNodeData", trace trEthRecvTimeoutWaiting & "for reply to GetNodeData",
hashes=request.hashes.len, pathRange=request.pathRange, peer=request.sp hashes=request.hashes.len, pathRange=request.pathRange, peer=request.sp
proc traceGetNodeDataDisconnected(request: NodeDataRequest) = proc traceGetNodeDataDisconnected(request: RequestData) =
traceRecvError "peer disconnected, not sending GetNodeData", trace trEthRecvError & "peer disconnected, not sending GetNodeData",
peer=request.sp, hashes=request.hashes.len, pathRange=request.pathRange peer=request.sp, hashes=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyEmpty(sp: SnapPeerEx, request: NodeDataRequest) = proc traceReplyDataEmpty(sp: SnapPeer, request: RequestData) =
# `request` can be `nil` because we don't always know which request # `request` can be `nil` because we don't always know which request
# the empty reply goes with. Therefore `sp` must be included. # the empty reply goes with. Therefore `sp` must be included.
if request.isNil: if request.isNil:
traceRecvGot "EMPTY NodeData", peer=sp, got=0 trace trEthRecvGot & "EMPTY NodeData", peer=sp, got=0
else: else:
traceRecvGot "NodeData", peer=sp, got=0, trace trEthRecvGot & "NodeData", peer=sp, got=0,
requested=request.hashes.len, pathRange=request.pathRange requested=request.hashes.len, pathRange=request.pathRange
proc traceNodeDataReplyUnmatched(sp: SnapPeerEx, got: int) = proc traceReplyDataUnmatched(sp: SnapPeer, got: int) =
# There is no request for this reply. Therefore `sp` must be included. # There is no request for this reply. Therefore `sp` must be included.
traceRecvProtocolViolation "non-reply NodeData", peer=sp, got trace trEthRecvProtocolViolation & "non-reply NodeData", peer=sp, got
debug "Snap: Warning: Unexpected non-reply NodeData from peer" debug "Warning: Unexpected non-reply NodeData from peer"
proc traceNodeDataReply(request: NodeDataRequest, proc traceReplyData(request: RequestData,
got, use, unmatched, other, duplicates: int) = got, use, unmatched, other, duplicates: int) =
if tracePackets: when trEthTracePacketsOk:
logScope: got=got logScope: got=got
logScope: requested=request.hashes.len logScope: requested=request.hashes.len
logScope: pathRange=request.pathRange logScope: pathRange=request.pathRange
logScope: peer=request.sp logScope: peer=request.sp
if got > request.hashes.len and (unmatched + other) == 0: if got > request.hashes.len and (unmatched + other) == 0:
traceRecvGot "EXCESS reply NodeData" trace trEthRecvGot & "EXCESS reply NodeData"
elif got == request.hashes.len or use != got: elif got == request.hashes.len or use != got:
traceRecvGot "reply NodeData" trace trEthRecvGot & "reply NodeData"
elif got < request.hashes.len: elif got < request.hashes.len:
traceRecvGot "TRUNCATED reply NodeData" trace trEthRecvGot & "TRUNCATED reply NodeData"
if use != got: if use != got:
logScope: logScope:
@ -173,27 +192,29 @@ proc traceNodeDataReply(request: NodeDataRequest,
pathRange=request.pathRange pathRange=request.pathRange
peer=request.sp peer=request.sp
if unmatched > 0: if unmatched > 0:
traceRecvProtocolViolation "incorrect hashes in NodeData" trace trEthRecvProtocolViolation & "incorrect hashes in NodeData"
debug "Snap: Warning: NodeData has nodes with incorrect hashes" debug "Warning: NodeData has nodes with incorrect hashes"
elif other > 0: elif other > 0:
traceRecvProtocolViolation "mixed request nodes in NodeData" trace trEthRecvProtocolViolation & "mixed request nodes in NodeData"
debug "Snap: Warning: NodeData has nodes from mixed requests" debug "Warning: NodeData has nodes from mixed requests"
elif got > request.hashes.len: elif got > request.hashes.len:
# Excess without unmatched/other is only possible with duplicates > 0. # Excess without unmatched/other is only possible with duplicates > 0.
traceRecvProtocolViolation "excess nodes in NodeData" trace trEthRecvProtocolViolation & "excess nodes in NodeData"
debug "Snap: Warning: NodeData has more nodes than requested" debug "Warning: NodeData has more nodes than requested"
else: else:
traceRecvProtocolViolation "duplicate nodes in NodeData" trace trEthRecvProtocolViolation & "duplicate nodes in NodeData"
debug "Snap: Warning: NodeData has duplicate nodes" debug "Warning: NodeData has duplicate nodes"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob], proc nodeDataMatchRequest(
reverseMap: var seq[int], rq: RequestDataQueue,
use, unmatched, other, duplicates: var int data: openArray[Blob],
): NodeDataRequest = reverseMap: var seq[int],
use, unmatched, other, duplicates: var int
): RequestData =
## Verify hashes in the received node data and use them to find the matching ## Verify hashes in the received node data and use them to find the matching
## request, and match individual nodes to indices in the request in case they ## request, and match individual nodes to indices in the request in case they
## are out of order, which is allowed. Note, even if we know which request, ## are out of order, which is allowed. Note, even if we know which request,
@ -206,11 +227,11 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
## `use`, `unmatched`, `other` or `duplicates` are incremented for each node. ## `use`, `unmatched`, `other` or `duplicates` are incremented for each node.
## If the last three happen, the reply has errors, but the caller can decide ## If the last three happen, the reply has errors, but the caller can decide
## what to do. Non-nil `request` may still be returned with those errors. ## what to do. Non-nil `request` may still be returned with those errors.
var request: NodeDataRequest = nil var request: RequestData = nil
# Iterate through reply data, hashing and efficiently finding what matches. # Iterate through reply data, hashing and efficiently finding what matches.
for i in 0 ..< data.len: for i in 0 ..< data.len:
var itemRequest: NodeDataRequest var itemRequest: RequestData
var index = 0 var index = 0
let hash = data[i].toNodeHash let hash = data[i].toNodeHash
if i == 0: if i == 0:
@ -220,7 +241,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# make sure we always find the oldest queued request first. # make sure we always find the oldest queued request first.
var j = 0 var j = 0
while j < rq.beforeFirstHash.len: while j < rq.beforeFirstHash.len:
let hashRequest = rq.beforeFirstHash[j].NodeDataRequest let hashRequest = rq.beforeFirstHash[j].RequestData
if hashRequest.hashes[0] == hash: if hashRequest.hashes[0] == hash:
itemRequest = hashRequest itemRequest = hashRequest
break break
@ -229,7 +250,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# in the global request table when replies have items in requested # in the global request table when replies have items in requested
# order, even though replies themselves are out of order. # order, even though replies themselves are out of order.
if j == 0: if j == 0:
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash)
if not itemRequest.isNil: if not itemRequest.isNil:
break break
rq.itemHash[addr hashRequest.hashes[0]] = (hashRequest, 0) rq.itemHash[addr hashRequest.hashes[0]] = (hashRequest, 0)
@ -247,7 +268,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
# If this succeeds, the reply must have items out of requested order. # If this succeeds, the reply must have items out of requested order.
# If it fails, a peer sent a bad reply. # If it fails, a peer sent a bad reply.
if itemRequest.isNil: if itemRequest.isNil:
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash)
if itemRequest.isNil: if itemRequest.isNil:
# Hash and search items in the current request first, if there is one. # Hash and search items in the current request first, if there is one.
if not request.isNil and not request.fullHashed: if not request.isNil and not request.fullHashed:
@ -255,7 +276,7 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
for j in 0 ..< request.hashes.len: for j in 0 ..< request.hashes.len:
rq.itemHash[addr request.hashes[j]] = (request, j) rq.itemHash[addr request.hashes[j]] = (request, j)
(itemRequest, index) = (itemRequest, index) =
rq.itemHash.getOrDefault(unsafeAddr hash).ex rq.itemHash.getOrDefault(unsafeAddr hash)
if itemRequest.isNil: if itemRequest.isNil:
# Hash and search all items across all requests. # Hash and search all items across all requests.
if rq.beforeFirstHash.len + rq.beforeFullHash.len > 0: if rq.beforeFirstHash.len + rq.beforeFullHash.len > 0:
@ -263,12 +284,12 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
rq.beforeFirstHash.add(rq.beforeFullHash.toSeq) rq.beforeFirstHash.add(rq.beforeFullHash.toSeq)
rq.beforeFullHash.clear() rq.beforeFullHash.clear()
for hashRequest in rq.beforeFirstHash: for hashRequest in rq.beforeFirstHash:
if not hashRequest.ex.fullHashed: if not hashRequest.fullHashed:
hashRequest.ex.fullHashed = true hashRequest.fullHashed = true
for j in 0 ..< hashRequest.ex.hashes.len: for j in 0 ..< hashRequest.hashes.len:
rq.itemHash[addr hashRequest.ex.hashes[j]] = (hashRequest, j) rq.itemHash[addr hashRequest.hashes[j]] = (hashRequest, j)
rq.beforeFirstHash.setLen(0) rq.beforeFirstHash.setLen(0)
(itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash).ex (itemRequest, index) = rq.itemHash.getOrDefault(unsafeAddr hash)
if itemRequest.isNil: if itemRequest.isNil:
# Not found anywhere. # Not found anywhere.
inc unmatched inc unmatched
@ -297,15 +318,15 @@ proc nodeDataMatchRequest(rq: NodeDataRequestQueue, data: openArray[Blob],
return request return request
proc nodeDataRequestEnqueue(rq: NodeDataRequestQueue, proc nodeDataRequestEnqueue(rq: RequestDataQueue,
request: NodeDataRequest) = request: RequestData) =
## Add `request` to the data structures in `rq: NodeDataRequest`. ## Add `request` to the data structures in `rq: RequestData`.
doAssert not rq.liveRequests.containsOrIncl(request) doAssert not rq.liveRequests.containsOrIncl(request)
rq.beforeFirstHash.add(request) rq.beforeFirstHash.add(request)
proc nodeDataRequestDequeue(rq: NodeDataRequestQueue, proc nodeDataRequestDequeue(rq: RequestDataQueue,
request: NodeDataRequest) = request: RequestData) =
## Remove `request` from the data structures in `rq: NodeDataRequest`. ## Remove `request` from the data structures in `rq: RequestData`.
doAssert not rq.liveRequests.missingOrExcl(request) doAssert not rq.liveRequests.missingOrExcl(request)
let index = rq.beforeFirstHash.find(request) let index = rq.beforeFirstHash.find(request)
if index >= 0: if index >= 0:
@ -315,33 +336,33 @@ proc nodeDataRequestDequeue(rq: NodeDataRequestQueue,
rq.itemHash.del(addr request.hashes[j]) rq.itemHash.del(addr request.hashes[j])
# Forward declarations. # Forward declarations.
proc nodeDataTryEmpties(rq: NodeDataRequestQueue) proc nodeDataTryEmpties(rq: RequestDataQueue)
proc nodeDataEnqueueAndSend(request: NodeDataRequest) {.async.} proc nodeDataEnqueueAndSend(request: RequestData) {.async.}
proc nodeDataComplete(request: NodeDataRequest, reply: NodeDataReply, proc nodeDataComplete(request: RequestData, reply: ReplyData,
insideTryEmpties = false) = insideTryEmpties = false) =
## Complete `request` with received data or other reply. ## Complete `request` with received data or other reply.
if request.future.finished: if request.future.finished:
# Subtle: Timer can trigger and its callback be added to Chronos run loop, # Subtle: Timer can trigger and its callback be added to Chronos run loop,
# then data event trigger and call `clearTimer()`. The timer callback # then data event trigger and call `clearTimer()`. The timer callback
# will then run but it must be ignored. # will then run but it must be ignored.
debug "Snap: Warning: Resolved timer race over NodeData reply" debug "Warning: Resolved timer race over NodeData reply"
else: else:
request.timer.clearTimer() request.timer.clearTimer()
request.future.complete(reply) request.future.complete(reply)
let rq = request.sp.nodeDataRequests let rq = request.sp.requestsEx
trace "nodeDataRequestDequeue", addr=cast[pointer](request).repr trace "nodeDataRequestDequeue", addr=cast[pointer](request).repr
rq.nodeDataRequestDequeue(request) rq.nodeDataRequestDequeue(request)
# It may now be possible to match empty replies to earlier requests. # It may now be possible to match empty replies to earlier requests.
if not insideTryEmpties: if not insideTryEmpties:
rq.nodeDataTryEmpties() rq.nodeDataTryEmpties()
proc nodeDataTimeout(request: NodeDataRequest) = proc nodeDataTimeout(request: RequestData) =
## Complete `request` with timeout. ## Complete `request` with timeout.
request.traceNodeDataReplyTimeout() request.traceReplyDataTimeout()
{.gcsafe.}: request.nodeDataComplete(nil) {.gcsafe.}: request.nodeDataComplete(nil)
proc nodeDataTryEmpties(rq: NodeDataRequestQueue) = proc nodeDataTryEmpties(rq: RequestDataQueue) =
## See if we can match queued empty replies to earlier requests. ## See if we can match queued empty replies to earlier requests.
# TODO: This approach doesn't handle timeouts and errors correctly. # TODO: This approach doesn't handle timeouts and errors correctly.
# The problem is it's ambiguous whether an empty reply after timed out # The problem is it's ambiguous whether an empty reply after timed out
@ -351,43 +372,46 @@ proc nodeDataTryEmpties(rq: NodeDataRequestQueue) =
if rq.liveRequests.len > 0: if rq.liveRequests.len > 0:
# Careful: Use `.toSeq` below because we must not use the `HashSet` # Careful: Use `.toSeq` below because we must not use the `HashSet`
# iterator while the set is being changed. # iterator while the set is being changed.
for request in rq.liveRequests.toSeq.mapIt(it.ex): for request in rq.liveRequests.toSeq:
# Constructed reply object, because empty is different from timeout. # Constructed reply object, because empty is different from timeout.
request.nodeDataComplete(NodeDataReply(), true) request.nodeDataComplete(ReplyData(), true)
# Move all temporarily delayed requests to the live state, and send them. # Move all temporarily delayed requests to the live state, and send them.
if rq.waitingOnEmpties.len > 0: if rq.waitingOnEmpties.len > 0:
var tmpList: seq[NodeDataRequestBase] var tmpList: seq[RequestData]
swap(tmpList, rq.waitingOnEmpties) swap(tmpList, rq.waitingOnEmpties)
for i in 0 ..< tmpList.len: for i in 0 ..< tmpList.len:
asyncSpawn nodeDataEnqueueAndSend(tmpList[i].ex) asyncSpawn nodeDataEnqueueAndSend(tmpList[i])
proc nodeDataNewRequest(sp: SnapPeerEx, hashes: seq[NodeHash], proc new(
pathFrom, pathTo: InteriorPath T: type RequestData,
): NodeDataRequest = sp: SnapPeer,
## Make a new `NodeDataRequest` to receive a reply or timeout in future. The hashes: seq[NodeHash],
pathFrom, pathTo: InteriorPath
): RequestData =
## Make a new `RequestData` to receive a reply or timeout in future. The
## caller is responsible for sending the `GetNodeData` request, and must do ## caller is responsible for sending the `GetNodeData` request, and must do
## that after this setup (not before) to avoid race conditions. ## that after this setup (not before) to avoid race conditions.
let request = NodeDataRequest(sp: sp, hashes: hashes, let request = RequestData(sp: sp, hashes: hashes,
pathRange: (pathFrom, pathTo)) pathRange: (pathFrom, pathTo))
# TODO: Cache the time when making batches of requests, instead of calling # TODO: Cache the time when making batches of requests, instead of calling
# `Moment.fromNow()` which always does a system call. `p2pProtocol` request # `Moment.fromNow()` which always does a system call. `p2pProtocol` request
# timeouts have the same issue (and is where we got 10 seconds default). # timeouts have the same issue (and is where we got 10 seconds default).
# request.timer = setTimer(Moment.fromNow(10.seconds), # request.timer = setTimer(Moment.fromNow(10.seconds),
# nodeDataTimeout, cast[pointer](request)) # nodeDataTimeout, cast[pointer](request))
request.timer = safeSetTimer(Moment.fromNow(10.seconds), request.timer = safeSetTimer(Moment.fromNow(10.seconds),
nodeDataTimeout, request) nodeDataTimeout, request)
request.future = newFuture[NodeDataReply]() request.future = newFuture[ReplyData]()
return request return request
proc nodeDataEnqueueAndSend(request: NodeDataRequest) {.async.} = proc nodeDataEnqueueAndSend(request: RequestData) {.async.} =
## Helper function to send an `eth.GetNodeData` request. ## Helper function to send an `eth.GetNodeData` request.
## But not when we're draining the in flight queue to match empty replies. ## But not when we're draining the in flight queue to match empty replies.
let sp = request.sp let sp = request.sp
if sp.stopped: if sp.ctrl.runState == SyncStopped:
request.traceGetNodeDataDisconnected() request.traceGetNodeDataDisconnected()
request.future.complete(nil) request.future.complete(nil)
return return
let rq = sp.nodeDataRequests let rq = sp.requestsEx
if rq.empties > 0: if rq.empties > 0:
request.traceGetNodeDataDelaying() request.traceGetNodeDataDelaying()
rq.waitingOnEmpties.add(request) rq.waitingOnEmpties.add(request)
@ -403,13 +427,13 @@ proc nodeDataEnqueueAndSend(request: NodeDataRequest) {.async.} =
except CatchableError as e: except CatchableError as e:
request.traceGetNodeDataSendError(e) request.traceGetNodeDataSendError(e)
inc sp.stats.major.networkErrors inc sp.stats.major.networkErrors
sp.stopped = true sp.ctrl.runState = SyncStopped
request.future.fail(e) request.future.fail(e)
proc onNodeData(sp: SnapPeerEx, data: openArray[Blob]) = proc onNodeData(sp: SnapPeer, data: openArray[Blob]) =
## Handle an incoming `eth.NodeData` reply. ## Handle an incoming `eth.NodeData` reply.
## Practically, this is also where all the incoming packet trace messages go. ## Practically, this is also where all the incoming packet trace messages go.
let rq = sp.nodeDataRequests let rq = sp.requestsEx
# Empty replies are meaningful, but we can only associate them with requests # Empty replies are meaningful, but we can only associate them with requests
# when there are enough empty replies to cover all outstanding requests. If # when there are enough empty replies to cover all outstanding requests. If
@ -419,23 +443,23 @@ proc onNodeData(sp: SnapPeerEx, data: openArray[Blob]) =
# If there are no requests, don't queue, just let processing fall # If there are no requests, don't queue, just let processing fall
# through until the "non-reply" protocol violation error. # through until the "non-reply" protocol violation error.
if rq.liveRequests.len > 0: if rq.liveRequests.len > 0:
sp.traceNodeDataReplyEmpty(if rq.liveRequests.len != 1: nil sp.traceReplyDataEmpty(if rq.liveRequests.len != 1: nil
else: rq.liveRequests.toSeq[0].ex) else: rq.liveRequests.toSeq[0])
inc rq.empties inc rq.empties
# It may now be possible to match empty replies to earlier requests. # It may now be possible to match empty replies to earlier requests.
rq.nodeDataTryEmpties() rq.nodeDataTryEmpties()
return return
let reply = NodeDataReply() let reply = ReplyData()
var (use, unmatched, other, duplicates) = (0, 0, 0, 0) var (use, unmatched, other, duplicates) = (0, 0, 0, 0)
let request = nodeDataMatchRequest(rq, data, reply.reverseMap, let request = nodeDataMatchRequest(rq, data, reply.reverseMap,
use, unmatched, other, duplicates) use, unmatched, other, duplicates)
if request.isNil: if request.isNil:
sp.traceNodeDataReplyUnmatched(data.len) sp.traceReplyDataUnmatched(data.len)
return return
request.traceNodeDataReply(data.len, use, unmatched, other, duplicates) request.traceReplyData(data.len, use, unmatched, other, duplicates)
# TODO: Speed improvement possible here. # TODO: Speed improvement possible here.
if reply.reverseMap.len == 0: if reply.reverseMap.len == 0:
@ -458,11 +482,15 @@ proc onNodeData(sp: SnapPeerEx, data: openArray[Blob]) =
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc getNodeData*(sp: SnapPeerEx, hashes: seq[NodeHash], proc new*(
pathFrom, pathTo: InteriorPath): Future[NodeDataReply] T: type ReplyData,
{.async.} = sp: SnapPeer,
hashes: seq[NodeHash],
pathFrom = InteriorPath(),
pathTo = InteriorPath()
): Future[T] {.async.} =
## Async function to send a `GetNodeData` request to a peer, and when the ## Async function to send a `GetNodeData` request to a peer, and when the
## peer replies, or on timeout or error, return `NodeDataReply`. ## peer replies, or on timeout or error, return `ReplyData`.
## ##
## The request is a list of hashes. The reply is a list of trie nodes or ## The request is a list of hashes. The reply is a list of trie nodes or
## contract bytecodes matching those hashes, not necessarily in the same ## contract bytecodes matching those hashes, not necessarily in the same
@ -476,28 +504,55 @@ proc getNodeData*(sp: SnapPeerEx, hashes: seq[NodeHash],
## ##
## `pathFrom` and `pathTo` are not used except for logging. ## `pathFrom` and `pathTo` are not used except for logging.
let request = sp.nodeDataNewRequest(hashes, pathFrom, pathTo) let request = RequestData.new(sp, hashes, pathFrom, pathTo)
# There is no "Sending..." trace message here, because it can be delayed # There is no "Sending..." trace message here, because it can be delayed
# by the empty reply logic in `nodeDataEnqueueAndSend`. # by the empty reply logic in `nodeDataEnqueueAndSend`.
var reply: NodeDataReply = nil var reply: ReplyData = nil
try: try:
await request.nodeDataEnqueueAndSend() await request.nodeDataEnqueueAndSend()
reply = await request.future reply = await request.future
except CatchableError as e: except CatchableError as e:
request.traceNodeDataReplyError(e) request.traceReplyDataError(e)
inc sp.stats.major.networkErrors inc sp.stats.major.networkErrors
sp.stopped = true sp.ctrl.runState = SyncStopped
return nil return nil
# Timeout, packet and packet error trace messages are done in `onNodeData` # Timeout, packet and packet error trace messages are done in `onNodeData`
# and `nodeDataTimeout`, where there is more context than here. Here we # and `nodeDataTimeout`, where there is more context than here. Here we
# always received just valid data with hashes already verified, or `nil`. # always received just valid data with hashes already verified, or `nil`.
return reply return reply
proc setupGetNodeData*(sp: SnapPeerEx) = proc replyType*(reply: ReplyData): ReplyDataType =
## Initialise `SnapPeerEx` to support `GetNodeData` calls. ## Fancy interface for evaluating the reply lengths for none, 1, or many.
## If the `reply` argument is `nil`, the result `NoReplyData` is returned
## which is the same as for zero lengths reply.
if reply.isNil or reply.hashVerifiedData.len == 0:
NoReplyData
elif reply.hashVerifiedData.len == 1:
SingleEntryReply
else:
MultipleEntriesReply
if sp.nodeDataRequests.isNil: proc `[]`*(reply: ReplyData; inx: int): Blob =
sp.nodeDataRequests = NodeDataRequestQueue() ## Returns the reverse indexed item from the reply cache (if any). If
## `reply` is `nil` or `inx` is out of bounds, an empty `Blob` (i.e. `@[]`)
## is returned.
##
## Note that the length of the `reply` list is limited by the `new()`
## contructor argument `hashes`. So there is no `len` directive used.
if 0 <= inx:
if inx < reply.reverseMap.len:
let xni = reply.reverseMap[inx] - 1
if 0 <= xni:
return reply.hashVerifiedData[xni]
if inx < reply.hashVerifiedData.len:
return reply.hashVerifiedData[inx]
proc replyDataSetup*(sp: SnapPeer) =
## Initialise `SnapPeer` to support `replyDataGet()` calls.
if sp.requestsEx.isNil:
sp.requestsEx = RequestDataQueue()
sp.peer.state(eth).onNodeData = sp.peer.state(eth).onNodeData =
proc (_: Peer, data: openArray[Blob]) = proc (_: Peer, data: openArray[Blob]) =
@ -505,17 +560,10 @@ proc setupGetNodeData*(sp: SnapPeerEx) =
sp.peer.state(eth).onGetNodeData = sp.peer.state(eth).onGetNodeData =
proc (_: Peer, hashes: openArray[Hash256], data: var seq[Blob]) = proc (_: Peer, hashes: openArray[Hash256], data: var seq[Blob]) =
# Return empty nodes result. This callback is installed to ## Return empty nodes result. This callback is installed to
# ensure we don't reply with nodes from the chainDb. ## ensure we don't reply with nodes from the chainDb.
discard discard
proc reverseMap*(reply: NodeDataReply, index: int): int =
## Given an index into the request hash list, return index into the reply
## `hashVerifiedData`, or -1 if there is no data for that request hash.
if index < reply.reverseMap.len: reply.reverseMap[index] - 1
elif index < reply.hashVerifiedData.len: index
else: -1
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -20,7 +20,7 @@ import
{.push raises: [Defect].} {.push raises: [Defect].}
type type
SharedFetchState* = ref object SnapSyncFetchEx* = ref object of SnapSyncFetchBase
## Account fetching state that is shared among all peers. ## Account fetching state that is shared among all peers.
# Leaf path ranges not fetched or in progress on any peer. # Leaf path ranges not fetched or in progress on any peer.
leafRanges*: seq[LeafRange] leafRanges*: seq[LeafRange]
@ -34,9 +34,6 @@ type
countRangeTrieStarted*: bool countRangeTrieStarted*: bool
logTicker: TimerCallback logTicker: TimerCallback
SnapSyncEx* = ref object of SnapSyncBase
sharedFetch*: SharedFetchState
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private timer helpers # Private timer helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -69,9 +66,9 @@ proc percent(value: UInt256, discriminator: bool): string =
result.add('%') result.add('%')
proc setLogTicker(sf: SharedFetchState; at: Moment) {.gcsafe.} proc setLogTicker(sf: SnapSyncFetchEx; at: Moment) {.gcsafe.}
proc runLogTicker(sf: SharedFetchState) {.gcsafe.} = proc runLogTicker(sf: SnapSyncFetchEx) {.gcsafe.} =
doAssert not sf.isNil doAssert not sf.isNil
info "State: Account sync progress", info "State: Account sync progress",
percent = percent(sf.countRange, sf.countRangeStarted), percent = percent(sf.countRange, sf.countRangeStarted),
@ -80,20 +77,20 @@ proc runLogTicker(sf: SharedFetchState) {.gcsafe.} =
trie = percent(sf.countRangeTrie, sf.countRangeTrieStarted) trie = percent(sf.countRangeTrie, sf.countRangeTrieStarted)
sf.setLogTicker(Moment.fromNow(1.seconds)) sf.setLogTicker(Moment.fromNow(1.seconds))
proc setLogTicker(sf: SharedFetchState; at: Moment) = proc setLogTicker(sf: SnapSyncFetchEx; at: Moment) =
sf.logTicker = safeSetTimer(at, runLogTicker, sf) sf.logTicker = safeSetTimer(at, runLogTicker, sf)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public constructor # Public constructor
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc new*(T: type SharedFetchState; startLoggingAfter = 100.milliseconds): T = proc new*(T: type SnapSyncFetchEx; startAfter = 100.milliseconds): T =
result = SharedFetchState( result = SnapSyncFetchEx(
leafRanges: @[LeafRange( leafRanges: @[LeafRange(
leafLow: LeafPath.low, leafLow: LeafPath.low,
leafHigh: LeafPath.high)]) leafHigh: LeafPath.high)])
result.logTicker = safeSetTimer( result.logTicker = safeSetTimer(
Moment.fromNow(startLoggingAfter), Moment.fromNow(startAfter),
runLogTicker, runLogTicker,
result) result)
@ -101,9 +98,17 @@ proc new*(T: type SharedFetchState; startLoggingAfter = 100.milliseconds): T =
# Public getters # Public getters
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc nsx*[T](sp: T): SnapSyncEx = proc sharedFetchEx*(ns: SnapSync): SnapSyncFetchEx =
## Handy helper, typically used with `T` instantiated as `SnapPeerEx` ## Handy helper
sp.ns.SnapSyncEx ns.sharedFetch.SnapSyncFetchEx
# ------------------------------------------------------------------------------
# Public setters
# ------------------------------------------------------------------------------
proc `sharedFetchEx=`*(ns: SnapSync; value: SnapSyncFetchEx) =
## Handy helper
ns.sharedFetch = value
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -31,18 +31,24 @@
## exception to `parseTrieNodeError` if it occurs. ## exception to `parseTrieNodeError` if it occurs.
import import
eth/[common/eth_types, p2p, rlp], eth/[common/eth_types, p2p],
../trace_helper, ../../types,
"."/[base_desc, path_desc, types] ".."/[base_desc, path_desc]
{.push raises: [Defect].} {.push raises: [Defect].}
logScope:
topics = "snap validate trie node"
type type
TrieNodeParseContext* = object TrieNodeParseContext* = object
childQueue*: seq[(InteriorPath, NodeHash, bool)] childQueue*: seq[(InteriorPath, NodeHash, bool)]
leafQueue*: seq[(LeafPath, NodeHash, Blob)] leafQueue*: seq[(LeafPath, NodeHash, Blob)]
errors*: int errors*: int
const
# Local debugging
traceIndividualNodesOk = true
template read(rlp: var Rlp, T: type NodeHash): T = template read(rlp: var Rlp, T: type NodeHash): T =
rlp.read(Hash256).T rlp.read(Hash256).T
@ -82,7 +88,7 @@ template nodeError(msg: string{lit}, more: varargs[untyped]) =
#echo inspect(rlpFromBytes(nodeBytes)) #echo inspect(rlpFromBytes(nodeBytes))
inc context.errors inc context.errors
proc parseLeafValue(sp: SnapPeerBase, proc parseLeafValue(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
nodeRlp: var Rlp, leafPath: InteriorPath, nodeRlp: var Rlp, leafPath: InteriorPath,
context: var TrieNodeParseContext context: var TrieNodeParseContext
@ -114,7 +120,7 @@ proc parseLeafValue(sp: SnapPeerBase,
context.leafQueue.add((leafPath.toLeafPath, nodeHash, nodeRlp.toBytes)) context.leafQueue.add((leafPath.toLeafPath, nodeHash, nodeRlp.toBytes))
if traceIndividualNodes: when traceIndividualNodesOk:
let leafBytes = context.leafQueue[^1][2] let leafBytes = context.leafQueue[^1][2]
trace "Trie: Account leaf found", peer=sp, trace "Trie: Account leaf found", peer=sp,
path=combinePaths(nodePath, leafPath), path=combinePaths(nodePath, leafPath),
@ -123,13 +129,13 @@ proc parseLeafValue(sp: SnapPeerBase,
# echo inspect(rlpFromBytes(leafBytes)) # echo inspect(rlpFromBytes(leafBytes))
# Forward declaration, used for bounded, rare recursion. # Forward declaration, used for bounded, rare recursion.
proc parseTrieNode*(sp: SnapPeerBase, proc parseTrieNode*(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
fromExtension: bool, fromExtension: bool,
context: var TrieNodeParseContext context: var TrieNodeParseContext
) {.gcsafe, raises: [Defect, RlpError].} ) {.gcsafe, raises: [Defect, RlpError].}
proc parseExtensionChild(sp: SnapPeerBase, proc parseExtensionChild(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
childPath: InteriorPath, childPath: InteriorPath,
@ -177,7 +183,7 @@ proc parseExtensionChild(sp: SnapPeerBase,
else: else:
childError "Extension node child (RLP element 1) has length > 32 bytes" childError "Extension node child (RLP element 1) has length > 32 bytes"
proc parseExtensionOrLeaf(sp: SnapPeerBase, proc parseExtensionOrLeaf(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
fromExtension: bool, fromExtension: bool,
@ -265,7 +271,7 @@ proc parseExtensionOrLeaf(sp: SnapPeerBase,
sp.parseExtensionChild(nodePath, nodeHash, nodeBytes, nodeRlp, sp.parseExtensionChild(nodePath, nodeHash, nodeBytes, nodeRlp,
childPath, context) childPath, context)
proc parseBranchNode(sp: SnapPeerBase, proc parseBranchNode(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodePath: InteriorPath, nodeHash: NodeHash,
nodeBytes: Blob, nodeRlp: var Rlp, nodeBytes: Blob, nodeRlp: var Rlp,
context: var TrieNodeParseContext context: var TrieNodeParseContext
@ -339,7 +345,7 @@ proc parseBranchNode(sp: SnapPeerBase,
branches=branchCount, minBranches=2 branches=branchCount, minBranches=2
return return
proc parseTrieNode*(sp: SnapPeerBase, proc parseTrieNode*(sp: SnapPeer,
nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob, nodePath: InteriorPath, nodeHash: NodeHash, nodeBytes: Blob,
fromExtension: bool, context: var TrieNodeParseContext fromExtension: bool, context: var TrieNodeParseContext
) {.raises: [Defect, RlpError].} = ) {.raises: [Defect, RlpError].} =
@ -439,7 +445,7 @@ proc parseTrieNode*(sp: SnapPeerBase,
listLen=nodeListLen listLen=nodeListLen
return return
proc parseTrieNodeError*(sp: SnapPeerBase, nodePath: InteriorPath, proc parseTrieNodeError*(sp: SnapPeer, nodePath: InteriorPath,
nodeHash: NodeHash, nodeBytes: Blob, nodeHash: NodeHash, nodeBytes: Blob,
context: var TrieNodeParseContext, context: var TrieNodeParseContext,
exception: ref RlpError) = exception: ref RlpError) =

View File

@ -1,78 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/[sets, tables],
chronos,
stint,
".."/[base_desc, path_desc, types]
type
NodeDataRequestBase* = ref object of RootObj
## Stub object, to be inherited
SingleNodeRequestBase* = ref object of RootObj
## Stub object, to be inherited
NodeDataRequestQueue* = ref object
liveRequests*: HashSet[NodeDataRequestBase]
empties*: int
# `OrderedSet` was considered instead of `seq` here, but it has a slow
# implementation of `excl`, defeating the motivation for using it.
waitingOnEmpties*: seq[NodeDataRequestBase]
beforeFirstHash*: seq[NodeDataRequestBase]
beforeFullHash*: HashSet[NodeDataRequestBase]
# We need to be able to lookup requests by the hash of reply data.
# `ptr NodeHash` is used here so the table doesn't require an independent
# copy of the hash. The hash is part of the request object.
itemHash*: Table[ptr NodeHash, (NodeDataRequestBase, int)]
FetchState* = ref object
## Account fetching state on a single peer.
sp*: SnapPeerEx
nodeGetQueue*: seq[SingleNodeRequestBase]
nodeGetsInFlight*: int
scheduledBatch*: bool
progressPrefix*: string
progressCount*: int
nodesInFlight*: int
getNodeDataErrors*: int
leafRange*: LeafRange
unwindAccounts*: int64
unwindAccountBytes*: int64
finish*: Future[void]
SnapPeerEx* = ref object of SnapPeerBase
nodeDataRequests*: NodeDataRequestQueue
fetchState*: FetchState
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc `$`*(sp: SnapPeerEx): string =
$sp.SnapPeerBase
# ------------------------------------------------------------------------------
# Public getter
# ------------------------------------------------------------------------------
proc ex*(base: SnapPeerBase): SnapPeerEx =
## to extended object instance version
base.SnapPeerEx
# ------------------------------------------------------------------------------
# Public setter
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,69 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
chronicles,
eth/common/eth_types,
stew/byteutils
{.push raises: [Defect].}
const
tracePackets* = true
## Whether to `trace` log each sync network message.
traceGossips* = false
## Whether to `trace` log each gossip network message.
traceHandshakes* = true
## Whether to `trace` log each network handshake message.
traceTimeouts* = true
## Whether to `trace` log each network request timeout.
traceNetworkErrors* = true
## Whether to `trace` log each network request error.
tracePacketErrors* = true
## Whether to `trace` log each messages with invalid data.
traceIndividualNodes* = false
## Whether to `trace` log each trie node, account, storage, receipt, etc.
template tracePacket*(msg: static[string], args: varargs[untyped]) =
if tracePackets: trace `msg`, `args`
template traceGossip*(msg: static[string], args: varargs[untyped]) =
if traceGossips: trace `msg`, `args`
template traceTimeout*(msg: static[string], args: varargs[untyped]) =
if traceTimeouts: trace `msg`, `args`
template traceNetworkError*(msg: static[string], args: varargs[untyped]) =
if traceNetworkErrors: trace `msg`, `args`
template tracePacketError*(msg: static[string], args: varargs[untyped]) =
if tracePacketErrors: trace `msg`, `args`
template traceIndividualNode*(msg: static[string], args: varargs[untyped]) =
if traceIndividualNodes: trace `msg`, `args`
func toHex*(hash: Hash256): string =
## Shortcut for buteutils.toHex(hash.data)
hash.data.toHex
func traceStep*(request: BlocksRequest): string =
var str = if request.reverse: "-" else: "+"
if request.skip < high(typeof(request.skip)):
return str & $(request.skip + 1)
return static($(high(typeof(request.skip)).u256 + 1))
proc `$`*(hash: Hash256): string =
hash.data.toHex
proc `$`*(blob: Blob): string =
blob.toHex
proc `$`*(hashOrNum: HashOrNum): string =
# It's always obvious which one from the visible length of the string.
if hashOrNum.isHash: $hashOrNum.hash
else: $hashOrNum.number
# End

View File

@ -46,8 +46,11 @@ type
# Public Constructor # Public Constructor
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc new*(T: type TxHash): T = Hash256().T
proc new*(T: type NodeHash): T = Hash256().T proc new*(T: type NodeHash): T = Hash256().T
proc new*(T: type BlockHash): T = Hash256().T
proc new*(T: type TrieHash): T = Hash256().T
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -69,12 +72,36 @@ proc `==`*(a,b: BlockHash): bool {.borrow.}
proc toNodeHash*(data: Blob): NodeHash = proc toNodeHash*(data: Blob): NodeHash =
keccak256.digest(data).NodeHash keccak256.digest(data).NodeHash
proc toHashOrNum*(bh: BlockHash): HashOrNum =
HashOrNum(isHash: true, hash: bh.Hash256)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public debugging helpers # Public debugging helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc `$`*(th: TrieHash|NodeHash): string = func toHex*(hash: Hash256): string =
th.Hash256.data.toHex ## Shortcut for buteutils.toHex(hash.data)
hash.data.toHex
func `$`*(th: TrieHash|NodeHash): string =
th.Hash256.toHex
func `$`*(hash: Hash256): string =
hash.toHex
func `$`*(blob: Blob): string =
blob.toHex
func `$`*(hashOrNum: HashOrNum): string =
# It's always obvious which one from the visible length of the string.
if hashOrNum.isHash: $hashOrNum.hash
else: $hashOrNum.number
func traceStep*(request: BlocksRequest): string =
var str = if request.reverse: "-" else: "+"
if request.skip < high(typeof(request.skip)):
return str & $(request.skip + 1)
return static($(high(typeof(request.skip)).u256 + 1))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

2
vendor/nim-stew vendored

@ -1 +1 @@
Subproject commit bb705bf17b46d2c8f9bfb106d9cc7437009a2501 Subproject commit 779ba052c827af46bea79ff8b12b159f68c0f14a