Prepare snap server client test scenario cont3 (#1491)

* Handle last/all node(s) proof conditions at leaf node extractor

detail:
  Flag whether the maximum extracted node is the last one in database
  No proof needed if the full tree was extracted

* Clean up some helpers & definitions

details:
  Move entities to more plausible locations, e.g. `Account` object need
  not be dealt with in the range extractor as it applies to any kind of
  leaf data.

* Fix next/prev database walk fringe condition

details:
  First check needed might be for a leaf node which was done too late.

* Homogenise snap/1 protocol function prototypes

why:
  The range arguments `origin` and `limit` data types differed in various
  function prototypes (`Hash256` vs. `openArray[byte]`.)

* Implement `GetStorageRange` handler

* Implement server timeout for leaf node retrieval

why:
  This feature leaves control with the server over potentially costly
  actions invoked via the network

* Implement maximal reply size for snap service

why:
  This feature leaves control with the server over potentially costly
  actions invoked via the network.
This commit is contained in:
Jordan Hrycaj 2023-03-10 17:10:30 +00:00 committed by GitHub
parent d8a1adacaa
commit 2f7f2dba2d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 314 additions and 245 deletions

View File

@ -13,12 +13,13 @@
import
std/sequtils,
chronicles,
eth/p2p,
stew/interval_set,
chronos,
eth/[p2p, trie/trie_defs],
stew/[byteutils, interval_set],
../../db/db_chain,
../../core/chain,
../snap/range_desc,
../snap/worker/db/[hexary_desc, hexary_range],
../snap/[constants, range_desc],
../snap/worker/db/[hexary_desc, hexary_paths, hexary_range],
../protocol,
../protocol/snap/snap_types
@ -28,13 +29,26 @@ logScope:
type
SnapWireRef* = ref object of SnapWireBase
chain: ChainRef
elaFetchMax: chronos.Duration
dataSizeMax: int
peerPool: PeerPool
const
proofNodeSizeMax = 532
## Branch node with all branches `high(UInt256)` within RLP list
extraTraceMessages = false or true
## Enabled additional logging noise
proc proofNodesSizeMax*(n: int): int {.gcsafe.}
estimatedProofSize = hexaryRangeRlpNodesListSizeMax(10)
## Some expected upper limit, typically not more than 10 proof nodes
emptySnapStorageList = seq[SnapStorage].default
## Dummy list for empty slots
defaultElaFetchMax = 1500.milliseconds
## Fetching accounts or slots can be extensive, stop in the middle if
## it takes too long
defaultDataSizeMax = fetchRequestBytesLimit
## Truncate maximum data size
# ------------------------------------------------------------------------------
# Private functions: helpers
@ -46,38 +60,85 @@ template logTxt(info: static[string]): static[string] =
proc notImplemented(name: string) {.used.} =
debug "Wire handler method not implemented", meth=name
proc getAccountFn(chain: ChainRef): HexaryGetFn {.gcsafe.} =
# ----------------------------------
proc getAccountFn(
chain: ChainRef;
): HexaryGetFn
{.gcsafe.} =
let db = chain.com.db.db
return proc(key: openArray[byte]): Blob = db.get(key)
return proc(key: openArray[byte]): Blob =
db.get(key)
proc getStorageSlotsFn(
chain: ChainRef;
accKey: NodeKey;
): HexaryGetFn
{.gcsafe.} =
let db = chain.com.db.db
return proc(key: openArray[byte]): Blob =
db.get(key)
# ----------------------------------
proc to(
rl: RangeLeaf;
T: type SnapAccount;
): T
{.gcsafe, raises: [RlpError].} =
## Convert the generic `RangeLeaf` argument to payload type.
T(accHash: rl.key.to(Hash256),
accBody: rl.data.decode(Account))
proc to(
rl: RangeLeaf;
T: type SnapStorage;
): T
{.gcsafe.} =
## Convert the generic `RangeLeaf` argument to payload type.
T(slotHash: rl.key.to(Hash256),
slotData: rl.data)
# ------------------------------------------------------------------------------
# Private functions: fetch leaf range
# ------------------------------------------------------------------------------
proc mkNodeTagRange(
origin: openArray[byte];
limit: openArray[byte];
): Result[NodeTagRange,void] =
var (minPt, maxPt) = (low(NodeTag), high(NodeTag))
if 0 < origin.len or 0 < limit.len:
if not minPt.init(origin) or not maxPt.init(limit) or maxPt <= minPt:
when extraTraceMessages:
trace logTxt "mkNodeTagRange: malformed range", origin, limit
return err()
ok(NodeTagRange.new(minPt, maxPt))
proc fetchLeafRange(
ctx: SnapWireRef; # Handler descriptor
db: HexaryGetFn; # Database abstraction
root: Hash256; # State root
iv: NodeTagRange; # Proofed range of leaf paths
replySizeMax: int; # Updated size counter for the raw list
stopAt: Moment; # Implies timeout
): Result[RangeProof,void]
{.gcsafe, raises: [CatchableError].} =
let
rootKey = root.to(NodeKey)
estimatedProofSize = proofNodesSizeMax(10) # some expected upper limit
if replySizeMax <= estimatedProofSize:
trace logTxt "fetchLeafRange(): data size too small", iv, replySizeMax
return err() # package size too small
# Assemble result Note that the size limit is the size of the leaf nodes
# on wire. So the `sizeMax` is the argument size `replySizeMax` with some
# space removed to accommodate the proof nodes.
let
rootKey = root.to(NodeKey)
sizeMax = replySizeMax - estimatedProofSize
rc = db.hexaryRangeLeafsProof(rootKey, iv, sizeMax)
now = Moment.now()
timeout = if now < stopAt: stopAt - now else: 1.milliseconds
rc = db.hexaryRangeLeafsProof(rootKey, iv, sizeMax, timeout)
if rc.isErr:
error logTxt "fetchLeafRange(): database problem",
debug logTxt "fetchLeafRange: database problem",
iv, replySizeMax, error=rc.error
return err() # database error
let sizeOnWire = rc.value.leafsSize + rc.value.proofSize
@ -98,8 +159,8 @@ proc fetchLeafRange(
tailSize += rpl.leafs[leafsTop - tailItems].data.len + extraSize
tailItems.inc
if leafsTop <= tailItems:
trace logTxt "fetchLeafRange(): stripping leaf list failed",
iv, replySizeMax,leafsTop, tailItems
debug logTxt "fetchLeafRange: stripping leaf list failed",
iv, replySizeMax, leafsTop, tailItems
return err() # package size too small
rpl.leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack
@ -109,7 +170,7 @@ proc fetchLeafRange(
if strippedSizeOnWire <= replySizeMax:
return ok(leafProof)
trace logTxt "fetchLeafRange(): data size problem",
debug logTxt "fetchLeafRange: data size problem",
iv, replySizeMax, leafsTop, tailItems, strippedSizeOnWire
err()
@ -149,6 +210,8 @@ proc init*(
## Constructor (uses `init()` as suggested in style guide.)
let ctx = T(
chain: chain,
elaFetchMax: defaultElaFetchMax,
dataSizeMax: defaultDataSizeMax,
peerPool: peerPool)
#ctx.setupPeerObserver()
@ -158,16 +221,6 @@ proc init*(
# Public functions: helpers
# ------------------------------------------------------------------------------
proc proofNodesSizeMax*(n: int): int =
## Max number of bytes needed to store a list of `n` RLP encoded hexary
## nodes which is a `Branch` node where every link reference is set to
## `high(UInt256)`.
const nMax = high(int) div proofNodeSizeMax
if n <= nMax:
hexaryRangeRlpSize(n * proofNodeSizeMax)
else:
high(int)
proc proofEncode*(proof: seq[SnapProof]): Blob =
var writer = initRlpWriter()
writer.snapAppend SnapProofNodes(nodes: proof)
@ -184,23 +237,41 @@ proc proofDecode*(data: Blob): seq[SnapProof] {.gcsafe, raises: [RlpError].} =
method getAccountRange*(
ctx: SnapWireRef;
root: Hash256;
origin: Hash256;
limit: Hash256;
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): (seq[SnapAccount], SnapProofNodes)
{.gcsafe, raises: [CatchableError].} =
## Fetch accounts list from database
let sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
if sizeMax <= estimatedProofSize:
when extraTraceMessages:
trace logTxt "getAccountRange: max data size too small",
origin=origin.toHex, limit=limit.toHex, sizeMax
return # package size too small
let
iv = block: # Calculate effective accounts range (if any)
let rc = origin.mkNodeTagRange limit
if rc.isErr:
return
rc.value # malformed interval
db = ctx.chain.getAccountFn
iv = NodeTagRange.new(origin.to(NodeTag), limit.to(NodeTag))
sizeMax = min(replySizeMax,high(int).uint64).int
stopAt = Moment.now() + ctx.elaFetchMax
rc = ctx.fetchLeafRange(db, root, iv, sizeMax, stopAt)
trace logTxt "getAccountRange(): request data range", iv, replySizeMax
if rc.isErr:
return # extraction failed
let
accounts = rc.value.leafs.mapIt(it.to(SnapAccount))
proof = rc.value.proof
let rc = ctx.fetchLeafRange(db, root, iv, sizeMax)
if rc.isOk:
result[0] = rc.value.leafs.mapIt(it.to(SnapAccount))
result[1] = SnapProofNodes(nodes: rc.value.proof)
#when extraTraceMessages:
# trace logTxt "getAccountRange: done", iv, replySizeMax,
# nAccounts=accounts.len, nProof=proof.len
(accounts, SnapProofNodes(nodes: proof))
method getStorageRanges*(
@ -211,8 +282,91 @@ method getStorageRanges*(
limit: openArray[byte];
replySizeMax: uint64;
): (seq[seq[SnapStorage]], SnapProofNodes)
{.gcsafe.} =
notImplemented("getStorageRanges")
{.gcsafe, raises: [CatchableError].} =
## Fetch storage slots list from database
let sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
if sizeMax <= estimatedProofSize:
when extraTraceMessages:
trace logTxt "getStorageRanges: max data size too small",
origin=origin.toHex, limit=limit.toHex, sizeMax
return # package size too small
let
iv = block: # Calculate effective slots range (if any)
let rc = origin.mkNodeTagRange limit
if rc.isErr:
return
rc.value # malformed interval
accGetFn = ctx.chain.getAccountFn
rootKey = root.to(NodeKey)
stopAt = Moment.now() + ctx.elaFetchMax
# Loop over accounts
var
dataAllocated = 0
timeExceeded = false
slotLists: seq[seq[SnapStorage]]
proof: seq[SnapProof]
for accHash in accounts:
let
accKey = accHash.to(NodeKey)
accData = accKey.hexaryPath(rootKey, accGetFn).leafData
# Ignore missing account entry
if accData.len == 0:
slotLists.add emptySnapStorageList
dataAllocated.inc # empty list
when extraTraceMessages:
trace logTxt "getStorageRanges: no data", iv, sizeMax, dataAllocated,
accDataLen=accData.len
continue
# Ignore empty storage list
let stoRoot = rlp.decode(accData,Account).storageRoot
if stoRoot == emptyRlpHash:
slotLists.add emptySnapStorageList
dataAllocated.inc # empty list
trace logTxt "getStorageRanges: no slots", iv, sizeMax, dataAllocated,
accDataLen=accData.len, stoRoot
continue
# Collect data slots for this account
let
db = ctx.chain.getStorageSlotsFn(accKey)
rc = ctx.fetchLeafRange(db, stoRoot, iv, sizeMax - dataAllocated, stopAt)
if rc.isErr:
when extraTraceMessages:
trace logTxt "getStorageRanges: failed", iv, sizeMax, dataAllocated,
accDataLen=accData.len, stoRoot
return # extraction failed
# Process data slots for this account
dataAllocated += rc.value.leafsSize
#trace logTxt "getStorageRanges: data slots", iv, sizeMax, dataAllocated,
# accKey, stoRoot, nSlots=rc.value.leafs.len, nProof=rc.value.proof.len
slotLists.add rc.value.leafs.mapIt(it.to(SnapStorage))
if 0 < rc.value.proof.len:
proof = rc.value.proof
break # only last entry has a proof
# Stop unless there is enough space left
if sizeMax - dataAllocated <= estimatedProofSize:
break
if stopAt <= Moment.now():
timeExceeded = true
break
when extraTraceMessages:
trace logTxt "getStorageRanges: done", iv, sizeMax, dataAllocated,
nAccounts=accounts.len, nLeafLists=slotLists.len, nProof=proof.len,
timeExceeded
(slotLists, SnapProofNodes(nodes: proof))
method getByteCodes*(
ctx: SnapWireRef;

View File

@ -155,8 +155,8 @@ proc notImplemented(name: string) =
method getAccountRange*(
ctx: SnapWireBase;
root: Hash256;
origin: Hash256;
limit: Hash256;
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): (seq[SnapAccount], SnapProofNodes)
{.base, raises: [CatchableError].} =
@ -170,7 +170,7 @@ method getStorageRanges*(
limit: openArray[byte];
replySizeMax: uint64;
): (seq[seq[SnapStorage]], SnapProofNodes)
{.base.} =
{.base, raises: [CatchableError].} =
notImplemented("getStorageRanges")
method getByteCodes*(

View File

@ -9,129 +9,9 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## This module implements `snap/1`, the `Ethereum Snapshot Protocol (SNAP)
## <https://github.com/ethereum/devp2p/blob/master/caps/snap.md>`_.
##
## Modified `GetStorageRanges` (0x02) message syntax
## -------------------------------------------------
## As implemented here, the request message is encoded as
##
## `[reqID, rootHash, accountHashes, origin, limit, responseBytes]`
##
## It requests the storage slots of multiple accounts' storage tries. Since
## certain contracts have huge state, the method can also request storage
## slots from a single account, starting at a specific storage key hash.
## The intended purpose of this message is to fetch a large number of
## subsequent storage slots from a remote node and reconstruct a state
## subtrie locally.
##
## * `reqID`: Request ID to match up responses with
## * `rootHash`: 32 byte root hash of the account trie to serve
## * `accountHashes`: Array of 32 byte account hashes of the storage tries to serve
## * `origin`: Storage slot hash fragment of the first to retrieve (see below)
## * `limit`: Storage slot hash fragment after which to stop serving (see below)
## * `responseBytes`: 64 bit number soft limit at which to stop returning data
##
## Discussion of *Geth* `GetStorageRanges` behaviour
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
## - Parameters `origin` and `limit` may each be empty blobs, which mean "all
## zeros" (0x00000...) or "no limit" (0xfffff...) respectively.
##
## (Blobs shorter than 32 bytes can also be given, and they are extended with
## zero bytes; longer than 32 bytes can be given and are truncated, but this
## is *Geth* being too accepting, and shouldn't be used.)
##
## - In the `slots` reply, the last account's storage list may be empty even if
## that account has non-empty storage.
##
## This happens when the bytes threshold is reached just after finishing
## storage for the previous account, or when `origin` is greater than the
## first account's last storage slot. When either of these happens, `proof`
## is non-empty. In the case of `origin` zero or empty, the non-empty proof
## only contains the left-side boundary proof, because it meets the condition
## for omitting the right-side proof described in the next point.
##
## - In the `proof` reply, the right-side boundary proof is only included if
## the last returned storage slot has non-zero path and `origin != 0`, or if
## the result stops due to reaching the bytes threshold.
##
## Because there's only one proof anyway if left-side and right-side are the
## same path, this works out to mean the right-side proof is omitted in cases
## where `origin == 0` and the result stops at a slot `>= limit` before
## reaching the bytes threshold.
##
## Although the specification doesn't say anything about `limit`, this is
## against the spirit of the specification rule, which says the right-side
## proof is always included if the last returned path differs from the
## starting hash.
##
## The omitted right-side proof can cause problems when using `limit`.
## In other words, when doing range queries, or merging results from
## pipelining where different `stateRoot` hashes are used as time progresses.
## Workarounds:
##
## * Fetch the proof using a second `GetStorageRanges` query with non-zero
## `origin` (perhaps equal to `limit`; use `origin = 1` if `limit == 0`).
##
## * Avoid the condition by using `origin >= 1` when using `limit`.
##
## * Use trie node traversal (`snap` `GetTrieNodes`) to obtain the omitted proof.
##
## - When multiple accounts are requested with `origin > 0`, only one account's
## storage is returned. There is no point requesting multiple accounts with
## `origin > 0`. (It might be useful if it treated `origin` as applying to
## only the first account, but it doesn't.)
##
## - When multiple accounts are requested with non-default `limit` and
## `origin == 0`, and the first account result stops at a slot `>= limit`
## before reaching the bytes threshold, storage for the other accounts in the
## request are returned as well. The other accounts are not limited by
## `limit`, only the bytes threshold. The right-side proof is omitted from
## `proof` when this happens, because this is the same condition as described
## earlier for omitting the right-side proof. (It might be useful if it
## treated `origin` as applying to only the first account and `limit` to only
## the last account, but it doesn't.)
##
##
## Performance benefits
## --------------------
## `snap` is used for much higher performance transfer of the entire Ethereum
## execution state (accounts, storage, bytecode) compared with hexary trie
## traversal using the now obsolete `eth/66` `GetNodeData`.
##
## It improves both network and local storage performance. The benefits are
## substantial, and summarised here:
##
## - `Ethereum Snapshot Protocol (SNAP) - Expected results
## <https://github.com/ethereum/devp2p/blob/master/caps/snap.md>`_
## - `Geth v1.10.0 - Snap sync
## <https://blog.ethereum.org/2021/03/03/geth-v1-10-0/#snap-sync>`_
##
## In the Snap sync model, local storage benefits require clients to adopt a
## different representation of Ethereum state than the trie storage that *Geth*
## (and most clients) traditionally used, and still do in archive mode,
##
## However, Nimbus's sync method obtains similar local storage benefits
## whichever network protocol is used. Nimbus uses `snap` protocol because it
## is a more efficient network protocol.
##
## Distributed hash table (DHT) building block
## -------------------------------------------
## Although `snap` was designed for bootstrapping clients with the entire
## Ethereum state, it is well suited to fetching only a subset of path ranges.
## This may be useful for bootstrapping distributed hash tables (DHTs).
##
## Path range metadata benefits
## ----------------------------
## Because data is handled in path ranges, this allows a compact metadata
## representation of what data is stored locally and what isn't, compared with
## the size of a representation of partially completed trie traversal with
## `eth` `GetNodeData`. Due to the smaller metadata, after aborting a partial
## sync and restarting, it is possible to resume quickly, without waiting for
## the very slow local database scan associated with older versions of *Geth*.
##
## However, Nimbus's sync method uses this principle as inspiration to
## obtain similar metadata benefits whichever network protocol is used.
## This module implements Ethereum Snapshot Protocol version 1, `snap/1`.
## Specification:
## `snap/1 <https://github.com/ethereum/devp2p/blob/master/caps/snap.md>`_
import
std/options,
@ -192,16 +72,15 @@ p2pProtocol snap1(version = snapVersion,
requestResponse:
# User message 0x00: GetAccountRange.
# Note: `origin` and `limit` differs from the specification to match Geth.
proc getAccountRange(
peer: Peer;
root: Hash256;
origin: Hash256;
limit: Hash256;
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
) =
trace trSnapRecvReceived & "GetAccountRange (0x00)", peer, root,
origin, limit, replySizeMax
nOrigin=origin.len, nLimit=limit.len, replySizeMax
let
ctx = peer.networkState()
@ -229,7 +108,6 @@ p2pProtocol snap1(version = snapVersion,
requestResponse:
# User message 0x02: GetStorageRanges.
# Note: `origin` and `limit` differs from the specification to match Geth.
proc getStorageRanges(
peer: Peer;
root: Hash256;

View File

@ -45,7 +45,8 @@ proc getAccountRangeReq(
peer = buddy.peer
try:
let reply = await peer.getAccountRange(
root, iv.minPt.to(Hash256), iv.maxPt.to(Hash256), fetchRequestBytesLimit)
root, iv.minPt.to(Hash256).data, iv.maxPt.to(Hash256).data,
fetchRequestBytesLimit)
return ok(reply)
except CatchableError as e:
let error {.used.} = e.msg

View File

@ -393,8 +393,6 @@ proc hexaryNearbyRight*(
if topLink.isZero or not db.tab.hasKey(topLink):
return err(NearbyDanglingLink) # error
let nextNibble = rPath.tail[0].int8
if start and nextNibble < 15:
let nextNode = db.tab[topLink]
case nextNode.kind
of Leaf:
@ -404,6 +402,8 @@ proc hexaryNearbyRight*(
if rPath.tail <= nextNode.ePfx:
return rPath.completeLeast(topLink, db)
of Branch:
let nextNibble = rPath.tail[0].int8
if start and nextNibble < 15:
# Step down and complete with a branch link on the child node
step = RPathStep(
key: topLink,
@ -484,14 +484,14 @@ proc hexaryNearbyRight*(
if topLink.len == 0 or topLink.getFn().len == 0:
return err(NearbyDanglingLink) # error
let nextNibble = xPath.tail[0].int8
if nextNibble < 15:
let nextNodeRlp = rlpFromBytes topLink.getFn()
case nextNodeRlp.listLen:
of 2:
if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]:
return xPath.completeLeast(topLink, getFn)
of 17:
let nextNibble = xPath.tail[0].int8
if nextNibble < 15:
# Step down and complete with a branch link on the child node
step = XPathStep(
key: topLink,
@ -616,8 +616,6 @@ proc hexaryNearbyLeft*(
if topLink.isZero or not db.tab.hasKey(topLink):
return err(NearbyDanglingLink) # error
let nextNibble = rPath.tail[0].int8
if 0 < nextNibble:
let nextNode = db.tab[topLink]
case nextNode.kind
of Leaf:
@ -627,6 +625,8 @@ proc hexaryNearbyLeft*(
if nextNode.ePfx <= rPath.tail:
return rPath.completeMost(topLink, db)
of Branch:
let nextNibble = rPath.tail[0].int8
if 0 < nextNibble:
# Step down and complete with a branch link on the child node
step = RPathStep(
key: topLink,
@ -708,14 +708,14 @@ proc hexaryNearbyLeft*(
if topLink.len == 0 or topLink.getFn().len == 0:
return err(NearbyDanglingLink) # error
let nextNibble = xPath.tail[0].int8
if 0 < nextNibble:
let nextNodeRlp = rlpFromBytes topLink.getFn()
case nextNodeRlp.listLen:
of 2:
if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail:
return xPath.completeMost(topLink, getFn)
of 17:
let nextNibble = xPath.tail[0].int8
if 0 < nextNibble:
# Step down and complete with a branch link on the child node
step = XPathStep(
key: topLink,

View File

@ -12,6 +12,7 @@
import
std/[sequtils, sets, tables],
chronos,
eth/[common, p2p, trie/nibbles],
stew/[byteutils, interval_set],
../../../protocol,
@ -26,10 +27,18 @@ type
RangeProof* = object
base*: NodeTag ## No node between `base` and `leafs[0]`
leafs*: seq[RangeLeaf] ## List of consecutive leaf nodes
leafsLast*: bool ## If no leaf exceeds `max(base,leafs[])`
leafsSize*: int ## RLP encoded size of `leafs` on wire
proof*: seq[SnapProof] ## Boundary proof
proofSize*: int ## RLP encoded size of `proof` on wire
const
proofNodeSizeMax = 532
## Branch node with all branches `high(UInt256)` within RLP list
veryLongDuration = 60.weeks
## Longer than any collection of data will probably take
proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) {.gcsafe.}
proc hexaryRangeRlpSize*(blobLen: int): int {.gcsafe.}
@ -50,6 +59,14 @@ proc rlpPairSize(aLen: int; bRlpLen: int): int =
else:
high(int)
proc timeIsOver(stopAt: Moment): bool =
## Helper (avoids `chronos` import when running generic function)
stopAt <= chronos.Moment.now()
proc stopAt(timeout: chronos.Duration): Moment =
## Helper (avoids `chronos` import when running generic function)
chronos.Moment.now() + timeout
proc nonLeafPathNodes(
nodeTag: NodeTag; # Left boundary
rootKey: NodeKey|RepairKey; # State root
@ -88,6 +105,7 @@ template collectLeafs(
rootKey: NodeKey|RepairKey; # State root
iv: NodeTagRange; # Proofed range of leaf paths
nSizeLimit: int; # List of RLP encoded data must be smaller
stopAt: Moment; # limit search time
): auto =
## Collect trie database leafs prototype. This directive is provided as
## `template` for avoiding varying exception annotations.
@ -102,16 +120,16 @@ template collectLeafs(
rls: RangeProof
# Set up base node, the nearest node before `iv.minPt`
block:
if 0.to(NodeTag) < nodeTag:
let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
if rx.isOk:
rls.base = getPartialPath(rx.value).convertTo(NodeKey).to(NodeTag)
elif rx.error != NearbyFailed:
elif rx.error notin {NearbyFailed,NearbyEmptyPath}:
rc = typeof(rc).err(rx.error)
break body
# Fill leaf nodes from interval range unless size reached
while nodeTag <= nodeMax:
# Fill leaf nodes (at least one) from interval range unless size reached
while nodeTag <= nodeMax or rls.leafs.len == 0:
# The following logic might be sub-optimal. A strict version of the
# `next()` function that stops with an error at dangling links could
# be faster if the leaf nodes are not too far apart on the hexary trie.
@ -119,7 +137,11 @@ template collectLeafs(
xPath = block:
let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyRight(db)
if rx.isErr:
if rx.error notin {NearbyFailed,NearbyEmptyPath}:
rc = typeof(rc).err(rx.error)
else:
rls.leafsLast = true
rc = typeof(rc).ok(rls) # done ok, last node reached
break body
rx.value
rightKey = getPartialPath(xPath).convertTo(NodeKey)
@ -134,15 +156,18 @@ template collectLeafs(
let (pairLen,listLen) =
hexaryRangeRlpLeafListSize(xPath.leafData.len, rls.leafsSize)
if listLen < nSizeLimit:
if listLen <= nSizeLimit:
rls.leafsSize += pairLen
else:
break
break # collected enough
rls.leafs.add RangeLeaf(
key: rightKey,
data: xPath.leafData)
if timeIsOver(stopAt):
break # timeout
prevTag = nodeTag
nodeTag = rightTag + 1.u256
# End loop
@ -164,11 +189,13 @@ template updateProof(
): auto =
## Complement leafs list by adding proof nodes. This directive is provided as
## `template` for avoiding varying exception annotations.
var rp = rls
if 0.to(NodeTag) < rp.base or not rp.leafsLast:
var proof = allPathNodes(rls.base, rootKey, db)
if 0 < rls.leafs.len:
proof.incl nonLeafPathNodes(rls.leafs[^1].key.to(NodeTag), rootKey, db)
var rp = rls
rp.proof = toSeq(proof)
rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0)
@ -183,10 +210,11 @@ proc hexaryRangeLeafsProof*(
rootKey: NodeKey; # State root
iv: NodeTagRange; # Proofed range of leaf paths
nSizeLimit = high(int); # List of RLP encoded data must be smaller
timeout = veryLongDuration; # Limit retrieval time
): Result[RangeProof,HexaryError]
{.gcsafe, raises: [CatchableError]} =
## Collect trie database leafs prototype and add proof.
let rc = db.collectLeafs(rootKey, iv, nSizeLimit)
let rc = db.collectLeafs(rootKey, iv, nSizeLimit, stopAt(timeout))
if rc.isErr:
err(rc.error)
else:
@ -206,16 +234,6 @@ proc hexaryRangeLeafsProof*(
# Public helpers
# ------------------------------------------------------------------------------
proc to*(
rl: RangeLeaf;
T: type SnapAccount;
): T
{.gcsafe, raises: [RlpError]} =
## Convert the generic `RangeLeaf` argument to payload type.
T(accHash: rl.key.to(Hash256),
accBody: rl.data.decode(Account))
proc hexaryRangeRlpSize*(blobLen: int): int =
## Returns the size of RLP encoded <blob> of argument length `blobLen`.
if blobLen < 56:
@ -259,6 +277,15 @@ proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) =
else:
(pairLen, high(int))
proc hexaryRangeRlpNodesListSizeMax*(n: int): int =
## Maximal size needed to RLP-encode `n` nodes (handy for calculating the
## space needed to store proof nodes.)
const nMax = high(int) div proofNodeSizeMax
if n <= nMax:
hexaryRangeRlpSize(n * proofNodeSizeMax)
else:
high(int)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -137,6 +137,13 @@ proc init*(
## Constructor variant
HexaryTreeDbRef.init(ps.base)
proc init*(
T: type HexaryTreeDbRef;
): T =
## Constructor variant. It provides a `HexaryTreeDbRef()` with a key cache
## attached for pretty printing. So this one is mainly for debugging.
HexaryTreeDbRef.init(SnapDbRef())
# ---------------
proc init*(

View File

@ -136,8 +136,9 @@ proc accountsRangefetchImpl(
let error = rc.error
if await buddy.ctrl.stopAfterSeriousComError(error, buddy.only.errors):
when extraTraceMessages:
let reqLen {.used.} = $iv
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
reqLen=iv.len, error
reqLen, error
return
rc.value
@ -168,8 +169,9 @@ proc accountsRangefetchImpl(
# Bad data, just try another peer
buddy.ctrl.zombie = true
when extraTraceMessages:
let reqLen {.used.} = $iv
trace logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
gotAccounts, gotStorage, reqLen=iv.len, covered, error=rc.error
gotAccounts, gotStorage, reqLen, covered, error=rc.error
return
rc.value

View File

@ -118,7 +118,7 @@ proc test_calcProofsListSizes*() =
#echo "+++ ", n, " ", nodeBlobsEncoded.rlpFromBytes.inspect
#echo ">>> ", n, " ", nodeBlobsHex
#echo "<<< ", n, " ", brNodesHex
check nodeBlobsEncoded.len == n.proofNodesSizeMax
check nodeBlobsEncoded.len == n.hexaryRangeRlpNodesListSizeMax
check nodeBlobsDecoded == nodeSample
check nodeBlobsHex == brNodesHex