# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  std/[sequtils, strutils, tables],
  chronicles,
  eth/[common, trie/nibbles],
  stew/results,
  ../../range_desc,
  "."/[hexary_desc, hexary_paths]

{.push raises: [].}

logScope:
  topics = "snap-db"

type
  TrieNodeStatCtxRef* = ref object
    ## Context to resume searching for dangling links
    case persistent*: bool
    of true:
      hddCtx*: seq[(NodeKey,NibblesSeq)]
    else:
      memCtx*: seq[(RepairKey,NibblesSeq)]

  TrieNodeStat* = object
    ## Trie inspection report
    dangling*: seq[NodeSpecs]      ## Refers to nodes with incomplete refs
    count*: uint64                 ## Number of nodes visited
    level*: uint8                  ## Maximum nesting depth of dangling nodes
    stopped*: bool                 ## Potential loop detected if `true`
    resumeCtx*: TrieNodeStatCtxRef ## Context for resuming inspection
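
# For a resumption context, the work queue entries are (node reference,
# partial path) pairs as collected by the traversal below. As an illustrative
# sketch with hypothetical values, an in-memory context would look like
#   TrieNodeStatCtxRef(persistent: false, memCtx: @[(repairKey, trail)])
# while a persistent context carries `NodeKey` entries in `hddCtx` instead.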

const
  extraTraceMessages = false # or true

when extraTraceMessages:
  import stew/byteutils

# ------------------------------------------------------------------------------
# Private helpers, debugging
# ------------------------------------------------------------------------------

proc ppDangling(a: seq[NodeSpecs]; maxItems = 30): string =
  proc ppBlob(w: Blob): string =
    w.mapIt(it.toHex(2)).join.toLowerAscii
  let
    q = a.mapIt(it.partialPath.ppBlob)[0 ..< min(maxItems,a.len)]
    andMore = if maxItems < a.len: ", ..[#" & $a.len & "].." else: ""
  "{" & q.join(",") & andMore & "}"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc convertTo(key: RepairKey; T: type NodeKey): T =
  ## Might be lossy, check before use
  discard result.init(key.ByteArray33[1 .. 32])

proc convertTo(key: Blob; T: type NodeKey): T =
  ## Might be lossy, check before use
  discard result.init(key)
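
# The `discard` above means that a failed `init()` leaves the result at its
# default (all zero) value -- hence "check before use". Callers verify
# convertibility separately, e.g. via `isNodeKey` as in `to*()` below.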

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc processLink(
    db: HexaryTreeDbRef;
    stats: var TrieNodeStat;
    inspect: var seq[(RepairKey,NibblesSeq)];
    trail: NibblesSeq;
    child: RepairKey;
      ) =
  ## Helper for `hexaryInspectTrie()`
  if not child.isZero:
    if not child.isNodeKey:
      # Oops -- caught in the middle of a repair process? Just register
      # this node
      stats.dangling.add NodeSpecs(
        partialPath: trail.hexPrefixEncode(isLeaf = false))
    elif db.tab.hasKey(child):
      inspect.add (child,trail)
    else:
      # Node key not registered in the in-memory table: dangling link
      stats.dangling.add NodeSpecs(
        partialPath: trail.hexPrefixEncode(isLeaf = false),
        nodeKey: child.convertTo(NodeKey))

proc processLink(
    getFn: HexaryGetFn;
    stats: var TrieNodeStat;
    inspect: var seq[(NodeKey,NibblesSeq)];
    trail: NibblesSeq;
    child: Rlp;
      ) {.gcsafe, raises: [RlpError]} =
  ## Ditto
  if not child.isEmpty:
    let childBlob = child.toBytes
    if childBlob.len != 32:
      # Oops -- that is wrong, although the only sensible action is to
      # register the node and otherwise ignore it
      stats.dangling.add NodeSpecs(
        partialPath: trail.hexPrefixEncode(isLeaf = false))
    else:
      let childKey = childBlob.convertTo(NodeKey)
      if 0 < child.toBytes.getFn().len:
        inspect.add (childKey,trail)
      else:
        # Node key without data on the persistent database: dangling link
        stats.dangling.add NodeSpecs(
          partialPath: trail.hexPrefixEncode(isLeaf = false),
          nodeKey: childKey)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc to*(resumeCtx: TrieNodeStatCtxRef; T: type seq[NodeSpecs]): T =
  ## Convert resumption context to nodes that can be used otherwise. This
  ## function might be useful for error recovery.
  ##
  ## Note: In a non-persistent case, temporary `RepairKey` type node specs
  ## that cannot be converted to `NodeKey` type nodes are silently dropped.
  ## This should be no problem as a hexary trie with `RepairKey` type node
  ## refs must be repaired or discarded anyway.
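  ##
  ## A hedged usage sketch, with `stat` standing for a hypothetical
  ## `TrieNodeStat` as returned by `hexaryInspectTrie()` below:
  ## ::
  ##   if not stat.resumeCtx.isNil:
  ##     # Salvage the unfinished work queue, e.g. to feed a healing queue
  ##     let pending = stat.resumeCtx.to(seq[NodeSpecs])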
  if resumeCtx.persistent:
    for (key,trail) in resumeCtx.hddCtx:
      result.add NodeSpecs(
        partialPath: trail.hexPrefixEncode(isLeaf = false),
        nodeKey: key)
  else:
    for (key,trail) in resumeCtx.memCtx:
      if key.isNodeKey:
        result.add NodeSpecs(
          partialPath: trail.hexPrefixEncode(isLeaf = false),
          nodeKey: key.convertTo(NodeKey))

proc hexaryInspectTrie*(
    db: HexaryTreeDbRef;                 # Database
    root: NodeKey;                       # State root
    partialPaths: seq[Blob] = @[];       # Starting paths for search
    resumeCtx: TrieNodeStatCtxRef = nil; # Context for resuming inspection
    suspendAfter = high(uint64);         # To be resumed
    stopAtLevel = 64u8;                  # Width-first depth level
    maxDangling = high(int);             # Maximal number of dangling results
      ): TrieNodeStat
      {.gcsafe, raises: [KeyError]} =
  ## Starting with the argument list `partialPaths`, find all the non-leaf
  ## nodes in the hexary trie which have at least one node key reference
  ## missing in the trie database. The references for these nodes are
  ## collected and returned.
  ##
  ## * Argument `partialPaths` list entries that do not refer to an existing
  ##   and allocated hexary trie node are silently ignored. So are entries
  ##   that do not refer to either a valid extension or a branch type node.
  ##
  ## * This function traverses the hexary trie in *width-first* mode
  ##   simultaneously for any entry of the argument `partialPaths` list. Apart
  ##   from completing the search there are three conditions when the search
  ##   pauses to return the current state (via `resumeCtx`, see next bullet
  ##   point):
  ##   + The depth level of the running algorithm exceeds `stopAtLevel`.
  ##   + The number of visited nodes exceeds `suspendAfter`.
  ##   + The number of currently collected dangling nodes exceeds
  ##     `maxDangling`.
  ##   If the function pauses because the current depth exceeds `stopAtLevel`
  ##   then the `stopped` flag of the result object will be set, as well.
  ##
  ## * When paused for some of the reasons listed above, the `resumeCtx` field
  ##   of the result object contains the current state so that the function
  ##   can resume searching from where it paused. An application using this
  ##   feature could look like:
  ##   ::
  ##     var ctx = TrieNodeStatCtxRef()
  ##     while not ctx.isNil:
  ##       let state = hexaryInspectTrie(db, root, paths, resumeCtx=ctx, suspendAfter=1024)
  ##       ...
  ##       ctx = state.resumeCtx
  ##
  let rootKey = root.to(RepairKey)
  if not db.tab.hasKey(rootKey):
    return TrieNodeStat()

  var
    reVisit: seq[(RepairKey,NibblesSeq)]
    again: seq[(RepairKey,NibblesSeq)]
    resumeOk = false

  # Initialise lists from previous session
  if not resumeCtx.isNil and
     not resumeCtx.persistent and
     0 < resumeCtx.memCtx.len:
    resumeOk = true
    reVisit = resumeCtx.memCtx

  if partialPaths.len == 0 and not resumeOk:
    reVisit.add (rootKey,EmptyNibbleRange)
  else:
    # Add argument paths
    for w in partialPaths:
      let (isLeaf,nibbles) = hexPrefixDecode w
      if not isLeaf:
        let rc = nibbles.hexaryPathNodeKey(rootKey, db, missingOk=false)
        if rc.isOk:
          reVisit.add (rc.value.to(RepairKey), nibbles)

  # Stopping on `suspendAfter` has precedence over `stopAtLevel`
  while 0 < reVisit.len and result.count <= suspendAfter:
    if stopAtLevel < result.level:
      result.stopped = true
      break

    for n in 0 ..< reVisit.len:
      if suspendAfter < result.count or
         maxDangling <= result.dangling.len:
        # Swallow rest
        again &= reVisit[n ..< reVisit.len]
        break

      let
        (rKey, parentTrail) = reVisit[n]
        node = db.tab[rKey]
        # parent = rKey.convertTo(NodeKey) -- unused

      case node.kind:
      of Extension:
        let
          trail = parentTrail & node.ePfx
          child = node.eLink
        db.processLink(stats=result, inspect=again, trail, child)
      of Branch:
        for n in 0 ..< 16:
          let
            trail = parentTrail & @[n.byte].initNibbleRange.slice(1)
            child = node.bLink[n]
          db.processLink(stats=result, inspect=again, trail, child)
      of Leaf:
        # Oops, forget node and key
        discard

      result.count.inc
      # End `for`

    result.level.inc
    swap(reVisit, again)
    again.setLen(0)
    # End while

  if 0 < reVisit.len:
    result.resumeCtx = TrieNodeStatCtxRef(
      persistent: false,
      memCtx: reVisit)

proc hexaryInspectTrie*(
    getFn: HexaryGetFn;                  # Database abstraction
    rootKey: NodeKey;                    # State root
    partialPaths: seq[Blob] = @[];       # Starting paths for search
    resumeCtx: TrieNodeStatCtxRef = nil; # Context for resuming inspection
    suspendAfter = high(uint64);         # To be resumed
    stopAtLevel = 64u8;                  # Width-first depth level
    maxDangling = high(int);             # Maximal number of dangling results
      ): TrieNodeStat
      {.gcsafe, raises: [RlpError]} =
  ## Variant of `hexaryInspectTrie()` for persistent database.
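  ##
  ## A hedged usage sketch, assuming `getFn` wraps some persistent key-value
  ## store and returns an empty `Blob` for a missing key:
  ## ::
  ##   let stat = getFn.hexaryInspectTrie(rootKey, suspendAfter=4096)
  ##   if not stat.resumeCtx.isNil:
  ##     # Pick up where the first call was suspended
  ##     discard getFn.hexaryInspectTrie(rootKey, resumeCtx=stat.resumeCtx)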
  when extraTraceMessages:
    let nPaths = partialPaths.len

  let root = rootKey.to(Blob)
  if root.getFn().len == 0:
    when extraTraceMessages:
      trace "Hexary inspect: missing root", nPaths,
        rootKey=root.toHex
    return TrieNodeStat()

  var
    reVisit: seq[(NodeKey,NibblesSeq)]
    again: seq[(NodeKey,NibblesSeq)]
    resumeOk = false

  # Initialise lists from previous session
  if not resumeCtx.isNil and
     resumeCtx.persistent and
     0 < resumeCtx.hddCtx.len:
    resumeOk = true
    reVisit = resumeCtx.hddCtx

  if partialPaths.len == 0 and not resumeOk:
    reVisit.add (rootKey,EmptyNibbleRange)
  else:
    # Add argument paths
    for w in partialPaths:
      let (isLeaf,nibbles) = hexPrefixDecode w
      if not isLeaf:
        let rc = nibbles.hexaryPathNodeKey(rootKey, getFn, missingOk=false)
        if rc.isOk:
          reVisit.add (rc.value, nibbles)

  # Stopping on `suspendAfter` has precedence over `stopAtLevel`
  while 0 < reVisit.len and result.count <= suspendAfter:
    when extraTraceMessages:
      trace "Hexary inspect processing", nPaths,
        level=result.level, nReVisit=reVisit.len, nDangling=result.dangling.len

    if stopAtLevel < result.level:
      result.stopped = true
      break

    for n in 0 ..< reVisit.len:
      if suspendAfter < result.count or
         maxDangling <= result.dangling.len:
        # Swallow rest
        again = again & reVisit[n ..< reVisit.len]
        break

      let
        (parent, parentTrail) = reVisit[n]
        parentBlob = parent.to(Blob).getFn()
      if parentBlob.len == 0:
        # Oops, forget node and key
        continue

      let nodeRlp = rlpFromBytes parentBlob
      case nodeRlp.listLen:
      of 2:
        let (isLeaf,xPfx) = hexPrefixDecode nodeRlp.listElem(0).toBytes
        if not isLeaf:
          let
            trail = parentTrail & xPfx
            child = nodeRlp.listElem(1)
          getFn.processLink(stats=result, inspect=again, trail, child)
      of 17:
        for n in 0 ..< 16:
          let
            trail = parentTrail & @[n.byte].initNibbleRange.slice(1)
            child = nodeRlp.listElem(n)
          getFn.processLink(stats=result, inspect=again, trail, child)
      else:
        # Oops, forget node and key
        discard

      result.count.inc
      # End `for`

    result.level.inc
    swap(reVisit, again)
    again.setLen(0)
    # End while

  if 0 < reVisit.len:
    result.resumeCtx = TrieNodeStatCtxRef(
      persistent: true,
      hddCtx: reVisit)

  when extraTraceMessages:
    trace "Hexary inspect finished", nPaths,
      level=result.level, nResumeCtx=reVisit.len, nDangling=result.dangling.len,
      maxLevel=stopAtLevel, stopped=result.stopped

# ------------------------------------------------------------------------------
# Public functions, debugging
# ------------------------------------------------------------------------------

proc pp*(a: TrieNodeStat; db: HexaryTreeDbRef; maxItems = 30): string =
  result = "(" & $a.level & ","
  if a.stopped:
    result &= "stopped,"
  result &= $a.dangling.len & "," &
    a.dangling.ppDangling(maxItems) & ")"
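
# Illustrative output with hypothetical values: a report at nesting level 4
# with two dangling links renders as
#   (4,2,{00af,13c7})
# or, if the search was stopped on a suspected loop,
#   (4,stopped,2,{00af,13c7})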

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------