Rename and update dismantle => hexaryEnvelopeDecompose() (#1351)
* Rename and update dismantle => hexaryEnvelopeDecompose() why: + As for naming, a positive connotation is prefered + The unit tests were really insufficient + The function result was wrong on a few boundry conditions detail: + Extracted the function from `hexary_paths.nim` and re-implemented it together with other envelope functions => `hexary_envelope.nim` + Re-wrote docu for `hexaryEnvelopeDecompose()` * Relaxed right condition for `hexaryEnvelopeDecompose()` range argument why; Previously, the right point of the argument interval had to be a path to an allocated leaf node. While this is typically a given for accounts, it is easier to require an arbitrary range of paths (or keys) with the requirement of a `boundary proof` for left and right (i.e. enough nodes in the database to find the end points.) also: Bug fixes for related functions (typos, missing conditions etc.) * Add missing unit tests include file
This commit is contained in:
parent
a26a9f9ece
commit
85de03fd6e
|
@ -24,7 +24,7 @@ import
|
||||||
when not defined(release):
|
when not defined(release):
|
||||||
import
|
import
|
||||||
../../tracer,
|
../../tracer,
|
||||||
../../utils
|
../../utils/utils
|
||||||
|
|
||||||
type
|
type
|
||||||
PersistBlockFlag = enum
|
PersistBlockFlag = enum
|
||||||
|
@ -86,7 +86,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
|
||||||
when not defined(release):
|
when not defined(release):
|
||||||
if validationResult == ValidationResult.Error and
|
if validationResult == ValidationResult.Error and
|
||||||
body.transactions.calcTxRoot == header.txRoot:
|
body.transactions.calcTxRoot == header.txRoot:
|
||||||
dumpDebuggingMetaData(c.db, header, body, vmState)
|
dumpDebuggingMetaData(c.com, header, body, vmState)
|
||||||
warn "Validation error. Debugging metadata dumped."
|
warn "Validation error. Debugging metadata dumped."
|
||||||
|
|
||||||
if validationResult != ValidationResult.OK:
|
if validationResult != ValidationResult.OK:
|
||||||
|
|
|
@ -324,7 +324,10 @@ proc pp*(key: NodeKey): string =
|
||||||
proc pp*(key: NodeKey|RepairKey; db: HexaryTreeDbRef): string =
|
proc pp*(key: NodeKey|RepairKey; db: HexaryTreeDbRef): string =
|
||||||
key.ppImpl(db)
|
key.ppImpl(db)
|
||||||
|
|
||||||
proc pp*(w: RNodeRef|XNodeObj|RPathStep; db: HexaryTreeDbRef): string =
|
proc pp*(
|
||||||
|
w: RNodeRef|XNodeObj|RPathStep|XPathStep;
|
||||||
|
db: HexaryTreeDbRef;
|
||||||
|
): string =
|
||||||
w.ppImpl(db)
|
w.ppImpl(db)
|
||||||
|
|
||||||
proc pp*(w:openArray[RPathStep|XPathStep];db:HexaryTreeDbRef;indent=4): string =
|
proc pp*(w:openArray[RPathStep|XPathStep];db:HexaryTreeDbRef;indent=4): string =
|
||||||
|
@ -392,6 +395,10 @@ proc convertTo*(data: Blob; T: type NodeKey): T =
|
||||||
## Probably lossy conversion, use `init()` for safe conversion
|
## Probably lossy conversion, use `init()` for safe conversion
|
||||||
discard result.init(data)
|
discard result.init(data)
|
||||||
|
|
||||||
|
proc convertTo*(data: Blob; T: type NodeTag): T =
|
||||||
|
## Ditto for node tag
|
||||||
|
data.convertTo(NodeKey).to(NodeTag)
|
||||||
|
|
||||||
proc convertTo*(data: Blob; T: type RepairKey): T =
|
proc convertTo*(data: Blob; T: type RepairKey): T =
|
||||||
## Probably lossy conversion, use `init()` for safe conversion
|
## Probably lossy conversion, use `init()` for safe conversion
|
||||||
discard result.initImpl(data)
|
discard result.initImpl(data)
|
||||||
|
|
|
@ -0,0 +1,546 @@
|
||||||
|
# nimbus-eth1
|
||||||
|
# Copyright (c) 2021 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||||
|
# http://opensource.org/licenses/MIT)
|
||||||
|
# at your option. This file may not be copied, modified, or distributed
|
||||||
|
# except according to those terms.
|
||||||
|
|
||||||
|
import
|
||||||
|
std/[algorithm, sequtils, sets, tables],
|
||||||
|
eth/[common, trie/nibbles],
|
||||||
|
stew/[byteutils, interval_set],
|
||||||
|
../../range_desc,
|
||||||
|
"."/[hexary_desc, hexary_nearby, hexary_paths]
|
||||||
|
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc `==`(a, b: XNodeObj): bool =
|
||||||
|
if a.kind == b.kind:
|
||||||
|
case a.kind:
|
||||||
|
of Leaf:
|
||||||
|
return a.lPfx == b.lPfx and a.lData == b.lData
|
||||||
|
of Extension:
|
||||||
|
return a.ePfx == b.ePfx and a.eLink == b.eLink
|
||||||
|
of Branch:
|
||||||
|
return a.bLink == b.bLink
|
||||||
|
|
||||||
|
proc isZeroLink(a: Blob): bool =
|
||||||
|
## Persistent database has `Blob` as key
|
||||||
|
a.len == 0
|
||||||
|
|
||||||
|
proc isZeroLink(a: RepairKey): bool =
|
||||||
|
## Persistent database has `RepairKey` as key
|
||||||
|
a.isZero
|
||||||
|
|
||||||
|
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||||
|
## Might be lossy, check before use
|
||||||
|
discard result.init(key.ByteArray33[1 .. 32])
|
||||||
|
|
||||||
|
proc toNodeSpecs(nodeKey: RepairKey; partialPath: Blob): NodeSpecs =
|
||||||
|
NodeSpecs(
|
||||||
|
nodeKey: nodeKey.convertTo(NodeKey),
|
||||||
|
partialPath: partialPath)
|
||||||
|
|
||||||
|
proc toNodeSpecs(nodeKey: Blob; partialPath: Blob): NodeSpecs =
|
||||||
|
NodeSpecs(
|
||||||
|
nodeKey: nodeKey.convertTo(NodeKey),
|
||||||
|
partialPath: partialPath)
|
||||||
|
|
||||||
|
|
||||||
|
template noKeyErrorOops(info: static[string]; code: untyped) =
|
||||||
|
try:
|
||||||
|
code
|
||||||
|
except KeyError as e:
|
||||||
|
raiseAssert "Impossible KeyError (" & info & "): " & e.msg
|
||||||
|
|
||||||
|
template noRlpErrorOops(info: static[string]; code: untyped) =
|
||||||
|
try:
|
||||||
|
code
|
||||||
|
except RlpError as e:
|
||||||
|
raiseAssert "Impossible RlpError (" & info & "): " & e.msg
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private functions
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc padPartialPath(pfx: NibblesSeq; dblNibble: byte): NodeKey =
|
||||||
|
## Extend (or cut) `partialPath` nibbles sequence and generate `NodeKey`
|
||||||
|
# Pad with zeroes
|
||||||
|
var padded: NibblesSeq
|
||||||
|
|
||||||
|
let padLen = 64 - pfx.len
|
||||||
|
if 0 <= padLen:
|
||||||
|
padded = pfx & dblNibble.repeat(padlen div 2).initNibbleRange
|
||||||
|
if (padLen and 1) == 1:
|
||||||
|
padded = padded & @[dblNibble].initNibbleRange.slice(1)
|
||||||
|
else:
|
||||||
|
let nope = seq[byte].default.initNibbleRange
|
||||||
|
padded = pfx.slice(0,63) & nope # nope forces re-alignment
|
||||||
|
|
||||||
|
let bytes = padded.getBytes
|
||||||
|
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)
|
||||||
|
|
||||||
|
|
||||||
|
proc decomposeLeft(envPt, ivPt: RPath|XPath): Result[seq[NodeSpecs],void] =
|
||||||
|
## Helper for `hexaryEnvelopeDecompose()` for handling left side of
|
||||||
|
## envelope from partial path argument
|
||||||
|
#
|
||||||
|
# partialPath
|
||||||
|
# / \
|
||||||
|
# / \
|
||||||
|
# envPt.. -- envelope left end of partial path
|
||||||
|
# |
|
||||||
|
# ivPt.. -- `iv`, not fully covering left of `env`
|
||||||
|
#
|
||||||
|
var collect: seq[NodeSpecs]
|
||||||
|
block rightCurbEnvelope:
|
||||||
|
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len):
|
||||||
|
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]:
|
||||||
|
#
|
||||||
|
# At this point, the `node` entries of either `path[n]` step are
|
||||||
|
# the same. This is so because the predecessor steps were the same
|
||||||
|
# or were the `rootKey` in case n == 0.
|
||||||
|
#
|
||||||
|
# But then (`node` entries being equal) the only way for the
|
||||||
|
# `path[n]` steps to differ is in the entry selector `nibble` for
|
||||||
|
# a branch node.
|
||||||
|
#
|
||||||
|
for m in n ..< ivPt.path.len:
|
||||||
|
let
|
||||||
|
pfx = ivPt.getNibbles(0, m) # common path segment
|
||||||
|
top = ivPt.path[m].nibble # need nibbles smaller than top
|
||||||
|
#
|
||||||
|
# Incidentally for a non-`Branch` node, the value `top` becomes
|
||||||
|
# `-1` and the `for`- loop will be ignored (which is correct)
|
||||||
|
for nibble in 0 ..< top:
|
||||||
|
let nodeKey = ivPt.path[m].node.bLink[nibble]
|
||||||
|
if not nodeKey.isZeroLink:
|
||||||
|
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||||
|
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||||
|
break rightCurbEnvelope
|
||||||
|
#
|
||||||
|
# Fringe case, e.g. when `partialPath` is an empty prefix (aka `@[0]`)
|
||||||
|
# and the database has a single leaf node `(a,some-value)` where the
|
||||||
|
# `rootKey` is the hash of this node. In that case, `pMin == 0` and
|
||||||
|
# `pMax == high(NodeTag)` and `iv == [a,a]`.
|
||||||
|
#
|
||||||
|
return err()
|
||||||
|
|
||||||
|
ok(collect)
|
||||||
|
|
||||||
|
proc decomposeLeftDebug(
|
||||||
|
envPt, ivPt: RPath;
|
||||||
|
db: HexaryTreeDbRef;
|
||||||
|
): Result[seq[NodeSpecs],void] =
|
||||||
|
## Debugging only
|
||||||
|
var collect: seq[NodeSpecs]
|
||||||
|
block rightCurbEnvelope:
|
||||||
|
echo ">>> decomposeLeft",
|
||||||
|
" range 0..", min(envPt.path.len, ivPt.path.len),
|
||||||
|
"\n ", ivPt.pp(db)
|
||||||
|
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len):
|
||||||
|
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]:
|
||||||
|
for m in n ..< ivPt.path.len:
|
||||||
|
let
|
||||||
|
pfx = ivPt.getNibbles(0, m) # common path segment
|
||||||
|
top = ivPt.path[m].nibble # need nibbles smaller than top
|
||||||
|
echo ">>> decomposeLeft",
|
||||||
|
" len=", ivPt.path.len,
|
||||||
|
" m=", m,
|
||||||
|
" top=", top,
|
||||||
|
" pfx=", pfx,
|
||||||
|
" stepKey=", ivPt.path[m].pp(db)
|
||||||
|
for nibble in 0 ..< top:
|
||||||
|
let nodeKey = ivPt.path[m].node.bLink[nibble]
|
||||||
|
if not nodeKey.isZeroLink:
|
||||||
|
echo ">>> decomposeLeft",
|
||||||
|
" nibble=", nibble,
|
||||||
|
" nodeKey=", nodeKey.pp(db)
|
||||||
|
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||||
|
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||||
|
break rightCurbEnvelope
|
||||||
|
echo ">>> decomposeLeft oops"
|
||||||
|
return err()
|
||||||
|
|
||||||
|
ok(collect)
|
||||||
|
|
||||||
|
|
||||||
|
proc decomposeRight(envPt, ivPt: RPath|XPath): Result[seq[NodeSpecs],void] =
|
||||||
|
## Helper for `hexaryEnvelopeDecompose()` for handling right side of
|
||||||
|
## envelope from partial path argument
|
||||||
|
#
|
||||||
|
# partialPath
|
||||||
|
# / \
|
||||||
|
# / \
|
||||||
|
# .. envPt -- envelope right end of partial path
|
||||||
|
# |
|
||||||
|
# .. ivPt -- `iv`, not fully covering right of `env`
|
||||||
|
#
|
||||||
|
var collect: seq[NodeSpecs]
|
||||||
|
block leftCurbEnvelope:
|
||||||
|
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len):
|
||||||
|
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]:
|
||||||
|
for m in n ..< ivPt.path.len:
|
||||||
|
let
|
||||||
|
pfx = ivPt.getNibbles(0, m) # common path segment
|
||||||
|
base = ivPt.path[m].nibble # need nibbles greater/equal
|
||||||
|
if 0 <= base:
|
||||||
|
for nibble in base+1 .. 15:
|
||||||
|
let nodeKey = ivPt.path[m].node.bLink[nibble]
|
||||||
|
if not nodeKey.isZeroLink:
|
||||||
|
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||||
|
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||||
|
break leftCurbEnvelope
|
||||||
|
return err()
|
||||||
|
|
||||||
|
ok(collect)
|
||||||
|
|
||||||
|
|
||||||
|
proc decomposeImpl(
|
||||||
|
partialPath: Blob; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
iv: NodeTagRange; ## Proofed range of leaf paths
|
||||||
|
db: HexaryGetFn|HexaryTreeDbRef; ## Database abstraction
|
||||||
|
): Result[seq[NodeSpecs],void]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError,KeyError]} =
|
||||||
|
## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
|
||||||
|
let env = partialPath.hexaryEnvelope
|
||||||
|
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
|
||||||
|
return err()
|
||||||
|
|
||||||
|
var nodeSpex: seq[NodeSpecs]
|
||||||
|
|
||||||
|
# So ranges do overlap. The case that the `partialPath` envelope is fully
|
||||||
|
# contained in `iv` results in `@[]` which is implicitely handled by
|
||||||
|
# non-matching any of the cases, below.
|
||||||
|
if env.minPt < iv.minPt:
|
||||||
|
let
|
||||||
|
envPt = env.minPt.hexaryPath(rootKey, db)
|
||||||
|
# Make sure that the min point is the nearest node to the right
|
||||||
|
ivPt = block:
|
||||||
|
let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
rc.value
|
||||||
|
block:
|
||||||
|
let rc = envPt.decomposeLeft ivPt
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
nodeSpex &= rc.value
|
||||||
|
|
||||||
|
if iv.maxPt < env.maxPt:
|
||||||
|
let
|
||||||
|
envPt = env.maxPt.hexaryPath(rootKey, db)
|
||||||
|
ivPt = block:
|
||||||
|
let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
rc.value
|
||||||
|
block:
|
||||||
|
let rc = envPt.decomposeRight ivPt
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
nodeSpex &= rc.value
|
||||||
|
|
||||||
|
ok(nodeSpex)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, envelope constructor
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryEnvelope*(partialPath: Blob): NodeTagRange =
|
||||||
|
## Convert partial path to range of all concievable node keys starting with
|
||||||
|
## the partial path argument `partialPath`.
|
||||||
|
let pfx = partialPath.hexPrefixDecode[1]
|
||||||
|
NodeTagRange.new(
|
||||||
|
pfx.padPartialPath(0).to(NodeTag),
|
||||||
|
pfx.padPartialPath(255).to(NodeTag))
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryEnvelopeUniq*(
|
||||||
|
partialPaths: openArray[Blob];
|
||||||
|
): seq[Blob]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Sort and simplify a list of partial paths by sorting envelopes while
|
||||||
|
## removing nested entries.
|
||||||
|
var tab: Table[NodeTag,(Blob,bool)]
|
||||||
|
|
||||||
|
for w in partialPaths:
|
||||||
|
let iv = w.hexaryEnvelope
|
||||||
|
tab[iv.minPt] = (w,true) # begin entry
|
||||||
|
tab[iv.maxPt] = (@[],false) # end entry
|
||||||
|
|
||||||
|
# When sorted, nested entries look like
|
||||||
|
#
|
||||||
|
# 123000000.. (w0, true)
|
||||||
|
# 123400000.. (w1, true)
|
||||||
|
# 1234fffff.. (, false)
|
||||||
|
# 123ffffff.. (, false)
|
||||||
|
# ...
|
||||||
|
# 777000000.. (w2, true)
|
||||||
|
#
|
||||||
|
var level = 0
|
||||||
|
for key in toSeq(tab.keys).sorted(cmp):
|
||||||
|
let (w,begin) = tab[key]
|
||||||
|
if begin:
|
||||||
|
if level == 0:
|
||||||
|
result.add w
|
||||||
|
level.inc
|
||||||
|
else:
|
||||||
|
level.dec
|
||||||
|
|
||||||
|
proc hexaryEnvelopeUniq*(
|
||||||
|
nodes: openArray[NodeSpecs];
|
||||||
|
): seq[NodeSpecs]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Variant of `hexaryEnvelopeUniq` for sorting a `NodeSpecs` list by
|
||||||
|
## partial paths.
|
||||||
|
var tab: Table[NodeTag,(NodeSpecs,bool)]
|
||||||
|
|
||||||
|
for w in nodes:
|
||||||
|
let iv = w.partialPath.hexaryEnvelope
|
||||||
|
tab[iv.minPt] = (w,true) # begin entry
|
||||||
|
tab[iv.maxPt] = (NodeSpecs(),false) # end entry
|
||||||
|
|
||||||
|
var level = 0
|
||||||
|
for key in toSeq(tab.keys).sorted(cmp):
|
||||||
|
let (w,begin) = tab[key]
|
||||||
|
if begin:
|
||||||
|
if level == 0:
|
||||||
|
result.add w
|
||||||
|
level.inc
|
||||||
|
else:
|
||||||
|
level.dec
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryEnvelopeTouchedBy*(
|
||||||
|
rangeSet: NodeTagRangeSet; ## Set of intervals (aka ranges)
|
||||||
|
partialPath: Blob; ## Partial path for some node
|
||||||
|
): NodeTagRangeSet =
|
||||||
|
## For the envelope interval of the `partialPath` argument, this function
|
||||||
|
## returns the complete set of intervals from the argument set `rangeSet`
|
||||||
|
## that have a common point with the envelope (i.e. they are non-disjunct to
|
||||||
|
## the envelope.)
|
||||||
|
result = NodeTagRangeSet.init()
|
||||||
|
let probe = partialPath.hexaryEnvelope
|
||||||
|
|
||||||
|
if 0 < rangeSet.covered probe:
|
||||||
|
# Find an interval `start` that starts before the `probe` interval.
|
||||||
|
# Preferably, this interval is the rightmost one starting before `probe`.
|
||||||
|
var startSearch = low(NodeTag)
|
||||||
|
|
||||||
|
# Try least interval starting within or to the right of `probe`.
|
||||||
|
let rc = rangeSet.ge probe.minPt
|
||||||
|
if rc.isOk:
|
||||||
|
# Try predecessor
|
||||||
|
let rx = rangeSet.le rc.value.minPt
|
||||||
|
if rx.isOk:
|
||||||
|
# Predecessor interval starts before `probe`, e.g.
|
||||||
|
#
|
||||||
|
# .. [..rx..] [..rc..] ..
|
||||||
|
# [..probe..]
|
||||||
|
#
|
||||||
|
startSearch = rx.value.minPt
|
||||||
|
else:
|
||||||
|
# No predecessor, so `rc.value` is the very first interval, e.g.
|
||||||
|
#
|
||||||
|
# [..rc..] ..
|
||||||
|
# [..probe..]
|
||||||
|
#
|
||||||
|
startSearch = rc.value.minPt
|
||||||
|
else:
|
||||||
|
# No interval starts in or after `probe`.
|
||||||
|
#
|
||||||
|
# So, if an interval ends before the right end of `probe`, it must
|
||||||
|
# start before `probe`.
|
||||||
|
let rx = rangeSet.le probe.maxPt
|
||||||
|
if rx.isOk:
|
||||||
|
#
|
||||||
|
# .. [..rx..] ..
|
||||||
|
# [..probe..]
|
||||||
|
#
|
||||||
|
startSearch = rc.value.minPt
|
||||||
|
else:
|
||||||
|
# Otherwise there is no interval preceding `probe`, so the zero
|
||||||
|
# value for `start` will do the job, e.g.
|
||||||
|
#
|
||||||
|
# [.....rx......]
|
||||||
|
# [..probe..]
|
||||||
|
discard
|
||||||
|
|
||||||
|
# Collect intervals left-to-right for non-disjunct to `probe`
|
||||||
|
for w in increasing[NodeTag,UInt256](rangeSet, startSearch):
|
||||||
|
if (w * probe).isOk:
|
||||||
|
discard result.merge w
|
||||||
|
elif probe.maxPt < w.minPt:
|
||||||
|
break # all the `w` following will be disjuct, too
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, complement sub-tries
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryEnvelopeDecompose*(
|
||||||
|
partialPath: Blob; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
iv: NodeTagRange; ## Proofed range of leaf paths
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
): Result[seq[NodeSpecs],void]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## The idea of this function is to compute the difference of the envelope
|
||||||
|
## of a `partialPath` off the range `iv` and express the result as a
|
||||||
|
## list of envelopes (represented by nodes.)
|
||||||
|
##
|
||||||
|
## More formally, let the argument `partialPath` refer to an allocated node
|
||||||
|
## and the argument `iv` to a range of `NodeTag` points where left and right
|
||||||
|
## end have boundary proofs (see discussion below) in the database (e.g. as
|
||||||
|
## downloaded via the `snap/1` protocol.)
|
||||||
|
##
|
||||||
|
## Then this function returns a set `W` of partial paths (represented by
|
||||||
|
## nodes) where the envelope of each partial path in `W` has no common node
|
||||||
|
## key with `iv` (i.e. it is disjunct to the sub-range of `iv` where the
|
||||||
|
## boundaries are node keys.)
|
||||||
|
##
|
||||||
|
## This set `W` is maximal in the sense that for every every envelope of a
|
||||||
|
## partial path which is prefixed by the argument `partialPath` there exists
|
||||||
|
## an envelope implied by `W` that contains the former envelope, i.e.
|
||||||
|
##
|
||||||
|
## * if `p = partialPath & extension` with `hexaryEnvelope(p) * iv` has no
|
||||||
|
## node key in the hexary trie database
|
||||||
|
##
|
||||||
|
## * then there is a `w` in `W` with `hexaryEnvelope(p) <= hexaryEnvelope(w)`
|
||||||
|
##
|
||||||
|
## Although not required here (see `hexaryEnvelopeUniq()`) the set `W` will
|
||||||
|
## be minimal.
|
||||||
|
##
|
||||||
|
## Beware:
|
||||||
|
## Currently, the right end must be an exisiting node rather than come
|
||||||
|
## with a boundaty proof.
|
||||||
|
##
|
||||||
|
## Comparison with `hexaryInspect()`
|
||||||
|
## ---------------------------------
|
||||||
|
## The function `hexaryInspect()` implements a width-first search for
|
||||||
|
## dangling nodes starting at the state root (think of the cathode ray of
|
||||||
|
## a CRT.) For the sake of comparison with `hexaryEnvelopeDecompose()`, the
|
||||||
|
## search may be amended to ignore nodes the envelope of is fully contained
|
||||||
|
## in some range `iv`. For a fully allocated hexary trie, there will be at
|
||||||
|
## least one sub-trie of length `N` with leafs not in `iv`. So the number
|
||||||
|
## of nodes visited is O(16^N) for some `N` at most 63.
|
||||||
|
##
|
||||||
|
## The function `hexaryEnvelopeDecompose()` take the left or rightmost leaf
|
||||||
|
## path from `iv`, calculates a chain length `N` of nodes from the state
|
||||||
|
## root to the leaf, and for each node collects the links not pointing inside
|
||||||
|
## the range `iv`. The number of nodes visited is O(N).
|
||||||
|
##
|
||||||
|
## The results of both functions are not interchangeable, though. The first
|
||||||
|
## function `hexaryInspect()`, always returns dangling nodes if there are
|
||||||
|
## any in which case the hexary trie is incomplete and there will be no way
|
||||||
|
## to visit all nodes as they simply do not exist. But iteratively adding
|
||||||
|
## nodes or sub-tries and re-running this algorithm will end up with having
|
||||||
|
## all nodes visited.
|
||||||
|
##
|
||||||
|
## The other function `hexaryEnvelopeDecompose()` always returns the same
|
||||||
|
## result where some nodes might be dangling and may be treated similar to
|
||||||
|
## what was discussed in the previous paragraph. This function also reveals
|
||||||
|
## allocated nodes which might be checked for whether they exist fully or
|
||||||
|
## partially for another state root hexary trie.
|
||||||
|
##
|
||||||
|
## So both are sort of complementary where the function
|
||||||
|
## `hexaryEnvelopeDecompose()` is a fast one and `hexaryInspect()` the
|
||||||
|
## thorough one of last resort.
|
||||||
|
##
|
||||||
|
## Relation to boundary proofs
|
||||||
|
## ---------------------------
|
||||||
|
## The `boundary proof` for a range of leaf paths (e.g. account hashes) for
|
||||||
|
## a given state root is a set of nodes enough to construct the partial
|
||||||
|
## Merkel Patricia trie containing the leafs. If the given range is larger
|
||||||
|
## than the left or rightmost leaf paths, the `boundary proof` also implies
|
||||||
|
## that there is no other leaf path between the range boundary and the left
|
||||||
|
## or rightmost leaf path.
|
||||||
|
##
|
||||||
|
## Consider the result of the function `hexaryEnvelopeDecompose()` of an
|
||||||
|
## empty partial path (the envelope of represents `UIn256`) for a range `iv`.
|
||||||
|
## This result is a `boundary proof` for `iv` according to the definition
|
||||||
|
## above though it is highly redundant. All bottom level nodes with
|
||||||
|
## envelopes disjunct from `iv` can be removed for a `boundary proof`.
|
||||||
|
##
|
||||||
|
when false: # or true:
|
||||||
|
noRlpErrorOops("in-memory hexaryEnvelopeDecompose"):
|
||||||
|
return partialPath.decomposeImpl(rootKey, iv, db)
|
||||||
|
else:
|
||||||
|
let env = partialPath.hexaryEnvelope
|
||||||
|
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
|
||||||
|
return err()
|
||||||
|
|
||||||
|
var nodeSpex: seq[NodeSpecs]
|
||||||
|
if env.minPt < iv.minPt:
|
||||||
|
let
|
||||||
|
envPt = env.minPt.hexaryPath(rootKey, db)
|
||||||
|
# Make sure that the min point is the nearest node to the right
|
||||||
|
ivPt = block:
|
||||||
|
let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
rc.value
|
||||||
|
when false: # or true:
|
||||||
|
echo ">>> chop envelope right end => decomposeLeft",
|
||||||
|
"\n envPt=", env.minPt,
|
||||||
|
"\n ", envPt.pp(db),
|
||||||
|
"\n -----",
|
||||||
|
"\n ivPt=", iv.minPt,
|
||||||
|
"\n ", ivPt.pp(db)
|
||||||
|
block:
|
||||||
|
#let rc = envPt.decomposeLeftDebug(ivPt,db)
|
||||||
|
let rc = envPt.decomposeLeft(ivPt)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
nodeSpex &= rc.value
|
||||||
|
|
||||||
|
if iv.maxPt < env.maxPt:
|
||||||
|
let
|
||||||
|
envPt = env.maxPt.hexaryPath(rootKey, db)
|
||||||
|
ivPt = block:
|
||||||
|
let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
rc.value
|
||||||
|
when false: # or true:
|
||||||
|
echo ">>> chop envelope left end => decomposeRight",
|
||||||
|
"\n envPt=", env.maxPt,
|
||||||
|
"\n ", envPt.pp(db),
|
||||||
|
"\n -----",
|
||||||
|
"\n ivPt=", iv.maxPt,
|
||||||
|
"\n ", ivPt.pp(db)
|
||||||
|
block:
|
||||||
|
let rc = envPt.decomposeRight(ivPt)
|
||||||
|
if rc.isErr:
|
||||||
|
return err()
|
||||||
|
nodeSpex &= rc.value
|
||||||
|
|
||||||
|
ok(nodeSpex)
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryEnvelopeDecompose*(
|
||||||
|
partialPath: Blob; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
iv: NodeTagRange; ## Proofed range of leaf paths
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[seq[NodeSpecs],void]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `decompose()` for persistent database.
|
||||||
|
noKeyErrorOops("persistent hexaryEnvelopeDecompose"):
|
||||||
|
return partialPath.decomposeImpl(rootKey, iv, getFn)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# End
|
||||||
|
# ------------------------------------------------------------------------------
|
|
@ -26,6 +26,17 @@ type
|
||||||
TooManyProcessedChunks
|
TooManyProcessedChunks
|
||||||
TooManySlotAccounts
|
TooManySlotAccounts
|
||||||
|
|
||||||
|
# nearby/boundary proofs
|
||||||
|
NearbyExtensionError
|
||||||
|
NearbyBranchError
|
||||||
|
NearbyGarbledNode
|
||||||
|
NearbyNestingTooDeep
|
||||||
|
NearbyUnexpectedNode
|
||||||
|
NearbyFailed
|
||||||
|
NearbyEmptyPath
|
||||||
|
NearbyLeafExpected
|
||||||
|
NearbyDanglingLink
|
||||||
|
|
||||||
# import
|
# import
|
||||||
DifferentNodeValueExists
|
DifferentNodeValueExists
|
||||||
ExpectedNodeKeyDiffers
|
ExpectedNodeKeyDiffers
|
||||||
|
|
|
@ -9,7 +9,7 @@
|
||||||
# except according to those terms.
|
# except according to those terms.
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[hashes, sequtils, sets, tables],
|
std/tables,
|
||||||
chronicles,
|
chronicles,
|
||||||
eth/[common, trie/nibbles],
|
eth/[common, trie/nibbles],
|
||||||
stew/results,
|
stew/results,
|
||||||
|
@ -27,6 +27,15 @@ const
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
import stew/byteutils
|
import stew/byteutils
|
||||||
|
|
||||||
|
# --------
|
||||||
|
#
|
||||||
|
#import
|
||||||
|
# std/strutils,
|
||||||
|
# stew/byteutils
|
||||||
|
#
|
||||||
|
#proc pp(w: (RepairKey, NibblesSeq); db: HexaryTreeDbRef): string =
|
||||||
|
# "(" & $w[1] & "," & w[0].pp(db) & ")"
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private helpers
|
# Private helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -39,75 +48,6 @@ proc convertTo(key: Blob; T: type NodeKey): T =
|
||||||
## Might be lossy, check before use
|
## Might be lossy, check before use
|
||||||
discard result.init(key)
|
discard result.init(key)
|
||||||
|
|
||||||
proc doStepLink(step: RPathStep): Result[RepairKey,bool] =
|
|
||||||
## Helper for `hexaryInspectPath()` variant
|
|
||||||
case step.node.kind:
|
|
||||||
of Branch:
|
|
||||||
if step.nibble < 0:
|
|
||||||
return err(false) # indicates caller should try parent
|
|
||||||
return ok(step.node.bLink[step.nibble])
|
|
||||||
of Extension:
|
|
||||||
return ok(step.node.eLink)
|
|
||||||
of Leaf:
|
|
||||||
discard
|
|
||||||
err(true) # fully fail
|
|
||||||
|
|
||||||
proc doStepLink(step: XPathStep): Result[NodeKey,bool] =
|
|
||||||
## Helper for `hexaryInspectPath()` variant
|
|
||||||
case step.node.kind:
|
|
||||||
of Branch:
|
|
||||||
if step.nibble < 0:
|
|
||||||
return err(false) # indicates caller should try parent
|
|
||||||
return ok(step.node.bLink[step.nibble].convertTo(NodeKey))
|
|
||||||
of Extension:
|
|
||||||
return ok(step.node.eLink.convertTo(NodeKey))
|
|
||||||
of Leaf:
|
|
||||||
discard
|
|
||||||
err(true) # fully fail
|
|
||||||
|
|
||||||
|
|
||||||
proc hexaryInspectPathImpl(
|
|
||||||
db: HexaryTreeDbRef; ## Database
|
|
||||||
rootKey: RepairKey; ## State root
|
|
||||||
path: NibblesSeq; ## Starting path
|
|
||||||
): Result[RepairKey,void]
|
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
|
||||||
## Translate `path` into `RepairKey`
|
|
||||||
let steps = path.hexaryPath(rootKey,db)
|
|
||||||
if 0 < steps.path.len and steps.tail.len == 0:
|
|
||||||
block:
|
|
||||||
let rc = steps.path[^1].doStepLink()
|
|
||||||
if rc.isOk:
|
|
||||||
return ok(rc.value)
|
|
||||||
if rc.error or steps.path.len == 1:
|
|
||||||
return err()
|
|
||||||
block:
|
|
||||||
let rc = steps.path[^2].doStepLink()
|
|
||||||
if rc.isOk:
|
|
||||||
return ok(rc.value)
|
|
||||||
err()
|
|
||||||
|
|
||||||
proc hexaryInspectPathImpl(
|
|
||||||
getFn: HexaryGetFn; ## Database retrieval function
|
|
||||||
root: NodeKey; ## State root
|
|
||||||
path: NibblesSeq; ## Starting path
|
|
||||||
): Result[NodeKey,void]
|
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
|
||||||
## Translate `path` into `RepairKey`
|
|
||||||
let steps = path.hexaryPath(root,getFn)
|
|
||||||
if 0 < steps.path.len and steps.tail.len == 0:
|
|
||||||
block:
|
|
||||||
let rc = steps.path[^1].doStepLink()
|
|
||||||
if rc.isOk:
|
|
||||||
return ok(rc.value)
|
|
||||||
if rc.error or steps.path.len == 1:
|
|
||||||
return err()
|
|
||||||
block:
|
|
||||||
let rc = steps.path[^2].doStepLink()
|
|
||||||
if rc.isOk:
|
|
||||||
return ok(rc.value)
|
|
||||||
err()
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions
|
# Private functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -182,48 +122,6 @@ proc to*(resumeCtx: TrieNodeStatCtxRef; T: type seq[NodeSpecs]): T =
|
||||||
nodeKey: key.convertTo(NodeKey))
|
nodeKey: key.convertTo(NodeKey))
|
||||||
|
|
||||||
|
|
||||||
proc hexaryInspectPath*(
|
|
||||||
db: HexaryTreeDbRef; ## Database
|
|
||||||
root: NodeKey; ## State root
|
|
||||||
path: Blob; ## Starting path
|
|
||||||
): Result[NodeKey,void]
|
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
|
||||||
## Returns the `NodeKey` for a given path if there is any.
|
|
||||||
let (isLeaf,nibbles) = hexPrefixDecode path
|
|
||||||
if not isLeaf:
|
|
||||||
let rc = db.hexaryInspectPathImpl(root.to(RepairKey), nibbles)
|
|
||||||
if rc.isOk and rc.value.isNodeKey:
|
|
||||||
return ok(rc.value.convertTo(NodeKey))
|
|
||||||
err()
|
|
||||||
|
|
||||||
proc hexaryInspectPath*(
|
|
||||||
getFn: HexaryGetFn; ## Database abstraction
|
|
||||||
root: NodeKey; ## State root
|
|
||||||
path: Blob; ## Partial database path
|
|
||||||
): Result[NodeKey,void]
|
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
|
||||||
## Variant of `hexaryInspectPath()` for persistent database.
|
|
||||||
let (isLeaf,nibbles) = hexPrefixDecode path
|
|
||||||
if not isLeaf:
|
|
||||||
let rc = getFn.hexaryInspectPathImpl(root, nibbles)
|
|
||||||
if rc.isOk:
|
|
||||||
return ok(rc.value)
|
|
||||||
err()
|
|
||||||
|
|
||||||
proc hexaryInspectToKeys*(
|
|
||||||
db: HexaryTreeDbRef; ## Database
|
|
||||||
root: NodeKey; ## State root
|
|
||||||
paths: seq[Blob]; ## Paths segments
|
|
||||||
): HashSet[NodeKey]
|
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
|
||||||
## Convert a set of path segments to a node key set
|
|
||||||
paths.toSeq
|
|
||||||
.mapIt(db.hexaryInspectPath(root,it))
|
|
||||||
.filterIt(it.isOk)
|
|
||||||
.mapIt(it.value)
|
|
||||||
.toHashSet
|
|
||||||
|
|
||||||
|
|
||||||
proc hexaryInspectTrie*(
|
proc hexaryInspectTrie*(
|
||||||
db: HexaryTreeDbRef; ## Database
|
db: HexaryTreeDbRef; ## Database
|
||||||
root: NodeKey; ## State root
|
root: NodeKey; ## State root
|
||||||
|
@ -264,7 +162,7 @@ proc hexaryInspectTrie*(
|
||||||
numActions = 0u64
|
numActions = 0u64
|
||||||
resumeOk = false
|
resumeOk = false
|
||||||
|
|
||||||
# Initialise lists
|
# Initialise lists from previous session
|
||||||
if not resumeCtx.isNil and
|
if not resumeCtx.isNil and
|
||||||
not resumeCtx.persistent and
|
not resumeCtx.persistent and
|
||||||
0 < resumeCtx.memCtx.len:
|
0 < resumeCtx.memCtx.len:
|
||||||
|
@ -274,12 +172,13 @@ proc hexaryInspectTrie*(
|
||||||
if paths.len == 0 and not resumeOk:
|
if paths.len == 0 and not resumeOk:
|
||||||
reVisit.add (rootKey,EmptyNibbleRange)
|
reVisit.add (rootKey,EmptyNibbleRange)
|
||||||
else:
|
else:
|
||||||
|
# Add argument paths
|
||||||
for w in paths:
|
for w in paths:
|
||||||
let (isLeaf,nibbles) = hexPrefixDecode w
|
let (isLeaf,nibbles) = hexPrefixDecode w
|
||||||
if not isLeaf:
|
if not isLeaf:
|
||||||
let rc = db.hexaryInspectPathImpl(rootKey, nibbles)
|
let rc = nibbles.hexaryPathNodeKey(rootKey, db, missingOk=false)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
reVisit.add (rc.value,nibbles)
|
reVisit.add (rc.value.to(RepairKey), nibbles)
|
||||||
|
|
||||||
while 0 < reVisit.len and numActions <= suspendAfter:
|
while 0 < reVisit.len and numActions <= suspendAfter:
|
||||||
if stopAtLevel < result.level:
|
if stopAtLevel < result.level:
|
||||||
|
@ -353,7 +252,7 @@ proc hexaryInspectTrie*(
|
||||||
numActions = 0u64
|
numActions = 0u64
|
||||||
resumeOk = false
|
resumeOk = false
|
||||||
|
|
||||||
# Initialise lists
|
# Initialise lists from previous session
|
||||||
if not resumeCtx.isNil and
|
if not resumeCtx.isNil and
|
||||||
resumeCtx.persistent and
|
resumeCtx.persistent and
|
||||||
0 < resumeCtx.hddCtx.len:
|
0 < resumeCtx.hddCtx.len:
|
||||||
|
@ -363,12 +262,13 @@ proc hexaryInspectTrie*(
|
||||||
if paths.len == 0 and not resumeOk:
|
if paths.len == 0 and not resumeOk:
|
||||||
reVisit.add (rootKey,EmptyNibbleRange)
|
reVisit.add (rootKey,EmptyNibbleRange)
|
||||||
else:
|
else:
|
||||||
|
# Add argument paths
|
||||||
for w in paths:
|
for w in paths:
|
||||||
let (isLeaf,nibbles) = hexPrefixDecode w
|
let (isLeaf,nibbles) = hexPrefixDecode w
|
||||||
if not isLeaf:
|
if not isLeaf:
|
||||||
let rc = getFn.hexaryInspectPathImpl(rootKey, nibbles)
|
let rc = nibbles.hexaryPathNodeKey(rootKey, getFn, missingOk=false)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
reVisit.add (rc.value,nibbles)
|
reVisit.add (rc.value, nibbles)
|
||||||
|
|
||||||
while 0 < reVisit.len and numActions <= suspendAfter:
|
while 0 < reVisit.len and numActions <= suspendAfter:
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
|
|
|
@ -53,15 +53,6 @@ proc dup(node: RNodeRef): RNodeRef =
|
||||||
new result
|
new result
|
||||||
result[] = node[]
|
result[] = node[]
|
||||||
|
|
||||||
proc hexaryPath(
|
|
||||||
tag: NodeTag;
|
|
||||||
root: NodeKey;
|
|
||||||
db: HexaryTreeDbRef;
|
|
||||||
): RPath
|
|
||||||
{.gcsafe, raises: [Defect,KeyError].} =
|
|
||||||
## Shortcut
|
|
||||||
tag.to(NodeKey).hexaryPath(root.to(RepairKey), db)
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private getters & setters
|
# Private getters & setters
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
|
@ -0,0 +1,734 @@
|
||||||
|
# nimbus-eth1
|
||||||
|
# Copyright (c) 2021 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||||
|
# http://opensource.org/licenses/MIT)
|
||||||
|
# at your option. This file may not be copied, modified, or distributed
|
||||||
|
# except according to those terms.
|
||||||
|
|
||||||
|
|
||||||
|
import
|
||||||
|
std/tables,
|
||||||
|
eth/[common, trie/nibbles],
|
||||||
|
stew/results,
|
||||||
|
../../range_desc,
|
||||||
|
"."/[hexary_desc, hexary_error, hexary_paths]
|
||||||
|
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(path: RPath; db: HexaryTreeDbRef;
|
||||||
|
): Result[RPath,HexaryError] {.gcsafe, raises: [Defect,KeyError]}
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn;
|
||||||
|
): Result[XPath,HexaryError] {.gcsafe, raises: [Defect,RlpError]}
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc toBranchNode(
|
||||||
|
rlp: Rlp
|
||||||
|
): XNodeObj
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
var rlp = rlp
|
||||||
|
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
|
||||||
|
|
||||||
|
proc toLeafNode(
|
||||||
|
rlp: Rlp;
|
||||||
|
pSegm: NibblesSeq
|
||||||
|
): XNodeObj
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
|
||||||
|
|
||||||
|
proc toExtensionNode(
|
||||||
|
rlp: Rlp;
|
||||||
|
pSegm: NibblesSeq
|
||||||
|
): XNodeObj
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
|
||||||
|
|
||||||
|
proc `<=`(a, b: NibblesSeq): bool =
|
||||||
|
## Compare nibbles, different lengths are padded to the right with zeros
|
||||||
|
let abMin = min(a.len, b.len)
|
||||||
|
for n in 0 ..< abMin:
|
||||||
|
if a[n] < b[n]:
|
||||||
|
return true
|
||||||
|
if b[n] < a[n]:
|
||||||
|
return false
|
||||||
|
# otherwise a[n] == b[n]
|
||||||
|
|
||||||
|
# Assuming zero for missing entries
|
||||||
|
if b.len < a.len:
|
||||||
|
for n in abMin + 1 ..< a.len:
|
||||||
|
if 0 < a[n]:
|
||||||
|
return false
|
||||||
|
true
|
||||||
|
|
||||||
|
proc `<`(a, b: NibblesSeq): bool =
|
||||||
|
not (b <= a)
|
||||||
|
|
||||||
|
|
||||||
|
template noKeyErrorOops(info: static[string]; code: untyped) =
|
||||||
|
try:
|
||||||
|
code
|
||||||
|
except KeyError as e:
|
||||||
|
raiseAssert "Impossible KeyError (" & info & "): " & e.msg
|
||||||
|
|
||||||
|
template noRlpErrorOops(info: static[string]; code: untyped) =
|
||||||
|
try:
|
||||||
|
code
|
||||||
|
except RlpError as e:
|
||||||
|
raiseAssert "Impossible RlpError (" & info & "): " & e.msg
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private functions, wrappers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryNearbyRightImpl(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
|
||||||
|
## Wrapper
|
||||||
|
let path = block:
|
||||||
|
let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyRight(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err(rc.error)
|
||||||
|
rc.value
|
||||||
|
|
||||||
|
if 0 < path.path.len and path.path[^1].node.kind == Leaf:
|
||||||
|
let nibbles = path.getNibbles
|
||||||
|
if nibbles.len == 64:
|
||||||
|
return ok(nibbles.getBytes.convertTo(NodeTag))
|
||||||
|
|
||||||
|
err(NearbyLeafExpected)
|
||||||
|
|
||||||
|
proc hexaryNearbyLeftImpl(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
|
||||||
|
## Wrapper
|
||||||
|
let path = block:
|
||||||
|
let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
|
||||||
|
if rc.isErr:
|
||||||
|
return err(rc.error)
|
||||||
|
rc.value
|
||||||
|
|
||||||
|
if 0 < path.path.len and path.path[^1].node.kind == Leaf:
|
||||||
|
let nibbles = path.getNibbles
|
||||||
|
if nibbles.len == 64:
|
||||||
|
return ok(nibbles.getBytes.convertTo(NodeTag))
|
||||||
|
|
||||||
|
err(NearbyLeafExpected)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private functions
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc completeLeast(
|
||||||
|
path: RPath;
|
||||||
|
key: RepairKey;
|
||||||
|
db: HexaryTreeDbRef;
|
||||||
|
pathLenMax = 64;
|
||||||
|
): Result[RPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError].} =
|
||||||
|
## Extend path using least nodes without recursion.
|
||||||
|
var rPath = RPath(path: path.path)
|
||||||
|
|
||||||
|
if not db.tab.hasKey(key):
|
||||||
|
return err(NearbyDanglingLink)
|
||||||
|
var
|
||||||
|
key = key
|
||||||
|
node = db.tab[key]
|
||||||
|
|
||||||
|
while rPath.path.len < pathLenMax:
|
||||||
|
case node.kind:
|
||||||
|
of Leaf:
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: -1)
|
||||||
|
return ok(rPath) # done
|
||||||
|
|
||||||
|
of Extension:
|
||||||
|
block useExtensionLink:
|
||||||
|
let newKey = node.eLink
|
||||||
|
if not newkey.isZero:
|
||||||
|
if db.tab.hasKey(newKey):
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: -1)
|
||||||
|
key = newKey
|
||||||
|
node = db.tab[key]
|
||||||
|
break useExtensionLink
|
||||||
|
return err(NearbyExtensionError) # Oops, no way
|
||||||
|
|
||||||
|
of Branch:
|
||||||
|
block findBranchLink:
|
||||||
|
for inx in 0 .. 15:
|
||||||
|
let newKey = node.bLink[inx]
|
||||||
|
if not newKey.isZero:
|
||||||
|
if db.tab.hasKey(newKey):
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: inx.int8)
|
||||||
|
key = newKey
|
||||||
|
node = db.tab[key]
|
||||||
|
break findBranchLink
|
||||||
|
return err(NearbyBranchError) # Oops, no way
|
||||||
|
|
||||||
|
err(NearbyNestingTooDeep)
|
||||||
|
|
||||||
|
|
||||||
|
proc completeLeast(
|
||||||
|
path: XPath;
|
||||||
|
key: Blob;
|
||||||
|
getFn: HexaryGetFn;
|
||||||
|
pathLenMax = 64;
|
||||||
|
): Result[XPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError].} =
|
||||||
|
## Variant of `completeLeast()` for persistent database
|
||||||
|
var xPath = XPath(path: path.path)
|
||||||
|
|
||||||
|
if key.getFn().len == 0:
|
||||||
|
return err(NearbyDanglingLink)
|
||||||
|
var
|
||||||
|
key = key
|
||||||
|
nodeRlp = rlpFromBytes key.getFn()
|
||||||
|
|
||||||
|
while xPath.path.len < pathLenMax:
|
||||||
|
case nodeRlp.listLen:
|
||||||
|
of 2:
|
||||||
|
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||||
|
if isLeaf:
|
||||||
|
let node = nodeRlp.toLeafNode(pathSegment)
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
|
return ok(xPath) # done
|
||||||
|
|
||||||
|
# Extension
|
||||||
|
block useExtensionLink:
|
||||||
|
let
|
||||||
|
node = nodeRlp.toExtensionNode(pathSegment)
|
||||||
|
newKey = node.eLink
|
||||||
|
if 0 < newKey.len:
|
||||||
|
let newNode = newKey.getFn()
|
||||||
|
if 0 < newNode.len:
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
|
key = newKey
|
||||||
|
nodeRlp = rlpFromBytes newNode
|
||||||
|
break useExtensionLink
|
||||||
|
return err(NearbyExtensionError) # Oops, no way
|
||||||
|
|
||||||
|
of 17:
|
||||||
|
block findBranchLink:
|
||||||
|
let node = nodeRlp.toBranchNode()
|
||||||
|
for inx in 0 .. 15:
|
||||||
|
let newKey = node.bLink[inx]
|
||||||
|
if 0 < newKey.len:
|
||||||
|
let newNode = newKey.getFn()
|
||||||
|
if 0 < newNode.len:
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: inx.int8)
|
||||||
|
key = newKey
|
||||||
|
nodeRlp = rlpFromBytes newNode
|
||||||
|
break findBranchLink
|
||||||
|
return err(NearbyBranchError) # Oops, no way
|
||||||
|
|
||||||
|
else:
|
||||||
|
return err(NearbyGarbledNode) # Oops, no way
|
||||||
|
|
||||||
|
err(NearbyNestingTooDeep)
|
||||||
|
|
||||||
|
|
||||||
|
proc completeMost(
|
||||||
|
path: RPath;
|
||||||
|
key: RepairKey;
|
||||||
|
db: HexaryTreeDbRef;
|
||||||
|
pathLenMax = 64;
|
||||||
|
): Result[RPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError].} =
|
||||||
|
## Extend path using max nodes without recursion.
|
||||||
|
var rPath = RPath(path: path.path)
|
||||||
|
|
||||||
|
if not db.tab.hasKey(key):
|
||||||
|
return err(NearbyDanglingLink)
|
||||||
|
var
|
||||||
|
key = key
|
||||||
|
node = db.tab[key]
|
||||||
|
|
||||||
|
while rPath.path.len < pathLenMax:
|
||||||
|
case node.kind:
|
||||||
|
of Leaf:
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: -1)
|
||||||
|
return ok(rPath) # done
|
||||||
|
|
||||||
|
of Extension:
|
||||||
|
block useExtensionLink:
|
||||||
|
let newKey = node.eLink
|
||||||
|
if not newkey.isZero:
|
||||||
|
if db.tab.hasKey(newKey):
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: -1)
|
||||||
|
key = newKey
|
||||||
|
node = db.tab[newKey]
|
||||||
|
break useExtensionLink
|
||||||
|
return err(NearbyExtensionError) # Oops, no way
|
||||||
|
|
||||||
|
of Branch:
|
||||||
|
block findBranchLink:
|
||||||
|
for inx in 15.countDown(0):
|
||||||
|
let newKey = node.bLink[inx]
|
||||||
|
if not newKey.isZero:
|
||||||
|
if db.tab.hasKey(newKey):
|
||||||
|
rPath.path.add RPathStep(key: key, node: node, nibble: inx.int8)
|
||||||
|
key = newKey
|
||||||
|
node = db.tab[key]
|
||||||
|
break findBranchLink
|
||||||
|
return err(NearbyBranchError) # Oops, no way
|
||||||
|
|
||||||
|
err(NearbyNestingTooDeep)
|
||||||
|
|
||||||
|
proc completeMost(
|
||||||
|
path: XPath;
|
||||||
|
key: Blob;
|
||||||
|
getFn: HexaryGetFn;
|
||||||
|
pathLenMax = 64;
|
||||||
|
): Result[XPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError].} =
|
||||||
|
## Variant of `completeLeast()` for persistent database
|
||||||
|
var xPath = XPath(path: path.path)
|
||||||
|
|
||||||
|
if key.getFn().len == 0:
|
||||||
|
return err(NearbyDanglingLink)
|
||||||
|
var
|
||||||
|
key = key
|
||||||
|
nodeRlp = rlpFromBytes key.getFn()
|
||||||
|
|
||||||
|
while xPath.path.len < pathLenMax:
|
||||||
|
case nodeRlp.listLen:
|
||||||
|
of 2:
|
||||||
|
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||||
|
if isLeaf:
|
||||||
|
let node = nodeRlp.toLeafNode(pathSegment)
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
|
return ok(xPath) # done
|
||||||
|
|
||||||
|
# Extension
|
||||||
|
block useExtensionLink:
|
||||||
|
let
|
||||||
|
node = nodeRlp.toExtensionNode(pathSegment)
|
||||||
|
newKey = node.eLink
|
||||||
|
if 0 < newKey.len:
|
||||||
|
let newNode = newKey.getFn()
|
||||||
|
if 0 < newNode.len:
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
|
key = newKey
|
||||||
|
nodeRlp = rlpFromBytes newNode
|
||||||
|
break useExtensionLink
|
||||||
|
return err(NearbyExtensionError) # Oops, no way
|
||||||
|
|
||||||
|
of 17:
|
||||||
|
block findBranchLink:
|
||||||
|
let node = nodeRlp.toBranchNode()
|
||||||
|
for inx in 15.countDown(0):
|
||||||
|
let newKey = node.bLink[inx]
|
||||||
|
if 0 < newKey.len:
|
||||||
|
let newNode = newKey.getFn()
|
||||||
|
if 0 < newNode.len:
|
||||||
|
xPath.path.add XPathStep(key: key, node: node, nibble: inx.int8)
|
||||||
|
key = newKey
|
||||||
|
nodeRlp = rlpFromBytes newNode
|
||||||
|
break findBranchLink
|
||||||
|
return err(NearbyBranchError) # Oops, no way
|
||||||
|
|
||||||
|
else:
|
||||||
|
return err(NearbyGarbledNode) # Oops, no way
|
||||||
|
|
||||||
|
err(NearbyNestingTooDeep)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, left boundary proofs (moving right)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(
|
||||||
|
path: RPath; ## Partially expanded path
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
): Result[RPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Extends the maximally extended argument nodes `path` to the right (i.e.
|
||||||
|
## with non-decreasing path value). This is similar to the
|
||||||
|
## `hexary_path.next()` function, only that this algorithm does not
|
||||||
|
## backtrack if there are dangling links in between and rather returns
|
||||||
|
## a error.
|
||||||
|
##
|
||||||
|
## This code is intended be used for verifying a left-bound proof to verify
|
||||||
|
## that there is no leaf node.
|
||||||
|
|
||||||
|
# Some easy cases
|
||||||
|
if path.path.len == 0:
|
||||||
|
return err(NearbyEmptyPath) # error
|
||||||
|
if path.path[^1].node.kind == Leaf:
|
||||||
|
return ok(path)
|
||||||
|
|
||||||
|
var rPath = path
|
||||||
|
while 0 < rPath.path.len:
|
||||||
|
let top = rPath.path[^1]
|
||||||
|
if top.node.kind != Branch or
|
||||||
|
top.nibble < 0 or
|
||||||
|
rPath.tail.len == 0:
|
||||||
|
return err(NearbyUnexpectedNode) # error
|
||||||
|
|
||||||
|
let topLink = top.node.bLink[top.nibble]
|
||||||
|
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||||
|
return err(NearbyDanglingLink) # error
|
||||||
|
|
||||||
|
let nextNibble = rPath.tail[0].int8
|
||||||
|
if nextNibble < 15:
|
||||||
|
let
|
||||||
|
nextNode = db.tab[topLink]
|
||||||
|
rPathLen = rPath.path.len # in case of backtracking
|
||||||
|
rPathTail = rPath.tail
|
||||||
|
case nextNode.kind
|
||||||
|
of Leaf:
|
||||||
|
if rPath.tail <= nextNode.lPfx:
|
||||||
|
return rPath.completeLeast(topLink, db)
|
||||||
|
of Extension:
|
||||||
|
if rPath.tail <= nextNode.ePfx:
|
||||||
|
return rPath.completeLeast(topLink, db)
|
||||||
|
of Branch:
|
||||||
|
# Step down and complete with a branch link on the child node
|
||||||
|
rPath.path = rPath.path & RPathStep(
|
||||||
|
key: topLink,
|
||||||
|
node: nextNode,
|
||||||
|
nibble: nextNibble)
|
||||||
|
|
||||||
|
# Find the next item to the right of the new top entry
|
||||||
|
let step = rPath.path[^1]
|
||||||
|
for inx in (step.nibble + 1) .. 15:
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if not link.isZero:
|
||||||
|
rPath.path[^1].nibble = inx.int8
|
||||||
|
return rPath.completeLeast(link, db)
|
||||||
|
|
||||||
|
# Restore `rPath` and backtrack
|
||||||
|
rPath.path.setLen(rPathLen)
|
||||||
|
rPath.tail = rPathTail
|
||||||
|
|
||||||
|
# Pop `Branch` node on top and append nibble to `tail`
|
||||||
|
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||||
|
rPath.path.setLen(rPath.path.len - 1)
|
||||||
|
|
||||||
|
# Pathological case: nfffff.. for n < f
|
||||||
|
var step = path.path[0]
|
||||||
|
for inx in (step.nibble + 1) .. 15:
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if not link.isZero:
|
||||||
|
step.nibble = inx.int8
|
||||||
|
rPath.path = @[step]
|
||||||
|
return rPath.completeLeast(link, db)
|
||||||
|
|
||||||
|
err(NearbyFailed) # error
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(
|
||||||
|
path: XPath; ## Partially expanded path
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[XPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryNearbyRight()` for persistant database
|
||||||
|
|
||||||
|
# Some easy cases
|
||||||
|
if path.path.len == 0:
|
||||||
|
return err(NearbyEmptyPath) # error
|
||||||
|
if path.path[^1].node.kind == Leaf:
|
||||||
|
return ok(path)
|
||||||
|
|
||||||
|
var xPath = path
|
||||||
|
while 0 < xPath.path.len:
|
||||||
|
let top = xPath.path[^1]
|
||||||
|
if top.node.kind != Branch or
|
||||||
|
top.nibble < 0 or
|
||||||
|
xPath.tail.len == 0:
|
||||||
|
return err(NearbyUnexpectedNode) # error
|
||||||
|
|
||||||
|
let topLink = top.node.bLink[top.nibble]
|
||||||
|
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||||
|
return err(NearbyDanglingLink) # error
|
||||||
|
|
||||||
|
let nextNibble = xPath.tail[0].int8
|
||||||
|
if nextNibble < 15:
|
||||||
|
let
|
||||||
|
nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||||
|
xPathLen = xPath.path.len # in case of backtracking
|
||||||
|
xPathTail = xPath.tail
|
||||||
|
case nextNodeRlp.listLen:
|
||||||
|
of 2:
|
||||||
|
if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]:
|
||||||
|
return xPath.completeLeast(topLink, getFn)
|
||||||
|
of 17:
|
||||||
|
# Step down and complete with a branch link on the child node
|
||||||
|
xPath.path = xPath.path & XPathStep(
|
||||||
|
key: topLink,
|
||||||
|
node: nextNodeRlp.toBranchNode,
|
||||||
|
nibble: nextNibble)
|
||||||
|
else:
|
||||||
|
return err(NearbyGarbledNode) # error
|
||||||
|
|
||||||
|
# Find the next item to the right of the new top entry
|
||||||
|
let step = xPath.path[^1]
|
||||||
|
for inx in (step.nibble + 1) .. 15:
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if 0 < link.len:
|
||||||
|
xPath.path[^1].nibble = inx.int8
|
||||||
|
return xPath.completeLeast(link, getFn)
|
||||||
|
|
||||||
|
# Restore `xPath` and backtrack
|
||||||
|
xPath.path.setLen(xPathLen)
|
||||||
|
xPath.tail = xPathTail
|
||||||
|
|
||||||
|
# Pop `Branch` node on top and append nibble to `tail`
|
||||||
|
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||||
|
xPath.path.setLen(xPath.path.len - 1)
|
||||||
|
|
||||||
|
# Pathological case: nfffff.. for n < f
|
||||||
|
var step = path.path[0]
|
||||||
|
for inx in (step.nibble + 1) .. 15:
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if 0 < link.len:
|
||||||
|
step.nibble = inx.int8
|
||||||
|
xPath.path = @[step]
|
||||||
|
return xPath.completeLeast(link, getFn)
|
||||||
|
|
||||||
|
err(NearbyFailed) # error
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryNearbyRightMissing*(
|
||||||
|
path: RPath;
|
||||||
|
db: HexaryTreeDbRef;
|
||||||
|
): bool
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Returns `true` if the maximally extended argument nodes `path` is the
|
||||||
|
## rightmost on the hexary trie database. It verifies that there is no more
|
||||||
|
## leaf entry to the right of the argument `path`.
|
||||||
|
##
|
||||||
|
## This code is intended be used for verifying a left-bound proof.
|
||||||
|
if 0 < path.path.len and 0 < path.tail.len:
|
||||||
|
let top = path.path[^1]
|
||||||
|
if top.node.kind == Branch and 0 <= top.nibble:
|
||||||
|
|
||||||
|
let topLink = top.node.bLink[top.nibble]
|
||||||
|
if not topLink.isZero and db.tab.hasKey(topLink):
|
||||||
|
let
|
||||||
|
nextNibble = path.tail[0]
|
||||||
|
nextNode = db.tab[topLink]
|
||||||
|
|
||||||
|
case nextNode.kind
|
||||||
|
of Leaf:
|
||||||
|
return nextNode.lPfx < path.tail
|
||||||
|
|
||||||
|
of Extension:
|
||||||
|
return nextNode.ePfx < path.tail
|
||||||
|
|
||||||
|
of Branch:
|
||||||
|
# Step down and verify that there is no branch link
|
||||||
|
for inx in nextNibble .. 15:
|
||||||
|
if not nextNode.bLink[inx].isZero:
|
||||||
|
return false
|
||||||
|
return true
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, right boundary proofs (moving left)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryNearbyLeft*(
|
||||||
|
path: RPath; ## Partially expanded path
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
): Result[RPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Similar to `hexaryNearbyRight()`.
|
||||||
|
##
|
||||||
|
## This code is intended be used for verifying a right-bound proof to verify
|
||||||
|
## that there is no leaf node.
|
||||||
|
|
||||||
|
# Some easy cases
|
||||||
|
if path.path.len == 0:
|
||||||
|
return err(NearbyEmptyPath) # error
|
||||||
|
if path.path[^1].node.kind == Leaf:
|
||||||
|
return ok(path)
|
||||||
|
|
||||||
|
var rPath = path
|
||||||
|
while 0 < rPath.path.len:
|
||||||
|
let top = rPath.path[^1]
|
||||||
|
if top.node.kind != Branch or
|
||||||
|
top.nibble < 0 or
|
||||||
|
rPath.tail.len == 0:
|
||||||
|
return err(NearbyUnexpectedNode) # error
|
||||||
|
|
||||||
|
let topLink = top.node.bLink[top.nibble]
|
||||||
|
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||||
|
return err(NearbyDanglingLink) # error
|
||||||
|
|
||||||
|
let nextNibble = rPath.tail[0].int8
|
||||||
|
if 0 < nextNibble:
|
||||||
|
let
|
||||||
|
nextNode = db.tab[topLink]
|
||||||
|
rPathLen = rPath.path.len # in case of backtracking
|
||||||
|
rPathTail = rPath.tail
|
||||||
|
case nextNode.kind
|
||||||
|
of Leaf:
|
||||||
|
if nextNode.lPfx <= rPath.tail:
|
||||||
|
return rPath.completeMost(topLink, db)
|
||||||
|
of Extension:
|
||||||
|
if nextNode.ePfx <= rPath.tail:
|
||||||
|
return rPath.completeMost(topLink, db)
|
||||||
|
of Branch:
|
||||||
|
# Step down and complete with a branch link on the child node
|
||||||
|
rPath.path = rPath.path & RPathStep(
|
||||||
|
key: topLink,
|
||||||
|
node: nextNode,
|
||||||
|
nibble: nextNibble)
|
||||||
|
|
||||||
|
# Find the next item to the right of the new top entry
|
||||||
|
let step = rPath.path[^1]
|
||||||
|
for inx in (step.nibble - 1).countDown(0):
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if not link.isZero:
|
||||||
|
rPath.path[^1].nibble = inx.int8
|
||||||
|
return rPath.completeMost(link, db)
|
||||||
|
|
||||||
|
# Restore `rPath` and backtrack
|
||||||
|
rPath.path.setLen(rPathLen)
|
||||||
|
rPath.tail = rPathTail
|
||||||
|
|
||||||
|
# Pop `Branch` node on top and append nibble to `tail`
|
||||||
|
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||||
|
rPath.path.setLen(rPath.path.len - 1)
|
||||||
|
|
||||||
|
# Pathological case: n0000.. for 0 < n
|
||||||
|
var step = path.path[0]
|
||||||
|
for inx in (step.nibble - 1).countDown(0):
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if not link.isZero:
|
||||||
|
step.nibble = inx.int8
|
||||||
|
rPath.path = @[step]
|
||||||
|
return rPath.completeMost(link, db)
|
||||||
|
|
||||||
|
err(NearbyFailed) # error
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryNearbyLeft*(
|
||||||
|
path: XPath; ## Partially expanded path
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[XPath,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryNearbyLeft()` for persistant database
|
||||||
|
|
||||||
|
# Some easy cases
|
||||||
|
if path.path.len == 0:
|
||||||
|
return err(NearbyEmptyPath) # error
|
||||||
|
if path.path[^1].node.kind == Leaf:
|
||||||
|
return ok(path)
|
||||||
|
|
||||||
|
var xPath = path
|
||||||
|
while 0 < xPath.path.len:
|
||||||
|
let top = xPath.path[^1]
|
||||||
|
if top.node.kind != Branch or
|
||||||
|
top.nibble < 0 or
|
||||||
|
xPath.tail.len == 0:
|
||||||
|
return err(NearbyUnexpectedNode) # error
|
||||||
|
|
||||||
|
let topLink = top.node.bLink[top.nibble]
|
||||||
|
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||||
|
return err(NearbyDanglingLink) # error
|
||||||
|
|
||||||
|
let nextNibble = xPath.tail[0].int8
|
||||||
|
if 0 < nextNibble:
|
||||||
|
let
|
||||||
|
nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||||
|
xPathLen = xPath.path.len # in case of backtracking
|
||||||
|
xPathTail = xPath.tail
|
||||||
|
case nextNodeRlp.listLen:
|
||||||
|
of 2:
|
||||||
|
if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail:
|
||||||
|
return xPath.completeMost(topLink, getFn)
|
||||||
|
of 17:
|
||||||
|
# Step down and complete with a branch link on the child node
|
||||||
|
xPath.path = xPath.path & XPathStep(
|
||||||
|
key: topLink,
|
||||||
|
node: nextNodeRlp.toBranchNode,
|
||||||
|
nibble: nextNibble)
|
||||||
|
else:
|
||||||
|
return err(NearbyGarbledNode) # error
|
||||||
|
|
||||||
|
# Find the next item to the right of the new top entry
|
||||||
|
let step = xPath.path[^1]
|
||||||
|
for inx in (step.nibble - 1).countDown(0):
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if 0 < link.len:
|
||||||
|
xPath.path[^1].nibble = inx.int8
|
||||||
|
return xPath.completeMost(link, getFn)
|
||||||
|
|
||||||
|
# Restore `xPath` and backtrack
|
||||||
|
xPath.path.setLen(xPathLen)
|
||||||
|
xPath.tail = xPathTail
|
||||||
|
|
||||||
|
# Pop `Branch` node on top and append nibble to `tail`
|
||||||
|
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||||
|
xPath.path.setLen(xPath.path.len - 1)
|
||||||
|
|
||||||
|
# Pathological case: n00000.. for 0 < n
|
||||||
|
var step = path.path[0]
|
||||||
|
for inx in (step.nibble - 1).countDown(0):
|
||||||
|
let link = step.node.bLink[inx]
|
||||||
|
if 0 < link.len:
|
||||||
|
step.nibble = inx.int8
|
||||||
|
xPath.path = @[step]
|
||||||
|
return xPath.completeMost(link, getFn)
|
||||||
|
|
||||||
|
err(NearbyFailed) # error
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, convenience wrappers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
|
||||||
|
## than `RPath()` ones.
|
||||||
|
noRlpErrorOops("hexaryNearbyRight"):
|
||||||
|
return baseTag.hexaryNearbyRightImpl(rootKey, db)
|
||||||
|
|
||||||
|
proc hexaryNearbyRight*(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryNearbyRight()` for persistant database
|
||||||
|
noKeyErrorOops("hexaryNearbyRight"):
|
||||||
|
return baseTag.hexaryNearbyRightImpl(rootKey, getFn)
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryNearbyLeft*(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
|
||||||
|
noRlpErrorOops("hexaryNearbyLeft"):
|
||||||
|
return baseTag.hexaryNearbyLeftImpl(rootKey, db)
|
||||||
|
|
||||||
|
proc hexaryNearbyLeft*(
|
||||||
|
baseTag: NodeTag; ## Some node
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): Result[NodeTag,HexaryError]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryNearbyLeft()` for persistant database
|
||||||
|
noKeyErrorOops("hexaryNearbyLeft"):
|
||||||
|
return baseTag.hexaryNearbyLeftImpl(rootKey, getFn)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# End
|
||||||
|
# ------------------------------------------------------------------------------
|
|
@ -11,7 +11,7 @@
|
||||||
## Find node paths in hexary tries.
|
## Find node paths in hexary tries.
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[algorithm, sequtils, tables],
|
std/[sequtils, sets, tables],
|
||||||
eth/[common, trie/nibbles],
|
eth/[common, trie/nibbles],
|
||||||
stew/[byteutils, interval_set],
|
stew/[byteutils, interval_set],
|
||||||
../../range_desc,
|
../../range_desc,
|
||||||
|
@ -30,15 +30,9 @@ proc pp(w: Blob; db: HexaryTreeDbRef): string =
|
||||||
# Private helpers
|
# Private helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc `==`(a, b: XNodeObj): bool =
|
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||||
if a.kind == b.kind:
|
## Might be lossy, check before use
|
||||||
case a.kind:
|
discard result.init(key.ByteArray33[1 .. 32])
|
||||||
of Leaf:
|
|
||||||
return a.lPfx == b.lPfx and a.lData == b.lData
|
|
||||||
of Extension:
|
|
||||||
return a.ePfx == b.ePfx and a.eLink == b.eLink
|
|
||||||
of Branch:
|
|
||||||
return a.bLink == b.bLink
|
|
||||||
|
|
||||||
proc getNibblesImpl(path: XPath|RPath; start = 0): NibblesSeq =
|
proc getNibblesImpl(path: XPath|RPath; start = 0): NibblesSeq =
|
||||||
## Re-build the key path
|
## Re-build the key path
|
||||||
|
@ -87,49 +81,10 @@ proc toExtensionNode(
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
|
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
|
||||||
|
|
||||||
|
|
||||||
proc `<=`(a, b: NibblesSeq): bool =
|
|
||||||
## Compare nibbles, different lengths are padded to the right with zeros
|
|
||||||
let abMin = min(a.len, b.len)
|
|
||||||
for n in 0 ..< abMin:
|
|
||||||
if a[n] < b[n]:
|
|
||||||
return true
|
|
||||||
if b[n] < a[n]:
|
|
||||||
return false
|
|
||||||
# otherwise a[n] == b[n]
|
|
||||||
|
|
||||||
# Assuming zero for missing entries
|
|
||||||
if b.len < a.len:
|
|
||||||
for n in abMin + 1 ..< a.len:
|
|
||||||
if 0 < a[n]:
|
|
||||||
return false
|
|
||||||
true
|
|
||||||
|
|
||||||
proc `<`(a, b: NibblesSeq): bool =
|
|
||||||
not (b <= a)
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions
|
# Private functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc padPartialPath(pfx: NibblesSeq; dblNibble: byte): NodeKey =
|
|
||||||
## Extend (or cut) `partialPath` nibbles sequence and generate `NodeKey`
|
|
||||||
# Pad with zeroes
|
|
||||||
var padded: NibblesSeq
|
|
||||||
|
|
||||||
let padLen = 64 - pfx.len
|
|
||||||
if 0 <= padLen:
|
|
||||||
padded = pfx & dblNibble.repeat(padlen div 2).initNibbleRange
|
|
||||||
if (padLen and 1) == 1:
|
|
||||||
padded = padded & @[dblNibble].initNibbleRange.slice(1)
|
|
||||||
else:
|
|
||||||
let nope = seq[byte].default.initNibbleRange
|
|
||||||
padded = pfx.slice(0,63) & nope # nope forces re-alignment
|
|
||||||
|
|
||||||
let bytes = padded.getBytes
|
|
||||||
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)
|
|
||||||
|
|
||||||
|
|
||||||
proc pathExtend(
|
proc pathExtend(
|
||||||
path: RPath;
|
path: RPath;
|
||||||
key: RepairKey;
|
key: RepairKey;
|
||||||
|
@ -140,8 +95,9 @@ proc pathExtend(
|
||||||
## path following the argument `path.tail`.
|
## path following the argument `path.tail`.
|
||||||
result = path
|
result = path
|
||||||
var key = key
|
var key = key
|
||||||
while db.tab.hasKey(key) and 0 < result.tail.len:
|
while db.tab.hasKey(key):
|
||||||
let node = db.tab[key]
|
let node = db.tab[key]
|
||||||
|
|
||||||
case node.kind:
|
case node.kind:
|
||||||
of Leaf:
|
of Leaf:
|
||||||
if result.tail.len == result.tail.sharedPrefixLen(node.lPfx):
|
if result.tail.len == result.tail.sharedPrefixLen(node.lPfx):
|
||||||
|
@ -150,6 +106,9 @@ proc pathExtend(
|
||||||
result.tail = EmptyNibbleRange
|
result.tail = EmptyNibbleRange
|
||||||
return
|
return
|
||||||
of Branch:
|
of Branch:
|
||||||
|
if result.tail.len == 0:
|
||||||
|
result.path.add RPathStep(key: key, node: node, nibble: -1)
|
||||||
|
return
|
||||||
let nibble = result.tail[0].int8
|
let nibble = result.tail[0].int8
|
||||||
if node.bLink[nibble].isZero:
|
if node.bLink[nibble].isZero:
|
||||||
return
|
return
|
||||||
|
@ -173,26 +132,26 @@ proc pathExtend(
|
||||||
## Ditto for `XPath` rather than `RPath`
|
## Ditto for `XPath` rather than `RPath`
|
||||||
result = path
|
result = path
|
||||||
var key = key
|
var key = key
|
||||||
|
|
||||||
while true:
|
while true:
|
||||||
let value = key.getFn()
|
let value = key.getFn()
|
||||||
if value.len == 0:
|
if value.len == 0:
|
||||||
return
|
return
|
||||||
|
|
||||||
var nodeRlp = rlpFromBytes value
|
var nodeRlp = rlpFromBytes value
|
||||||
|
|
||||||
case nodeRlp.listLen:
|
case nodeRlp.listLen:
|
||||||
of 2:
|
of 2:
|
||||||
let
|
let
|
||||||
(isLeaf, pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
(isLeaf, pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||||
nSharedNibbles = result.tail.sharedPrefixLen(pathSegment)
|
nSharedNibbles = result.tail.sharedPrefixLen(pathSegment)
|
||||||
fullPath = (nSharedNibbles == pathSegment.len)
|
fullPath = (nSharedNibbles == pathSegment.len)
|
||||||
newTail = result.tail.slice(nSharedNibbles)
|
|
||||||
|
|
||||||
# Leaf node
|
# Leaf node
|
||||||
if isLeaf:
|
if isLeaf:
|
||||||
let node = nodeRlp.toLeafNode(pathSegment)
|
if result.tail.len == nSharedNibbles:
|
||||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
# Bingo, got full path
|
||||||
result.tail = newTail
|
let node = nodeRlp.toLeafNode(pathSegment)
|
||||||
|
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
|
result.tail = EmptyNibbleRange
|
||||||
return
|
return
|
||||||
|
|
||||||
# Extension node
|
# Extension node
|
||||||
|
@ -201,7 +160,7 @@ proc pathExtend(
|
||||||
if node.eLink.len == 0:
|
if node.eLink.len == 0:
|
||||||
return
|
return
|
||||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||||
result.tail = newTail
|
result.tail = result.tail.slice(nSharedNibbles)
|
||||||
key = node.eLink
|
key = node.eLink
|
||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
|
@ -225,48 +184,6 @@ proc pathExtend(
|
||||||
# notreached
|
# notreached
|
||||||
|
|
||||||
|
|
||||||
proc completeLeast(
|
|
||||||
path: RPath;
|
|
||||||
key: RepairKey;
|
|
||||||
db: HexaryTreeDbRef;
|
|
||||||
pathLenMax = 64;
|
|
||||||
): RPath
|
|
||||||
{.gcsafe, raises: [Defect,KeyError].} =
|
|
||||||
## Extend path using least nodes without recursion.
|
|
||||||
result.path = path.path
|
|
||||||
if db.tab.hasKey(key):
|
|
||||||
var
|
|
||||||
key = key
|
|
||||||
node = db.tab[key]
|
|
||||||
|
|
||||||
while result.path.len < pathLenMax:
|
|
||||||
case node.kind:
|
|
||||||
of Leaf:
|
|
||||||
result.path.add RPathStep(key: key, node: node, nibble: -1)
|
|
||||||
return # done
|
|
||||||
|
|
||||||
of Extension:
|
|
||||||
block useExtensionLink:
|
|
||||||
let newKey = node.eLink
|
|
||||||
if not newkey.isZero and db.tab.hasKey(newKey):
|
|
||||||
result.path.add RPathStep(key: key, node: node, nibble: -1)
|
|
||||||
key = newKey
|
|
||||||
node = db.tab[key]
|
|
||||||
break useExtensionLink
|
|
||||||
return # Oops, no way
|
|
||||||
|
|
||||||
of Branch:
|
|
||||||
block findBranchLink:
|
|
||||||
for inx in 0 .. 15:
|
|
||||||
let newKey = node.bLink[inx]
|
|
||||||
if not newkey.isZero and db.tab.hasKey(newKey):
|
|
||||||
result.path.add RPathStep(key: key, node: node, nibble: inx.int8)
|
|
||||||
key = newKey
|
|
||||||
node = db.tab[key]
|
|
||||||
break findBranchLink
|
|
||||||
return # Oops, no way
|
|
||||||
|
|
||||||
|
|
||||||
proc pathLeast(
|
proc pathLeast(
|
||||||
path: XPath;
|
path: XPath;
|
||||||
key: Blob;
|
key: Blob;
|
||||||
|
@ -447,82 +364,6 @@ proc pathMost(
|
||||||
# End while
|
# End while
|
||||||
# Notreached
|
# Notreached
|
||||||
|
|
||||||
|
|
||||||
proc dismantleLeft(envPt, ivPt: RPath|XPath): Result[seq[Blob],void] =
|
|
||||||
## Helper for `dismantle()` for handling left side of envelope
|
|
||||||
#
|
|
||||||
# partialPath
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# envPt.. -- envelope of partial path
|
|
||||||
# |
|
|
||||||
# ivPt.. -- `iv`, not fully covering left of `env`
|
|
||||||
#
|
|
||||||
var collect: seq[Blob]
|
|
||||||
block leftCurbEnvelope:
|
|
||||||
for n in 0 ..< min(envPt.path.len, ivPt.path.len):
|
|
||||||
if envPt.path[n] != ivPt.path[n]:
|
|
||||||
#
|
|
||||||
# At this point, the `node` entries of either `path[n]` step are
|
|
||||||
# the same. This is so because the predecessor steps were the same
|
|
||||||
# or were the `rootKey` in case n == 0.
|
|
||||||
#
|
|
||||||
# But then (`node` entries being equal) the only way for the
|
|
||||||
# `path[n]` steps to differ is in the entry selector `nibble` for
|
|
||||||
# a branch node.
|
|
||||||
#
|
|
||||||
for m in n ..< ivPt.path.len:
|
|
||||||
let
|
|
||||||
pfx = ivPt.getNibblesImpl(0,m) # common path segment
|
|
||||||
top = ivPt.path[m].nibble # need nibbles smaller than top
|
|
||||||
#
|
|
||||||
# Incidentally for a non-`Branch` node, the value `top` becomes
|
|
||||||
# `-1` and the `for`- loop will be ignored (which is correct)
|
|
||||||
for nibble in 0 ..< top:
|
|
||||||
collect.add hexPrefixEncode(
|
|
||||||
pfx & @[nibble.byte].initNibbleRange.slice(1), isLeaf=false)
|
|
||||||
break leftCurbEnvelope
|
|
||||||
#
|
|
||||||
# Fringe case, e.g. when `partialPath` is an empty prefix (aka `@[0]`)
|
|
||||||
# and the database has a single leaf node `(a,some-value)` where the
|
|
||||||
# `rootKey` is the hash of this node. In that case, `pMin == 0` and
|
|
||||||
# `pMax == high(NodeTag)` and `iv == [a,a]`.
|
|
||||||
#
|
|
||||||
return err()
|
|
||||||
|
|
||||||
ok(collect)
|
|
||||||
|
|
||||||
proc dismantleRight(envPt, ivPt: RPath|XPath): Result[seq[Blob],void] =
|
|
||||||
## Helper for `dismantle()` for handling right side of envelope
|
|
||||||
#
|
|
||||||
# partialPath
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# / \
|
|
||||||
# .. envPt -- envelope of partial path
|
|
||||||
# |
|
|
||||||
# .. ivPt -- `iv`, not fully covering right of `env`
|
|
||||||
#
|
|
||||||
var collect: seq[Blob]
|
|
||||||
block rightCurbEnvelope:
|
|
||||||
for n in 0 ..< min(envPt.path.len, ivPt.path.len):
|
|
||||||
if envPt.path[n] != ivPt.path[n]:
|
|
||||||
for m in n ..< ivPt.path.len:
|
|
||||||
let
|
|
||||||
pfx = ivPt.getNibblesImpl(0,m) # common path segment
|
|
||||||
base = ivPt.path[m].nibble # need nibbles greater/equal
|
|
||||||
if 0 <= base:
|
|
||||||
for nibble in base+1 .. 15:
|
|
||||||
collect.add hexPrefixEncode(
|
|
||||||
pfx & @[nibble.byte].initNibbleRange.slice(1), isLeaf=false)
|
|
||||||
break rightCurbEnvelope
|
|
||||||
return err()
|
|
||||||
|
|
||||||
ok(collect)
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public helpers
|
# Public helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -531,6 +372,25 @@ proc getNibbles*(path: XPath|RPath; start = 0): NibblesSeq =
|
||||||
## Re-build the key path
|
## Re-build the key path
|
||||||
path.getNibblesImpl(start)
|
path.getNibblesImpl(start)
|
||||||
|
|
||||||
|
proc getNibbles*(path: XPath|RPath; start, maxLen: int): NibblesSeq =
|
||||||
|
## Variant of `getNibbles()`
|
||||||
|
path.getNibblesImpl(start, maxLen)
|
||||||
|
|
||||||
|
|
||||||
|
proc getPartialPath*(path: XPath|RPath): Blob =
|
||||||
|
## Convert to hex encoded partial path as used in `eth` or `snap` protocol
|
||||||
|
## where full leaf paths of nibble length 64 are encoded as 32 byte `Blob`
|
||||||
|
## and non-leaf partial paths are *compact encoded* (i.e. per the Ethereum
|
||||||
|
## wire protocol.)
|
||||||
|
let
|
||||||
|
isLeaf = (0 < path.path.len and path.path[^1].node.kind == Leaf)
|
||||||
|
nibbles = path.getNibbles
|
||||||
|
if isLeaf and nibbles.len == 64:
|
||||||
|
nibbles.getBytes
|
||||||
|
else:
|
||||||
|
nibbles.hexPrefixEncode(isLeaf)
|
||||||
|
|
||||||
|
|
||||||
proc leafData*(path: XPath): Blob =
|
proc leafData*(path: XPath): Blob =
|
||||||
## Return the leaf data from a successful `XPath` computation (if any.)
|
## Return the leaf data from a successful `XPath` computation (if any.)
|
||||||
if path.tail.len == 0 and 0 < path.path.len:
|
if path.tail.len == 0 and 0 < path.path.len:
|
||||||
|
@ -555,204 +415,175 @@ proc leafData*(path: RPath): Blob =
|
||||||
of Extension:
|
of Extension:
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc pathEnvelope*(partialPath: Blob): NodeTagRange =
|
|
||||||
## Convert partial path to range of all keys starting with this
|
|
||||||
## partial path
|
|
||||||
let pfx = (hexPrefixDecode partialPath)[1]
|
|
||||||
NodeTagRange.new(
|
|
||||||
pfx.padPartialPath(0).to(NodeTag),
|
|
||||||
pfx.padPartialPath(255).to(NodeTag))
|
|
||||||
|
|
||||||
proc pathSortUniq*(
|
|
||||||
partialPaths: openArray[Blob];
|
|
||||||
): seq[Blob]
|
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
|
||||||
## Sort and simplify a list of partial paths by removoing nested entries.
|
|
||||||
|
|
||||||
var tab: Table[NodeTag,(Blob,bool)]
|
|
||||||
for w in partialPaths:
|
|
||||||
let iv = w.pathEnvelope
|
|
||||||
tab[iv.minPt] = (w,true) # begin entry
|
|
||||||
tab[iv.maxPt] = (@[],false) # end entry
|
|
||||||
|
|
||||||
# When sorted, nested entries look like
|
|
||||||
#
|
|
||||||
# 123000000.. (w0, true)
|
|
||||||
# 123400000.. (w1, true)
|
|
||||||
# 1234fffff.. (, false)
|
|
||||||
# 123ffffff.. (, false)
|
|
||||||
# ...
|
|
||||||
# 777000000.. (w2, true)
|
|
||||||
#
|
|
||||||
var level = 0
|
|
||||||
for key in toSeq(tab.keys).sorted(cmp):
|
|
||||||
let (w,begin) = tab[key]
|
|
||||||
if begin:
|
|
||||||
if level == 0:
|
|
||||||
result.add w
|
|
||||||
level.inc
|
|
||||||
else:
|
|
||||||
level.dec
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public functions
|
# Public functions, hexary path constructors
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc hexaryPath*(
|
proc hexaryPath*(
|
||||||
nodeKey: NodeKey;
|
partialPath: NibblesSeq; ## partial path to resolve
|
||||||
rootKey: RepairKey;
|
rootKey: NodeKey|RepairKey; ## State root
|
||||||
db: HexaryTreeDbRef;
|
db: HexaryTreeDbRef; ## Database
|
||||||
): RPath
|
): RPath
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
## Compute logest possible repair tree `db` path matching the `nodeKey`
|
## Compute the longest possible repair tree `db` path matching the `nodeKey`
|
||||||
## nibbles. The `nodeNey` path argument come first to support a more
|
## nibbles. The `nodeNey` path argument comes before the `db` one for
|
||||||
## functional notation.
|
## supporting a more functional notation.
|
||||||
RPath(tail: nodeKey.to(NibblesSeq)).pathExtend(rootKey,db)
|
proc to(a: RepairKey; T: type RepairKey): RepairKey = a
|
||||||
|
RPath(tail: partialPath).pathExtend(rootKey.to(RepairKey), db)
|
||||||
proc hexaryPath*(
|
|
||||||
partialPath: NibblesSeq;
|
|
||||||
rootKey: RepairKey;
|
|
||||||
db: HexaryTreeDbRef;
|
|
||||||
): RPath
|
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
|
||||||
## Variant of `hexaryPath`.
|
|
||||||
RPath(tail: partialPath).pathExtend(rootKey,db)
|
|
||||||
|
|
||||||
proc hexaryPath*(
|
proc hexaryPath*(
|
||||||
nodeKey: NodeKey;
|
nodeKey: NodeKey;
|
||||||
root: NodeKey;
|
rootKey: NodeKey|RepairKey;
|
||||||
getFn: HexaryGetFn;
|
|
||||||
): XPath
|
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
|
||||||
## Compute logest possible path on an arbitrary hexary trie. Note that this
|
|
||||||
## prototype resembles the other ones with the implict `state root`. The
|
|
||||||
## rules for the protopye arguments are:
|
|
||||||
## * First argument is the node key, the node path to be followed
|
|
||||||
## * Last argument is the database (needed only here for debugging)
|
|
||||||
##
|
|
||||||
## Note that this function will flag a potential lowest level `Extception`
|
|
||||||
## in the invoking function due to the `getFn` argument.
|
|
||||||
XPath(tail: nodeKey.to(NibblesSeq)).pathExtend(root.to(Blob), getFn)
|
|
||||||
|
|
||||||
proc hexaryPath*(
|
|
||||||
partialPath: NibblesSeq;
|
|
||||||
root: NodeKey;
|
|
||||||
getFn: HexaryGetFn;
|
|
||||||
): XPath
|
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
|
||||||
## Variant of `hexaryPath`.
|
|
||||||
XPath(tail: partialPath).pathExtend(root.to(Blob), getFn)
|
|
||||||
|
|
||||||
|
|
||||||
proc right*(
|
|
||||||
path: RPath;
|
|
||||||
db: HexaryTreeDbRef;
|
db: HexaryTreeDbRef;
|
||||||
): RPath
|
): RPath
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
## Extends the maximally extended argument nodes `path` to the right (with
|
## Variant of `hexaryPath` for a node key.
|
||||||
## path value not decreasing). This is similar to `next()`, only that the
|
nodeKey.to(NibblesSeq).hexaryPath(rootKey, db)
|
||||||
## algorithm does not backtrack if there are dangling links in between.
|
|
||||||
##
|
|
||||||
## This code is intended be used for verifying a left-bound proof.
|
|
||||||
|
|
||||||
# Some easy cases
|
proc hexaryPath*(
|
||||||
if path.path.len == 0:
|
nodeTag: NodeTag;
|
||||||
return RPath() # error
|
rootKey: NodeKey|RepairKey;
|
||||||
if path.path[^1].node.kind == Leaf:
|
|
||||||
return path
|
|
||||||
|
|
||||||
var rPath = path
|
|
||||||
while 0 < rPath.path.len:
|
|
||||||
let top = rPath.path[^1]
|
|
||||||
if top.node.kind != Branch or
|
|
||||||
top.nibble < 0 or
|
|
||||||
rPath.tail.len == 0:
|
|
||||||
return RPath() # error
|
|
||||||
|
|
||||||
let topLink = top.node.bLink[top.nibble]
|
|
||||||
if topLink.isZero or not db.tab.hasKey(topLink):
|
|
||||||
return RPath() # error
|
|
||||||
|
|
||||||
let nextNibble = rPath.tail[0].int8
|
|
||||||
if nextNibble < 15:
|
|
||||||
let
|
|
||||||
nextNode = db.tab[topLink]
|
|
||||||
rPathLen = rPath.path.len # in case of backtracking
|
|
||||||
case nextNode.kind
|
|
||||||
of Leaf:
|
|
||||||
if rPath.tail <= nextNode.lPfx:
|
|
||||||
return rPath.completeLeast(topLink, db)
|
|
||||||
of Extension:
|
|
||||||
if rPath.tail <= nextNode.ePfx:
|
|
||||||
return rPath.completeLeast(topLink, db)
|
|
||||||
of Branch:
|
|
||||||
# Step down and complete with a branch link on the child node
|
|
||||||
rPath.path = rPath.path & RPathStep(
|
|
||||||
key: topLink,
|
|
||||||
node: nextNode,
|
|
||||||
nibble: nextNibble)
|
|
||||||
|
|
||||||
# Find the next item to the right of the new top entry
|
|
||||||
let step = rPath.path[^1]
|
|
||||||
for inx in (step.nibble + 1) .. 15:
|
|
||||||
let link = step.node.bLink[inx]
|
|
||||||
if not link.isZero:
|
|
||||||
rPath.path[^1].nibble = inx.int8
|
|
||||||
return rPath.completeLeast(link, db)
|
|
||||||
|
|
||||||
# Restore `rPath` and backtrack
|
|
||||||
rPath.path.setLen(rPathLen)
|
|
||||||
|
|
||||||
# Pop `Branch` node on top and append nibble to `tail`
|
|
||||||
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
|
||||||
rPath.path.setLen(rPath.path.len - 1)
|
|
||||||
|
|
||||||
# Pathological case: nfffff.. for n < f
|
|
||||||
var step = path.path[0]
|
|
||||||
for inx in (step.nibble + 1) .. 15:
|
|
||||||
let link = step.node.bLink[inx]
|
|
||||||
if not link.isZero:
|
|
||||||
step.nibble = inx.int8
|
|
||||||
rPath.path = @[step]
|
|
||||||
return rPath.completeLeast(link, db)
|
|
||||||
|
|
||||||
RPath() # error
|
|
||||||
|
|
||||||
|
|
||||||
proc rightStop*(
|
|
||||||
path: RPath;
|
|
||||||
db: HexaryTreeDbRef;
|
db: HexaryTreeDbRef;
|
||||||
): bool
|
): RPath
|
||||||
{.gcsafe, raises: [Defect,KeyError]} =
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
## Returns `true` if the maximally extended argument nodes `path` is the
|
## Variant of `hexaryPath` for a node tag.
|
||||||
## rightmost on the hexary trie database. It verifies that there is no more
|
nodeTag.to(NodeKey).hexaryPath(rootKey, db)
|
||||||
## leaf entry to the right of the argument `path`.
|
|
||||||
##
|
|
||||||
## This code is intended be used for verifying a left-bound proof.
|
|
||||||
if 0 < path.path.len and 0 < path.tail.len:
|
|
||||||
let top = path.path[^1]
|
|
||||||
if top.node.kind == Branch and 0 <= top.nibble:
|
|
||||||
|
|
||||||
let topLink = top.node.bLink[top.nibble]
|
proc hexaryPath*(
|
||||||
if not topLink.isZero and db.tab.hasKey(topLink):
|
partialPath: Blob;
|
||||||
let
|
rootKey: NodeKey|RepairKey;
|
||||||
nextNibble = path.tail[0]
|
db: HexaryTreeDbRef;
|
||||||
nextNode = db.tab[topLink]
|
): RPath
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Variant of `hexaryPath` for a hex encoded partial path.
|
||||||
|
partialPath.hexPrefixDecode[1].hexaryPath(rootKey, db)
|
||||||
|
|
||||||
case nextNode.kind
|
|
||||||
of Leaf:
|
|
||||||
return nextNode.lPfx < path.tail
|
|
||||||
|
|
||||||
of Extension:
|
proc hexaryPath*(
|
||||||
return nextNode.ePfx < path.tail
|
partialPath: NibblesSeq; ## partial path to resolve
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
): XPath
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Compute the longest possible path on an arbitrary hexary trie.
|
||||||
|
XPath(tail: partialPath).pathExtend(rootKey.to(Blob), getFn)
|
||||||
|
|
||||||
of Branch:
|
proc hexaryPath*(
|
||||||
# Step down and verify that there is no branch link
|
nodeKey: NodeKey;
|
||||||
for inx in nextNibble .. 15:
|
rootKey: NodeKey;
|
||||||
if not nextNode.bLink[inx].isZero:
|
getFn: HexaryGetFn;
|
||||||
return false
|
): XPath
|
||||||
return true
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryPath` for a node key..
|
||||||
|
nodeKey.to(NibblesSeq).hexaryPath(rootKey, getFn)
|
||||||
|
|
||||||
|
proc hexaryPath*(
|
||||||
|
nodeTag: NodeTag;
|
||||||
|
rootKey: NodeKey;
|
||||||
|
getFn: HexaryGetFn;
|
||||||
|
): XPath
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryPath` for a node tag..
|
||||||
|
nodeTag.to(NodeKey).hexaryPath(rootKey, getFn)
|
||||||
|
|
||||||
|
proc hexaryPath*(
|
||||||
|
partialPath: Blob;
|
||||||
|
rootKey: NodeKey;
|
||||||
|
getFn: HexaryGetFn;
|
||||||
|
): XPath
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryPath` for a hex encoded partial path.
|
||||||
|
partialPath.hexPrefixDecode[1].hexaryPath(rootKey, getFn)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public helpers, partial paths resolvers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc hexaryPathNodeKey*(
|
||||||
|
partialPath: NibblesSeq; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey|RepairKey; ## State root
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
missingOk = false; ## Also return key for missing node
|
||||||
|
): Result[NodeKey,void]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Returns the `NodeKey` equivalent for the argment `partialPath` if this
|
||||||
|
## node is available in the database. If the argument flag `missingOk` is
|
||||||
|
## set`true` and the last node addressed by the argument path is missing,
|
||||||
|
## its key is returned as well.
|
||||||
|
let steps = partialPath.hexaryPath(rootKey, db)
|
||||||
|
if 0 < steps.path.len and steps.tail.len == 0:
|
||||||
|
let top = steps.path[^1]
|
||||||
|
# If the path was fully exhaused and the node exists for a `Branch` node,
|
||||||
|
# then the `nibble` is `-1`.
|
||||||
|
if top.nibble < 0 and top.key.isNodeKey:
|
||||||
|
return ok(top.key.convertTo(NodeKey))
|
||||||
|
if missingOk:
|
||||||
|
let link = top.node.bLink[top.nibble]
|
||||||
|
if not link.isZero and link.isNodeKey:
|
||||||
|
return ok(link.convertTo(NodeKey))
|
||||||
|
err()
|
||||||
|
|
||||||
|
proc hexaryPathNodeKey*(
|
||||||
|
partialPath: Blob; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey|RepairKey; ## State root
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
missingOk = false; ## Also return key for missing node
|
||||||
|
): Result[NodeKey,void]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Variant of `hexaryPathNodeKey()` for hex encoded partial path.
|
||||||
|
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, db, missingOk)
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryPathNodeKey*(
|
||||||
|
partialPath: NibblesSeq; ## Hex encoded partial path
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
missingOk = false; ## Also return key for missing node
|
||||||
|
): Result[NodeKey,void]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryPathNodeKey()` for persistent database.
|
||||||
|
let steps = partialPath.hexaryPath(rootKey, getFn)
|
||||||
|
if 0 < steps.path.len and steps.tail.len == 0:
|
||||||
|
let top = steps.path[^1]
|
||||||
|
# If the path was fully exhaused and the node exists for a `Branch` node,
|
||||||
|
# then the `nibble` is `-1`.
|
||||||
|
if top.nibble < 0:
|
||||||
|
return ok(top.key.convertTo(NodeKey))
|
||||||
|
if missingOk:
|
||||||
|
let link = top.node.bLink[top.nibble]
|
||||||
|
if 0 < link.len:
|
||||||
|
return ok(link.convertTo(NodeKey))
|
||||||
|
err()
|
||||||
|
|
||||||
|
proc hexaryPathNodeKey*(
|
||||||
|
partialPath: Blob; ## Partial database path
|
||||||
|
rootKey: NodeKey; ## State root
|
||||||
|
getFn: HexaryGetFn; ## Database abstraction
|
||||||
|
missingOk = false; ## Also return key for missing node
|
||||||
|
): Result[NodeKey,void]
|
||||||
|
{.gcsafe, raises: [Defect,RlpError]} =
|
||||||
|
## Variant of `hexaryPathNodeKey()` for persistent database and
|
||||||
|
## hex encoded partial path.
|
||||||
|
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, getFn, missingOk)
|
||||||
|
|
||||||
|
|
||||||
|
proc hexaryPathNodeKeys*(
|
||||||
|
partialPaths: seq[Blob]; ## Partial paths segments
|
||||||
|
rootKey: NodeKey|RepairKey; ## State root
|
||||||
|
db: HexaryTreeDbRef; ## Database
|
||||||
|
missingOk = false; ## Also return key for missing node
|
||||||
|
): HashSet[NodeKey]
|
||||||
|
{.gcsafe, raises: [Defect,KeyError]} =
|
||||||
|
## Convert a list of path segments to a set of node keys
|
||||||
|
partialPaths.toSeq
|
||||||
|
.mapIt(it.hexaryPathNodeKey(rootKey, db, missingOk))
|
||||||
|
.filterIt(it.isOk)
|
||||||
|
.mapIt(it.value)
|
||||||
|
.toHashSet
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, traversal
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc next*(
|
proc next*(
|
||||||
path: XPath;
|
path: XPath;
|
||||||
|
@ -815,97 +646,6 @@ proc prev*(
|
||||||
if minDepth <= newPath.depth and 0 < newPath.leafData.len:
|
if minDepth <= newPath.depth and 0 < newPath.leafData.len:
|
||||||
return newPath
|
return newPath
|
||||||
|
|
||||||
|
|
||||||
proc dismantle*(
|
|
||||||
partialPath: Blob; ## Patrial path for existing node
|
|
||||||
rootKey: NodeKey; ## State root
|
|
||||||
iv: NodeTagRange; ## Proofed range of leaf paths
|
|
||||||
db: HexaryTreeDbRef; ## Database
|
|
||||||
): seq[Blob]
|
|
||||||
{.gcsafe, raises: [Defect,RlpError,KeyError]} =
|
|
||||||
## Returns the list of partial paths which envelopes span the range of
|
|
||||||
## node paths one obtains by subtracting the argument range `iv` from the
|
|
||||||
## envelope of the argumenr `partialPath`.
|
|
||||||
##
|
|
||||||
## The following boundary conditions apply in order to get a useful result
|
|
||||||
## in a partially completed hexary trie database.
|
|
||||||
##
|
|
||||||
## * The argument `partialPath` refers to an existing node.
|
|
||||||
##
|
|
||||||
## * The argument `iv` contains a range of paths (e.g. account hash keys)
|
|
||||||
## with the property that if there is no (leaf-) node for that path, then
|
|
||||||
## no such node exists when the database is completed.
|
|
||||||
##
|
|
||||||
## This condition is sort of rephrasing the boundary proof condition that
|
|
||||||
## applies when downloading a range of accounts or storage slots from the
|
|
||||||
## network via `snap/1` protocol. In fact the condition here is stricter
|
|
||||||
## as it excludes sub-trie *holes* (see comment on `importAccounts()`.)
|
|
||||||
##
|
|
||||||
# Chechk for the trivial case when the `partialPath` envelope and `iv` do
|
|
||||||
# not overlap.
|
|
||||||
let env = partialPath.pathEnvelope
|
|
||||||
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
|
|
||||||
return @[partialPath]
|
|
||||||
|
|
||||||
# So ranges do overlap. The case that the `partialPath` envelope is fully
|
|
||||||
# contained in `iv` results in `@[]` which is implicitely handled by
|
|
||||||
# non-matching any of the cases, below.
|
|
||||||
if env.minPt < iv.minPt:
|
|
||||||
let
|
|
||||||
envPt = env.minPt.to(NodeKey).hexaryPath(rootKey.to(RepairKey), db)
|
|
||||||
ivPt = iv.minPt.to(NodeKey).hexaryPath(rootKey.to(RepairKey), db)
|
|
||||||
when false: # or true:
|
|
||||||
echo ">>> ",
|
|
||||||
"\n ", envPt.pp(db),
|
|
||||||
"\n -----",
|
|
||||||
"\n ", ivPt.pp(db)
|
|
||||||
let rc = envPt.dismantleLeft ivPt
|
|
||||||
if rc.isErr:
|
|
||||||
return @[partialPath]
|
|
||||||
result &= rc.value
|
|
||||||
|
|
||||||
if iv.maxPt < env.maxPt:
|
|
||||||
let
|
|
||||||
envPt = env.maxPt.to(NodeKey).hexaryPath(rootKey.to(RepairKey), db)
|
|
||||||
ivPt = iv.maxPt.to(NodeKey).hexaryPath(rootKey.to(RepairKey), db)
|
|
||||||
when false: # or true:
|
|
||||||
echo ">>> ",
|
|
||||||
"\n ", envPt.pp(db),
|
|
||||||
"\n -----",
|
|
||||||
"\n ", ivPt.pp(db)
|
|
||||||
let rc = envPt.dismantleRight ivPt
|
|
||||||
if rc.isErr:
|
|
||||||
return @[partialPath]
|
|
||||||
result &= rc.value
|
|
||||||
|
|
||||||
proc dismantle*(
|
|
||||||
partialPath: Blob; ## Patrial path for existing node
|
|
||||||
rootKey: NodeKey; ## State root
|
|
||||||
iv: NodeTagRange; ## Proofed range of leaf paths
|
|
||||||
getFn: HexaryGetFn; ## Database abstraction
|
|
||||||
): seq[Blob]
|
|
||||||
{.gcsafe, raises: [Defect,RlpError]} =
|
|
||||||
## Variant of `dismantle()` for persistent database.
|
|
||||||
let env = partialPath.pathEnvelope
|
|
||||||
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
|
|
||||||
return @[partialPath]
|
|
||||||
|
|
||||||
if env.minPt < iv.minPt:
|
|
||||||
let rc = dismantleLeft(
|
|
||||||
env.minPt.to(NodeKey).hexaryPath(rootKey, getFn),
|
|
||||||
iv.minPt.to(NodeKey).hexaryPath(rootKey, getFn))
|
|
||||||
if rc.isErr:
|
|
||||||
return @[partialPath]
|
|
||||||
result &= rc.value
|
|
||||||
|
|
||||||
if iv.maxPt < env.maxPt:
|
|
||||||
let rc = dismantleRight(
|
|
||||||
env.maxPt.to(NodeKey).hexaryPath(rootKey, getFn),
|
|
||||||
iv.maxPt.to(NodeKey).hexaryPath(rootKey, getFn))
|
|
||||||
if rc.isErr:
|
|
||||||
return @[partialPath]
|
|
||||||
result &= rc.value
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
|
@ -165,7 +165,7 @@ proc checkAccountsTrieIsComplete*(
|
||||||
error: HexaryError
|
error: HexaryError
|
||||||
|
|
||||||
try:
|
try:
|
||||||
let stats = db.getAccountFn.hexaryInspectTrie(rootKey, @[])
|
let stats = db.getAccountFn.hexaryInspectTrie(rootKey)
|
||||||
if not stats.stopped:
|
if not stats.stopped:
|
||||||
return stats.dangling.len == 0
|
return stats.dangling.len == 0
|
||||||
|
|
||||||
|
|
|
@ -14,8 +14,9 @@ import
|
||||||
eth/[common, p2p, rlp, trie/nibbles],
|
eth/[common, p2p, rlp, trie/nibbles],
|
||||||
stew/[byteutils, interval_set],
|
stew/[byteutils, interval_set],
|
||||||
../../range_desc,
|
../../range_desc,
|
||||||
"."/[hexary_desc, hexary_error, hexary_import, hexary_interpolate,
|
"."/[hexary_desc, hexary_error, hexary_envelope, hexary_import,
|
||||||
hexary_inspect, hexary_paths, snapdb_desc, snapdb_persistent]
|
hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc,
|
||||||
|
snapdb_persistent]
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
@ -238,7 +239,7 @@ proc importAccounts*(
|
||||||
|
|
||||||
# Inspect trie for dangling nodes from prrof data (if any.)
|
# Inspect trie for dangling nodes from prrof data (if any.)
|
||||||
if 0 < data.proof.len:
|
if 0 < data.proof.len:
|
||||||
proofStats = ps.hexaDb.hexaryInspectTrie(ps.root, @[])
|
proofStats = ps.hexaDb.hexaryInspectTrie(ps.root)
|
||||||
|
|
||||||
if 0 < accounts.len:
|
if 0 < accounts.len:
|
||||||
if 0 < data.proof.len:
|
if 0 < data.proof.len:
|
||||||
|
@ -246,7 +247,7 @@ proc importAccounts*(
|
||||||
# proof data is typically small.
|
# proof data is typically small.
|
||||||
let topTag = accounts[^1].pathTag
|
let topTag = accounts[^1].pathTag
|
||||||
for w in proofStats.dangling:
|
for w in proofStats.dangling:
|
||||||
let iv = w.partialPath.pathEnvelope
|
let iv = w.partialPath.hexaryEnvelope
|
||||||
if iv.maxPt < base or topTag < iv.minPt:
|
if iv.maxPt < base or topTag < iv.minPt:
|
||||||
# Dangling link with partial path envelope outside accounts range
|
# Dangling link with partial path envelope outside accounts range
|
||||||
gaps.dangling.add w
|
gaps.dangling.add w
|
||||||
|
@ -272,7 +273,7 @@ proc importAccounts*(
|
||||||
# Without `proof` data available there can only be a complete
|
# Without `proof` data available there can only be a complete
|
||||||
# set/list of accounts so there are no dangling nodes in the first
|
# set/list of accounts so there are no dangling nodes in the first
|
||||||
# place. But there must be `proof` data for an empty list.
|
# place. But there must be `proof` data for an empty list.
|
||||||
if w.partialPath.pathEnvelope.maxPt < bottomTag:
|
if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
|
||||||
return err(LowerBoundProofError)
|
return err(LowerBoundProofError)
|
||||||
# Otherwise register left over entry
|
# Otherwise register left over entry
|
||||||
gaps.innerGaps.add w
|
gaps.innerGaps.add w
|
||||||
|
@ -289,7 +290,7 @@ proc importAccounts*(
|
||||||
else:
|
else:
|
||||||
if not noBaseBoundCheck:
|
if not noBaseBoundCheck:
|
||||||
for w in proofStats.dangling:
|
for w in proofStats.dangling:
|
||||||
if base <= w.partialPath.pathEnvelope.maxPt:
|
if base <= w.partialPath.hexaryEnvelope.maxPt:
|
||||||
return err(LowerBoundProofError)
|
return err(LowerBoundProofError)
|
||||||
gaps.dangling = proofStats.dangling
|
gaps.dangling = proofStats.dangling
|
||||||
|
|
||||||
|
@ -411,9 +412,9 @@ proc getAccountsNodeKey*(
|
||||||
var rc: Result[NodeKey,void]
|
var rc: Result[NodeKey,void]
|
||||||
noRlpExceptionOops("getAccountsNodeKey()"):
|
noRlpExceptionOops("getAccountsNodeKey()"):
|
||||||
if persistent:
|
if persistent:
|
||||||
rc = ps.getAccountFn.hexaryInspectPath(ps.root, path)
|
rc = path.hexaryPathNodeKey(ps.root, ps.getAccountFn)
|
||||||
else:
|
else:
|
||||||
rc = ps.hexaDb.hexaryInspectPath(ps.root, path)
|
rc = path.hexaryPathNodeKey(ps.root, ps.hexaDb)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
return ok(rc.value)
|
return ok(rc.value)
|
||||||
err(NodeNotFound)
|
err(NodeNotFound)
|
||||||
|
@ -443,7 +444,7 @@ proc getAccountsData*(
|
||||||
if persistent:
|
if persistent:
|
||||||
leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData
|
leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData
|
||||||
else:
|
else:
|
||||||
leaf = path.hexaryPath(ps.root.to(RepairKey),ps.hexaDb).leafData
|
leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData
|
||||||
|
|
||||||
if leaf.len == 0:
|
if leaf.len == 0:
|
||||||
return err(AccountNotFound)
|
return err(AccountNotFound)
|
||||||
|
|
|
@ -14,7 +14,8 @@ import
|
||||||
eth/[common, p2p, trie/db, trie/nibbles],
|
eth/[common, p2p, trie/db, trie/nibbles],
|
||||||
../../../../db/[select_backend, storage_types],
|
../../../../db/[select_backend, storage_types],
|
||||||
../../range_desc,
|
../../range_desc,
|
||||||
"."/[hexary_desc, hexary_error, hexary_import, hexary_paths, rocky_bulk_load]
|
"."/[hexary_desc, hexary_error, hexary_import, hexary_nearby,
|
||||||
|
hexary_paths, rocky_bulk_load]
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
@ -257,23 +258,22 @@ proc verifyLowerBound*(
|
||||||
{.gcsafe, raises: [Defect, KeyError].} =
|
{.gcsafe, raises: [Defect, KeyError].} =
|
||||||
## Verify that `base` is to the left of the first leaf entry and there is
|
## Verify that `base` is to the left of the first leaf entry and there is
|
||||||
## nothing in between.
|
## nothing in between.
|
||||||
proc convertTo(data: openArray[byte]; T: type Hash256): T =
|
var error: HexaryError
|
||||||
discard result.data.NodeKey.init(data) # size error => zero
|
|
||||||
|
|
||||||
let
|
let rc = base.hexaryNearbyRight(ps.root, ps.hexaDb)
|
||||||
root = ps.root.to(RepairKey)
|
if rc.isErr:
|
||||||
base = base.to(NodeKey)
|
error = rc.error
|
||||||
next = base.hexaryPath(root, ps.hexaDb).right(ps.hexaDb).getNibbles
|
elif first == rc.value:
|
||||||
if next.len == 64:
|
return ok()
|
||||||
if first == next.getBytes.convertTo(Hash256).to(NodeTag):
|
else:
|
||||||
return ok()
|
error = LowerBoundProofError
|
||||||
|
|
||||||
let error = LowerBoundProofError
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace "verifyLowerBound()", peer, base=base.pp,
|
trace "verifyLowerBound()", peer, base=base.to(NodeKey).pp,
|
||||||
first=first.to(NodeKey).pp, error
|
first=first.to(NodeKey).pp, error
|
||||||
err(error)
|
err(error)
|
||||||
|
|
||||||
|
|
||||||
proc verifyNoMoreRight*(
|
proc verifyNoMoreRight*(
|
||||||
ps: SnapDbBaseRef; ## Database session descriptor
|
ps: SnapDbBaseRef; ## Database session descriptor
|
||||||
peer: Peer; ## For log messages
|
peer: Peer; ## For log messages
|
||||||
|
@ -285,7 +285,7 @@ proc verifyNoMoreRight*(
|
||||||
let
|
let
|
||||||
root = ps.root.to(RepairKey)
|
root = ps.root.to(RepairKey)
|
||||||
base = base.to(NodeKey)
|
base = base.to(NodeKey)
|
||||||
if base.hexaryPath(root, ps.hexaDb).rightStop(ps.hexaDb):
|
if base.hexaryPath(root, ps.hexaDb).hexaryNearbyRightMissing(ps.hexaDb):
|
||||||
return ok()
|
return ok()
|
||||||
|
|
||||||
let error = LowerBoundProofError
|
let error = LowerBoundProofError
|
||||||
|
@ -319,7 +319,7 @@ proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] =
|
||||||
## Pretty print helper compiling the path into the repair tree for the
|
## Pretty print helper compiling the path into the repair tree for the
|
||||||
## argument `key`.
|
## argument `key`.
|
||||||
noPpError("dumpPath"):
|
noPpError("dumpPath"):
|
||||||
let rPath= key.to(NodeKey).hexaryPath(ps.root.to(RepairKey), ps.hexaDb)
|
let rPath= key.hexaryPath(ps.root, ps.hexaDb)
|
||||||
result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"]
|
result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"]
|
||||||
|
|
||||||
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
|
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
|
||||||
|
|
|
@ -15,8 +15,9 @@ import
|
||||||
stew/interval_set,
|
stew/interval_set,
|
||||||
../../../protocol,
|
../../../protocol,
|
||||||
../../range_desc,
|
../../range_desc,
|
||||||
"."/[hexary_desc, hexary_error, hexary_import, hexary_inspect,
|
"."/[hexary_desc, hexary_error, hexary_envelope, hexary_import,
|
||||||
hexary_interpolate, hexary_paths, snapdb_desc, snapdb_persistent]
|
hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc,
|
||||||
|
snapdb_persistent]
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
@ -163,7 +164,7 @@ proc importStorageSlots(
|
||||||
# proof data is typically small.
|
# proof data is typically small.
|
||||||
let topTag = slots[^1].pathTag
|
let topTag = slots[^1].pathTag
|
||||||
for w in proofStats.dangling:
|
for w in proofStats.dangling:
|
||||||
let iv = w.partialPath.pathEnvelope
|
let iv = w.partialPath.hexaryEnvelope
|
||||||
if iv.maxPt < base or topTag < iv.minPt:
|
if iv.maxPt < base or topTag < iv.minPt:
|
||||||
# Dangling link with partial path envelope outside accounts range
|
# Dangling link with partial path envelope outside accounts range
|
||||||
discard
|
discard
|
||||||
|
@ -189,7 +190,7 @@ proc importStorageSlots(
|
||||||
# Without `proof` data available there can only be a complete
|
# Without `proof` data available there can only be a complete
|
||||||
# set/list of accounts so there are no dangling nodes in the first
|
# set/list of accounts so there are no dangling nodes in the first
|
||||||
# place. But there must be `proof` data for an empty list.
|
# place. But there must be `proof` data for an empty list.
|
||||||
if w.partialPath.pathEnvelope.maxPt < bottomTag:
|
if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
|
||||||
return err(LowerBoundProofError)
|
return err(LowerBoundProofError)
|
||||||
# Otherwise register left over entry
|
# Otherwise register left over entry
|
||||||
dangling.add w
|
dangling.add w
|
||||||
|
@ -207,7 +208,7 @@ proc importStorageSlots(
|
||||||
else:
|
else:
|
||||||
if not noBaseBoundCheck:
|
if not noBaseBoundCheck:
|
||||||
for w in proofStats.dangling:
|
for w in proofStats.dangling:
|
||||||
if base <= w.partialPath.pathEnvelope.maxPt:
|
if base <= w.partialPath.hexaryEnvelope.maxPt:
|
||||||
return err(LowerBoundProofError)
|
return err(LowerBoundProofError)
|
||||||
dangling = proofStats.dangling
|
dangling = proofStats.dangling
|
||||||
|
|
||||||
|
@ -498,9 +499,9 @@ proc getStorageSlotsNodeKey*(
|
||||||
var rc: Result[NodeKey,void]
|
var rc: Result[NodeKey,void]
|
||||||
noRlpExceptionOops("getStorageSlotsNodeKey()"):
|
noRlpExceptionOops("getStorageSlotsNodeKey()"):
|
||||||
if persistent:
|
if persistent:
|
||||||
rc = ps.getStorageSlotsFn.hexaryInspectPath(ps.root, path)
|
rc = path.hexarypathNodeKey(ps.root, ps.getStorageSlotsFn)
|
||||||
else:
|
else:
|
||||||
rc = ps.hexaDb.hexaryInspectPath(ps.root, path)
|
rc = path.hexarypathNodeKey(ps.root, ps.hexaDb)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
return ok(rc.value)
|
return ok(rc.value)
|
||||||
err(NodeNotFound)
|
err(NodeNotFound)
|
||||||
|
@ -533,7 +534,7 @@ proc getStorageSlotsData*(
|
||||||
if persistent:
|
if persistent:
|
||||||
leaf = path.hexaryPath(ps.root, ps.getStorageSlotsFn).leafData
|
leaf = path.hexaryPath(ps.root, ps.getStorageSlotsFn).leafData
|
||||||
else:
|
else:
|
||||||
leaf = path.hexaryPath(ps.root.to(RepairKey), ps.hexaDb).leafData
|
leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData
|
||||||
|
|
||||||
if leaf.len == 0:
|
if leaf.len == 0:
|
||||||
return err(AccountNotFound)
|
return err(AccountNotFound)
|
||||||
|
|
|
@ -39,7 +39,7 @@ import
|
||||||
../../sync_desc,
|
../../sync_desc,
|
||||||
".."/[constants, range_desc, worker_desc],
|
".."/[constants, range_desc, worker_desc],
|
||||||
./com/[com_error, get_account_range],
|
./com/[com_error, get_account_range],
|
||||||
./db/[hexary_paths, snapdb_accounts]
|
./db/[hexary_envelope, snapdb_accounts]
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
@ -166,7 +166,7 @@ proc accountsRangefetchImpl(
|
||||||
# Punch holes into the reported range of received accounts from the network
|
# Punch holes into the reported range of received accounts from the network
|
||||||
# if it there are gaps (described by dangling nodes.)
|
# if it there are gaps (described by dangling nodes.)
|
||||||
for w in gaps.innerGaps:
|
for w in gaps.innerGaps:
|
||||||
discard covered.reduce w.partialPath.pathEnvelope
|
discard covered.reduce w.partialPath.hexaryEnvelope
|
||||||
|
|
||||||
# Update book keeping
|
# Update book keeping
|
||||||
for w in covered.increasing:
|
for w in covered.increasing:
|
||||||
|
|
|
@ -9,12 +9,13 @@
|
||||||
# except according to those terms.
|
# except according to those terms.
|
||||||
|
|
||||||
import
|
import
|
||||||
|
std/sequtils,
|
||||||
chronicles,
|
chronicles,
|
||||||
chronos,
|
chronos,
|
||||||
eth/[common, p2p],
|
eth/[common, p2p],
|
||||||
stew/interval_set,
|
stew/interval_set,
|
||||||
".."/[constants, range_desc, worker_desc],
|
".."/[constants, range_desc, worker_desc],
|
||||||
./db/[hexary_desc, hexary_error, hexary_inspect, hexary_paths]
|
./db/[hexary_desc, hexary_error, hexary_envelope, hexary_inspect]
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
|
|
||||||
|
@ -179,7 +180,7 @@ proc subTriesNodesReclassify*(
|
||||||
|
|
||||||
for w in batch.checkNodes:
|
for w in batch.checkNodes:
|
||||||
let
|
let
|
||||||
iv = w.pathEnvelope
|
iv = w.hexaryEnvelope
|
||||||
nCov = batch.processed.covered iv
|
nCov = batch.processed.covered iv
|
||||||
|
|
||||||
if iv.len <= nCov:
|
if iv.len <= nCov:
|
||||||
|
@ -193,8 +194,12 @@ proc subTriesNodesReclassify*(
|
||||||
# Partially processed range, fetch an overlapping interval and
|
# Partially processed range, fetch an overlapping interval and
|
||||||
# remove that from the envelope of `w`.
|
# remove that from the envelope of `w`.
|
||||||
try:
|
try:
|
||||||
let paths = w.dismantle(
|
let paths = block:
|
||||||
rootKey, batch.getOverlapping(iv).value, getFn)
|
let rc = w.hexaryEnvelopeDecompose(
|
||||||
|
rootKey, batch.getOverlapping(iv).value, getFn)
|
||||||
|
if rc.isErr:
|
||||||
|
continue
|
||||||
|
rc.value.mapIt(it.partialpath)
|
||||||
delayed &= paths
|
delayed &= paths
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "reclassify dismantled", count, partialPath=w,
|
trace logTxt "reclassify dismantled", count, partialPath=w,
|
||||||
|
@ -211,7 +216,7 @@ proc subTriesNodesReclassify*(
|
||||||
batch.checkNodes.swap delayed
|
batch.checkNodes.swap delayed
|
||||||
delayed.setLen(0)
|
delayed.setLen(0)
|
||||||
|
|
||||||
batch.checkNodes = doneWith.pathSortUniq
|
batch.checkNodes = doneWith.hexaryEnvelopeUniq
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "reclassify finalise", count,
|
trace logTxt "reclassify finalise", count,
|
||||||
|
|
|
@@ -15,23 +15,24 @@ import
   std/[algorithm, distros, hashes, math, os, sets,
        sequtils, strformat, strutils, tables, times],
   chronicles,
-  eth/[p2p, rlp],
+  eth/[common, p2p, rlp],
   eth/trie/[nibbles],
   rocksdb,
   stint,
   stew/[byteutils, interval_set, results],
   unittest2,
-  ../nimbus/common/common,
+  ../nimbus/common/common as nimbus_common, # avoid name clash
   ../nimbus/db/[select_backend, storage_types],
   ../nimbus/core/chain,
   ../nimbus/sync/types,
   ../nimbus/sync/snap/range_desc,
   ../nimbus/sync/snap/worker/db/[
-    hexary_desc, hexary_error, hexary_inspect, hexary_paths, rocky_bulk_load,
-    snapdb_accounts, snapdb_desc, snapdb_pivot, snapdb_storage_slots],
+    hexary_desc, hexary_envelope, hexary_error, hexary_inspect, hexary_nearby,
+    hexary_paths, rocky_bulk_load, snapdb_accounts, snapdb_desc, snapdb_pivot,
+    snapdb_storage_slots],
   ../nimbus/utils/prettify,
   ./replay/[pp, undump_blocks, undump_accounts, undump_storages],
-  ./test_sync_snap/[bulk_test_xx, snap_test_xx, test_types]
+  ./test_sync_snap/[bulk_test_xx, snap_test_xx, test_decompose, test_types]

 const
   baseDir = [".", "..", ".."/"..", $DirSep]
@@ -302,7 +303,6 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
     var
       desc: SnapDbAccountsRef
       accKeys: seq[NodeKey]
-      accBaseTag: NodeTag

     test &"Snap-proofing {accountsList.len} items for state root ..{root.pp}":
       let
@@ -318,13 +318,13 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
         desc = SnapDbAccountsRef.init(dbBase, root, peer)

       # Load/accumulate data from several samples (needs some particular sort)
-      accBaseTag = accountsList.mapIt(it.base).sortMerge
+      let baseTag = accountsList.mapIt(it.base).sortMerge
       let packed = PackedAccountRange(
         accounts: accountsList.mapIt(it.data.accounts).sortMerge,
         proof: accountsList.mapIt(it.data.proof).flatten)
       # Merging intervals will produce gaps, so the result is expected OK but
       # different from `.isImportOk`
-      check desc.importAccounts(accBaseTag, packed, true).isOk
+      check desc.importAccounts(baseTag, packed, true).isOk

       # check desc.merge(lowerBound, accounts) == OkHexDb
       desc.assignPrettyKeys() # for debugging, make sure that state root ~ "$0"
@@ -372,9 +372,7 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
           # the account in the next for-loop cycle (if any.)
           check pfx & accKey.pp(false) == pfx & nextAccount.pp(false)
           if byNextKey.isOk:
-            nextAccount = byNextKey.value
-          else:
-            nextAccount = NodeKey.default
+            nextAccount = byNextKey.get(otherwise = NodeKey.default)

           # Check `prev` traversal funcionality
           if prevAccount != NodeKey.default:
@@ -409,59 +407,15 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
       # Beware: dumping a large database is not recommended
       #true.say "***", "database dump\n ", desc.dumpHexaDB()

-    test "Dismantle path prefix envelopes":
-      doAssert 1 < accKeys.len
-      let
-        iv = NodeTagRange.new(accBaseTag, accKeys[^2].to(NodeTag))
-        ivMin = iv.minPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
-        ivMax = iv.maxPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
-        pfxLen = ivMin.sharedPrefixLen ivMax
-      # Use some overlapping prefixes. Note that a prefix must refer to
-      # an existing node
-      for n in 0 .. pfxLen:
-        let
-          pfx = ivMin.slice(0, pfxLen - n).hexPrefixEncode
-          qfx = pfx.dismantle(root.to(NodeKey), iv, desc.hexaDB)
-
-        # Re-assemble intervals
-        let covered = NodeTagRangeSet.init()
-        for w in qfx:
-          let iv = pathEnvelope w
-          check iv.len == covered.merge iv
-
-        if covered.chunks == 1 and iv.minPt == low(NodeTag):
-          # Order: `iv` <= `covered`
-          check iv.maxPt <= covered.ge.value.minPt
-        elif covered.chunks == 1 and iv.maxPt == high(NodeTag):
-          # Order: `covered` <= `iv`
-          check covered.ge.value.maxPt <= iv.minPt
-        else:
-          # Covered contains two ranges were the gap is big enough for `iv`
-          check covered.chunks == 2
-          # Order: `covered.ge` <= `iv` <= `covered.le`
-          check covered.ge.value.maxPt <= iv.minPt
-          check iv.maxPt <= covered.le.value.minPt
-
-        # Must hold
-        check covered.le.value.minPt <= accKeys[^1].to(Nodetag)
-
-        when false: # or true:
-          let
-            cmaNlSp0 = ",\n" & repeat(" ",12)
-            cmaNlSpc = ",\n" & repeat(" ",13)
-          echo ">>> n=", n, " pfxMax=", pfxLen,
-            "\n pfx=", pfx,
-            "\n ivMin=", ivMin,
-            "\n iv1st=", accKeys[0],
-            "\n ivMax=", ivMax,
-            "\n ivPast=", accKeys[^1],
-            "\n covered=@[", toSeq(covered.increasing)
-              .mapIt(&"[{it.minPt}{cmaNlSpc}{it.maxPt}]")
-              .join(cmaNlSp0), "]",
-            "\n => @[", qfx.mapIt(it.toHex).join(cmaNlSpc), "]"
+    test &"Decompose path prefix envelopes on {info}":
+      if db.persistent:
+        # Store accounts persistent accounts DB
+        accKeys.test_decompose(root.to(NodeKey), desc.getAccountFn, desc.hexaDB)
+      else:
+        accKeys.test_decompose(root.to(NodeKey), desc.hexaDB, desc.hexaDB)

     test &"Storing/retrieving {accKeys.len} items " &
-        "on persistent state root registry":
+        "on persistent pivot/checkpoint registry":
       if not persistent:
         skip()
       else:
@@ -510,6 +464,7 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
         check rc.value.nSlotLists == 0
         check rc.value.processed == seq[(NodeTag,NodeTag)].default

+
 proc storagesRunner(
     noisy = true;
     persistent = true;
@@ -628,7 +583,8 @@ proc inspectionRunner(
         check not stats.stopped
         let
           dangling = stats.dangling.mapIt(it.partialPath)
-          keys = desc.hexaDb.hexaryInspectToKeys(rootKey, dangling)
+          keys = dangling.hexaryPathNodeKeys(
+            rootKey, desc.hexaDb, missingOk=true)
         check dangling.len == keys.len
         singleStats.add (desc.hexaDb.tab.len,stats)

@@ -667,7 +623,8 @@ proc inspectionRunner(
         check not stats.stopped
         let
           dangling = stats.dangling.mapIt(it.partialPath)
-          keys = desc.hexaDb.hexaryInspectToKeys(rootKey, dangling)
+          keys = dangling.hexaryPathNodeKeys(
+            rootKey, desc.hexaDb, missingOk=true)
         check dangling.len == keys.len
         # Must be the same as the in-memory fingerprint
         let ssn1 = singleStats[n][1].dangling.mapIt(it.partialPath)

@@ -701,8 +658,8 @@ proc inspectionRunner(
         check not stats.stopped
         let
           dangling = stats.dangling.mapIt(it.partialPath)
-          keys = desc.hexaDb.hexaryInspectToKeys(
-            rootKey, dangling.toHashSet.toSeq)
+          keys = dangling.hexaryPathNodeKeys(
+            rootKey, desc.hexaDb, missingOk=true)
         check dangling.len == keys.len
         accuStats.add (desc.hexaDb.tab.len, stats)

@@ -724,8 +681,8 @@ proc inspectionRunner(
         check not stats.stopped
         let
           dangling = stats.dangling.mapIt(it.partialPath)
-          keys = desc.hexaDb.hexaryInspectToKeys(
-            rootKey, dangling.toHashSet.toSeq)
+          keys = dangling.hexaryPathNodeKeys(
+            rootKey, desc.hexaDb, missingOk=true)
         check dangling.len == keys.len
         check accuStats[n][1] == stats

|
||||||
|
|
||||||
proc syncSnapMain*(noisy = defined(debug)) =
|
proc syncSnapMain*(noisy = defined(debug)) =
|
||||||
noisy.accountsRunner(persistent=true)
|
noisy.accountsRunner(persistent=true)
|
||||||
#noisy.accountsRunner(persistent=false) # problems unless running stand-alone
|
noisy.accountsRunner(persistent=false)
|
||||||
noisy.importRunner() # small sample, just verify functionality
|
noisy.importRunner() # small sample, just verify functionality
|
||||||
noisy.inspectionRunner()
|
noisy.inspectionRunner()
|
||||||
noisy.storeRunner()
|
noisy.storeRunner()
|
||||||
|
@ -1304,7 +1261,8 @@ when isMainModule:
|
||||||
import ./test_sync_snap/snap_other_xx
|
import ./test_sync_snap/snap_other_xx
|
||||||
noisy.showElapsed("accountsRunner()"):
|
noisy.showElapsed("accountsRunner()"):
|
||||||
for n,sam in snapOtherList:
|
for n,sam in snapOtherList:
|
||||||
false.accountsRunner(persistent=true, sam)
|
if n == 3:
|
||||||
|
false.accountsRunner(persistent=true, sam)
|
||||||
noisy.showElapsed("inspectRunner()"):
|
noisy.showElapsed("inspectRunner()"):
|
||||||
for n,sam in snapOtherHealingList:
|
for n,sam in snapOtherHealingList:
|
||||||
false.inspectionRunner(persistent=true, cascaded=false, sam)
|
false.inspectionRunner(persistent=true, cascaded=false, sam)
|
||||||
|
@ -1326,9 +1284,11 @@ when isMainModule:
|
||||||
# This one uses readily available dumps
|
# This one uses readily available dumps
|
||||||
when true: # and false:
|
when true: # and false:
|
||||||
false.inspectionRunner()
|
false.inspectionRunner()
|
||||||
for sam in snapTestList:
|
for n,sam in snapTestList:
|
||||||
|
false.accountsRunner(persistent=false, sam)
|
||||||
false.accountsRunner(persistent=true, sam)
|
false.accountsRunner(persistent=true, sam)
|
||||||
for sam in snapTestStorageList:
|
for n,sam in snapTestStorageList:
|
||||||
|
false.accountsRunner(persistent=false, sam)
|
||||||
false.accountsRunner(persistent=true, sam)
|
false.accountsRunner(persistent=true, sam)
|
||||||
false.storagesRunner(persistent=true, sam)
|
false.storagesRunner(persistent=true, sam)
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,202 @@
+# Nimbus - Types, data structures and shared utilities used in network sync
+#
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or
+# distributed except according to those terms.
+
+## Snap sync components tester and TDD environment
+
+import
+  std/[sequtils, strformat, strutils],
+  eth/[common, p2p, trie/nibbles],
+  stew/[byteutils, interval_set, results],
+  unittest2,
+  ../../nimbus/sync/snap/range_desc,
+  ../../nimbus/sync/snap/worker/db/[
+    hexary_desc, hexary_envelope, hexary_nearby, hexary_paths]
+
+const
+  cmaNlSp0 = ",\n" & repeat(" ",12)
+  cmaNlSpc = ",\n" & repeat(" ",13)
+
+# ------------------------------------------------------------------------------
+# Private functions
+# ------------------------------------------------------------------------------
+
+proc print_data(
+    pfx: Blob;
+    pfxLen: int;
+    ivMin: NibblesSeq;
+    firstTag: NodeTag;
+    lastTag: NodeTag;
+    ivMax: NibblesSeq;
+    gaps: NodeTagRangeSet;
+    gapPaths: seq[NodeTagRange];
+    info: string;
+      ) =
+  echo ">>>", info, " pfxMax=", pfxLen,
+    "\n pfx=", pfx, "/", ivMin.slice(0,pfxLen).hexPrefixEncode,
+    "\n ivMin=", ivMin,
+    "\n firstTag=", firstTag,
+    "\n lastTag=", lastTag,
+    "\n ivMax=", ivMax,
+    "\n gaps=@[", toSeq(gaps.increasing)
+        .mapIt(&"[{it.minPt}{cmaNlSpc}{it.maxPt}]")
+        .join(cmaNlSp0), "]",
+    "\n gapPaths=@[", gapPaths
+        .mapIt(&"[{it.minPt}{cmaNlSpc}{it.maxPt}]")
+        .join(cmaNlSp0), "]"
+
+
+proc print_data(
+    pfx: Blob;
+    qfx: seq[NodeSpecs];
+    iv: NodeTagRange;
+    firstTag: NodeTag;
+    lastTag: NodeTag;
+    rootKey: NodeKey;
+    db: HexaryTreeDbRef|HexaryGetFn;
+    dbg: HexaryTreeDbRef;
+      ) =
+  echo "***",
+    "\n qfx=@[", qfx
+        .mapIt(&"({it.partialPath.toHex},{it.nodeKey.pp(dbg)})")
+        .join(cmaNlSpc), "]",
+    "\n ivMin=", iv.minPt,
+    "\n ", iv.minPt.hexaryPath(rootKey,db).pp(dbg), "\n",
+    "\n firstTag=", firstTag,
+    "\n ", firstTag.hexaryPath(rootKey,db).pp(dbg), "\n",
+    "\n lastTag=", lastTag,
+    "\n ", lastTag.hexaryPath(rootKey,db).pp(dbg), "\n",
+    "\n ivMax=", iv.maxPt,
+    "\n ", iv.maxPt.hexaryPath(rootKey,db).pp(dbg), "\n",
+    "\n pfxMax=", pfx.hexaryEnvelope.maxPt,
+    "\n ", pfx.hexaryEnvelope.maxPt.hexaryPath(rootKey,db).pp(dbg)
+
+# ------------------------------------------------------------------------------
+# Public test function
+# ------------------------------------------------------------------------------
+
+proc test_decompose*(
+    accKeys: seq[NodeKey];                ## Accounts key range
+    rootKey: NodeKey;                     ## State root
+    db: HexaryTreeDbRef|HexaryGetFn;      ## Database abstraction
+    dbg: HexaryTreeDbRef;                 ## Debugging env
+      ) =
+  ## Testing body for `hexary_nearby` and `hexary_envelope` tests
+  # The base data from above cannot be relied upon as there might be
+  # stray account nodes in the proof *before* the left boundary.
+  doAssert 2 < accKeys.len
+
+  const
+    isPersistent = db.type is HexaryTreeDbRef
+  let
+    baseTag = accKeys[0].to(NodeTag) + 1.u256
+    firstTag = baseTag.hexaryNearbyRight(rootKey, db).get(
+      otherwise = low(Nodetag))
+    lastTag = accKeys[^2].to(NodeTag)
+    topTag = accKeys[^1].to(NodeTag) - 1.u256
+
+  # Verify set up
+  check baseTag < firstTag
+  check firstTag < lastTag
+  check lastTag < topTag
+
+  # Verify right boundary proof function (left boundary is
+  # correct by definition of `firstTag`.)
+  check lastTag == topTag.hexaryNearbyLeft(rootKey, db).get(
+    otherwise = high(NodeTag))
+
+  # Construct test range
+  let
+    iv = NodeTagRange.new(baseTag, topTag)
+    ivMin = iv.minPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
+    ivMax = iv.maxPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
+    pfxLen = ivMin.sharedPrefixLen ivMax
+
+  # Use some overlapping prefixes. Note that a prefix must refer to
+  # an existing node
+  for n in 0 .. pfxLen:
+    let
+      pfx = ivMin.slice(0, pfxLen - n).hexPrefixEncode
+      qfx = block:
+        let rc = pfx.hexaryEnvelopeDecompose(rootKey, iv, db)
+        check rc.isOk
+        if rc.isOk:
+          rc.value
+        else:
+          seq[NodeSpecs].default
+
+    # Assemble possible gaps in decomposed envelope `qfx`
+    let gaps = NodeTagRangeSet.init()
+
+    # Start with full envelope and remove decomposed enveloped from `qfx`
+    discard gaps.merge pfx.hexaryEnvelope
+
+    # There are no node points between `iv.minPt` (aka base) and the first
+    # account `firstTag` and beween `lastTag` and `iv.maxPt`. So only the
+    # interval `[firstTag,lastTag]` is to be fully covered by `gaps`.
+    block:
+      let iw = NodeTagRange.new(firstTag, lastTag)
+      check iw.len == gaps.reduce iw
+
+    for w in qfx:
+      # The envelope of `w` must be fully contained in `gaps`
+      let iw = w.partialPath.hexaryEnvelope
+      check iw.len == gaps.reduce iw
+
+    # Remove that space between the start of `iv` and the first account
+    # key (if any.).
+    if iv.minPt < firstTag:
+      discard gaps.reduce(iv.minPt, firstTag-1.u256)
+
+    # There are no node points between `lastTag` and `iv.maxPt`
+    if lastTag < iv.maxPt:
+      discard gaps.reduce(lastTag+1.u256, iv.maxPt)
+
+    # All gaps must be empty intervals
+    var gapPaths: seq[NodeTagRange]
+    for w in gaps.increasing:
+      let rc = w.minPt.hexaryPath(rootKey,db).hexaryNearbyRight(db)
+      if rc.isOk:
+        var firstTag = rc.value.getPartialPath.convertTo(NodeTag)
+
+        # The point `firstTag` might be zero if there is a missing node
+        # in between to advance to the next key.
+        if w.minPt <= firstTag:
+          # The interval `w` starts before the first interval
+          if firstTag <= w.maxPt:
+            # Make sure that there is no leaf node in the range
+            gapPaths.add w
+          continue
+
+      # Some sub-tries might not exists which leads to gaps
+      let
+        wMin = w.minPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
+        wMax = w.maxPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
+        nPfx = wMin.sharedPrefixLen wMax
+      for nibble in wMin[nPfx] .. wMax[nPfx]:
+        let wPfy = wMin.slice(0,nPfx) & @[nibble].initNibbleRange.slice(1)
+        if wPfy.hexaryPathNodeKey(rootKey, db, missingOk=true).isOk:
+          gapPaths.add wPfy.hexPrefixEncode.hexaryEnvelope
+
+    # Verify :)
+    check gapPaths == seq[NodeTagRange].default
+
+    when false: # or true:
+      print_data(
+        pfx, pfxLen, ivMin, firstTag, lastTag, ivMax, gaps, gapPaths, "n=" & $n)
+
+      print_data(
+        pfx, qfx, iv, firstTag, lastTag, rootKey, db, dbg)
+
+      if true: quit()
+
+# ------------------------------------------------------------------------------
+# End
+# ------------------------------------------------------------------------------
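The new test body above leans on `hexaryNearbyRight()`/`hexaryNearbyLeft()` to derive `firstTag` and `lastTag`, i.e. the nearest existing leaf at or after, respectively at or before, a given point. Purely as an illustration of those queries (the real procs walk the hexary trie and return a `Result`), here is a stand-in over a sorted list of integer keys using `lowerBound`/`upperBound` from `std/algorithm`; all names and values are assumptions for the sketch.

# Stand-in for the nearest-leaf queries used when setting up the test:
# nearbyRight(x) = smallest key >= x, nearbyLeft(x) = largest key <= x.
import std/[algorithm, options]

proc nearbyRight(keys: seq[int]; x: int): Option[int] =
  let i = keys.lowerBound(x)            # first index with keys[i] >= x
  result = if i < keys.len: some(keys[i]) else: none(int)

proc nearbyLeft(keys: seq[int]; x: int): Option[int] =
  let i = keys.upperBound(x)            # first index with keys[i] > x
  result = if 0 < i: some(keys[i-1]) else: none(int)

when isMainModule:
  let accKeys = @[100, 250, 300, 900]   # sorted leaf keys
  # Mirrors the test set up: firstTag is right of accKeys[0]+1,
  # lastTag is left of accKeys[^1]-1.
  doAssert accKeys.nearbyRight(accKeys[0] + 1).get == 250
  doAssert accKeys.nearbyLeft(accKeys[^1] - 1).get == 300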