Snap sync interval range extractor (#1449)
* Update comments and test noise

* Fix boundary proofs
  why:
    These were neither used in production nor unit tested. For production,
    other methods apply that test leaf range integrity directly, based on
    the proof nodes.

* Added `hexary_range()`: interval range + proof extractor (see the sketch below)
  details:
    + Will be used for the `snap/1` protocol handler
    + Unit tests added (also for testing the left boundary proof)
  todo:
    Need to verify completeness of proof nodes

* Reduce some Nim 1.6 compiler noise

* Stop unit test gossip for CI tests
Commit 197d2b16dd (parent d65bb18ad2)
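The new `hexaryRangeLeafsProof()` added by this commit (see `hexary_range.nim` further down) is the piece a `snap/1` `GetAccountRange`-style handler would call. Below is a minimal sketch of the intended call pattern, with import paths as seen from the test directory; the wrapper proc, its name and the `nLeafs` cap are illustrative assumptions, not part of this commit.

import
  stew/results,
  ../../nimbus/sync/snap/range_desc,
  ../../nimbus/sync/snap/worker/db/[hexary_desc, hexary_error, hexary_range]

proc fetchAccountRange(                  # hypothetical handler helper, not in this commit
    rootKey: NodeKey;                    # state root the peer asked about
    getFn: HexaryGetFn;                  # persistent database accessor
    iv: NodeTagRange;                    # requested [origin,limit] leaf path interval
      ): Result[RangeProof,HexaryError]
      {.gcsafe, raises: [Defect,RlpError]} =
  ## Collect at most 1024 leafs of `iv` plus left/right boundary proof nodes.
  let rc = iv.hexaryRangeLeafsProof(rootKey, getFn, nLeafs = 1024)
  if rc.isErr:
    return err(rc.error)
  ok(rc.value)                           # .leafs: key/data pairs, .proof: RLP nodes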
@ -11,9 +11,9 @@
 import
   chronicles,
   chronos,
-  eth/[p2p, p2p/peer_pool],
+  eth/p2p,
   ../protocol,
-  ../protocol/[snap/snap_types, trace_config],
+  ../protocol/snap/snap_types,
   ../../core/chain

 {.push raises: [Defect].}
|
||||
|
|
|
@ -10,7 +10,7 @@

 import
   chronicles,
-  eth/[common, p2p, p2p/private/p2p_types]
+  eth/[common, p2p/private/p2p_types]
   # ../../types

 type
|
||||
|
|
|
@ -9,7 +9,7 @@
 # except according to those terms.

 import
-  std/[hashes, options, sets, strutils],
+  std/[options, sets, strutils],
   chronicles,
   chronos,
   eth/[common, p2p],
|
||||
|
|
|
@ -407,6 +407,24 @@ proc convertTo*(node: RNodeRef; T: type Blob): T =

   writer.finish()

+proc convertTo*(node: XNodeObj; T: type Blob): T =
+  ## Variant of above `convertTo()` for `XNodeObj` nodes.
+  var writer = initRlpWriter()
+
+  case node.kind:
+  of Branch:
+    writer.append(node.bLink)
+  of Extension:
+    writer.startList(2)
+    writer.append(node.ePfx.hexPrefixEncode(isleaf = false))
+    writer.append(node.eLink)
+  of Leaf:
+    writer.startList(2)
+    writer.append(node.lPfx.hexPrefixEncode(isleaf = true))
+    writer.append(node.lData)
+
+  writer.finish()
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
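The encoding above follows the hexary trie wire format: a branch node is a 17-item RLP list (16 child links plus a value slot), while extension and leaf nodes are 2-item lists whose first item is the hex-prefix encoded path, the `isleaf` flag telling the two apart. The persistent-database code further down in this diff leans on exactly that when it switches on `listLen` (2 vs. 17). A hedged sketch of this classification; the helper name is an assumption:

import eth/[common, rlp]

proc nodeKindByArity(rawNode: Blob): string =
  ## Classify a raw trie node by RLP list arity, as the `XPath` code below does.
  case rlpFromBytes(rawNode).listLen:
  of 2:  result = "extension or leaf"   # [hex-prefix path, link-or-data]
  of 17: result = "branch"              # [link0 .. link15, value]
  else:  result = "garbled node"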
|
||||
|
|
|
@ -70,15 +70,6 @@
 ## * then there is a ``w = partialPath & w-ext`` in ``W`` with
 ##   ``p-ext = w-ext & some-ext``.
 ##
-## Relation to boundary proofs
-## ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-## Consider the decomposition of an empty *partial path* (the envelope of which
-## representing the whole leaf node path range) for a leaf node range `iv`.
-## This result is then a `boundary proof` for `iv` according to the definition
-## above though it is highly redundant. All *partial path* bottom level nodes
-## with envelopes disjunct to `iv` can be removed from `W` for a `boundary
-## proof`.
-##
 import
   std/[algorithm, sequtils, tables],
   eth/[common, trie/nibbles],
|
||||
|
@ -160,41 +151,41 @@ proc padPartialPath(pfx: NibblesSeq; dblNibble: byte): NodeKey =
|
|||
|
||||
|
||||
proc doDecomposeLeft(
|
||||
envPt: RPath|XPath;
|
||||
ivPt: RPath|XPath;
|
||||
envQ: RPath|XPath;
|
||||
ivQ: RPath|XPath;
|
||||
): Result[seq[NodeSpecs],HexaryError] =
|
||||
## Helper for `hexaryEnvelopeDecompose()` for handling left side of
|
||||
## envelope from partial path argument
|
||||
#
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# envPt.. -- envelope left end of partial path
|
||||
# |
|
||||
# ivPt.. -- `iv`, not fully covering left of `env`
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# ivQ[x]==envQ[x] \ -- envelope left end of partial path
|
||||
# | \
|
||||
# ivQ[x+1] -- `iv`, not fully covering left of `env`
|
||||
# :
|
||||
#
|
||||
var collect: seq[NodeSpecs]
|
||||
block rightCurbEnvelope:
|
||||
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len):
|
||||
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]:
|
||||
for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
|
||||
if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
|
||||
#
|
||||
# At this point, the `node` entries of either `path[n]` step are
|
||||
# At this point, the `node` entries of either `.path[n]` step are
|
||||
# the same. This is so because the predecessor steps were the same
|
||||
# or were the `rootKey` in case n == 0.
|
||||
#
|
||||
# But then (`node` entries being equal) the only way for the
|
||||
# `path[n]` steps to differ is in the entry selector `nibble` for
|
||||
# a branch node.
|
||||
# But then (`node` entries being equal) the only way for the `.path[n]`
|
||||
# steps to differ is in the entry selector `nibble` for a branch node.
|
||||
#
|
||||
for m in n ..< ivPt.path.len:
|
||||
for m in n ..< ivQ.path.len:
|
||||
let
|
||||
pfx = ivPt.getNibbles(0, m) # common path segment
|
||||
top = ivPt.path[m].nibble # need nibbles smaller than top
|
||||
pfx = ivQ.getNibbles(0, m) # common path segment
|
||||
top = ivQ.path[m].nibble # need nibbles smaller than top
|
||||
#
|
||||
# Incidentally for a non-`Branch` node, the value `top` becomes
|
||||
# `-1` and the `for`- loop will be ignored (which is correct)
|
||||
for nibble in 0 ..< top:
|
||||
let nodeKey = ivPt.path[m].node.bLink[nibble]
|
||||
let nodeKey = ivQ.path[m].node.bLink[nibble]
|
||||
if not nodeKey.isZeroLink:
|
||||
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||
|
@ -210,30 +201,31 @@ proc doDecomposeLeft(
|
|||
ok(collect)
|
||||
|
||||
proc doDecomposeRight(
|
||||
envPt: RPath|XPath;
|
||||
ivPt: RPath|XPath;
|
||||
envQ: RPath|XPath;
|
||||
ivQ: RPath|XPath;
|
||||
): Result[seq[NodeSpecs],HexaryError] =
|
||||
## Helper for `hexaryEnvelopeDecompose()` for handling right side of
|
||||
## envelope from partial path argument
|
||||
#
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# .. envPt -- envelope right end of partial path
|
||||
# |
|
||||
# .. ivPt -- `iv`, not fully covering right of `env`
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# / ivQ[x]==envQ[^1] -- envelope right end of partial path
|
||||
# / |
|
||||
# ivQ[x+1] -- `iv`, not fully covering right of `env`
|
||||
# :
|
||||
#
|
||||
var collect: seq[NodeSpecs]
|
||||
block leftCurbEnvelope:
|
||||
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len):
|
||||
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]:
|
||||
for m in n ..< ivPt.path.len:
|
||||
for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
|
||||
if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
|
||||
for m in n ..< ivQ.path.len:
|
||||
let
|
||||
pfx = ivPt.getNibbles(0, m) # common path segment
|
||||
base = ivPt.path[m].nibble # need nibbles greater/equal
|
||||
pfx = ivQ.getNibbles(0, m) # common path segment
|
||||
base = ivQ.path[m].nibble # need nibbles greater/equal
|
||||
if 0 <= base:
|
||||
for nibble in base+1 .. 15:
|
||||
let nodeKey = ivPt.path[m].node.bLink[nibble]
|
||||
let nodeKey = ivQ.path[m].node.bLink[nibble]
|
||||
if not nodeKey.isZeroLink:
|
||||
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||
|
@ -258,15 +250,15 @@ proc decomposeLeftImpl(
|
|||
# non-matching of the below if clause.
|
||||
if env.minPt < iv.minPt:
|
||||
let
|
||||
envPt = env.minPt.hexaryPath(rootKey, db)
|
||||
envQ = env.minPt.hexaryPath(rootKey, db)
|
||||
# Make sure that the min point is the nearest node to the right
|
||||
ivPt = block:
|
||||
ivQ = block:
|
||||
let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
rc.value
|
||||
block:
|
||||
let rc = envPt.doDecomposeLeft ivPt
|
||||
let rc = envQ.doDecomposeLeft ivQ
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
nodeSpex &= rc.value
|
||||
|
@ -285,14 +277,14 @@ proc decomposeRightImpl(
|
|||
var nodeSpex: seq[NodeSpecs]
|
||||
if iv.maxPt < env.maxPt:
|
||||
let
|
||||
envPt = env.maxPt.hexaryPath(rootKey, db)
|
||||
ivPt = block:
|
||||
envQ = env.maxPt.hexaryPath(rootKey, db)
|
||||
ivQ = block:
|
||||
let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
rc.value
|
||||
block:
|
||||
let rc = envPt.doDecomposeRight ivPt
|
||||
let rc = envQ.doDecomposeRight ivQ
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
nodeSpex &= rc.value
|
||||
|
|
|
@ -27,6 +27,10 @@
     TooManySlotAccounts
     NoAccountsYet

+    # range
+    LeafNodeExpected
+    FailedNextNode
+
     # nearby/boundary proofs
     NearbyExtensionError
     NearbyBranchError
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
import
|
||||
std/[sequtils, sets, strutils, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
stew/results,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error]
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@
 ## re-factored database layer.

 import
-  std/[sequtils, sets, strutils, tables],
+  std/[sequtils, strutils, tables],
   eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
|
||||
|
@ -508,10 +508,10 @@ proc rTreeSquashRootNode(
 # ------------------------------------------------------------------------------

 proc hexaryInterpolate*(
-    db: HexaryTreeDbRef;            ## Database
-    rootKey: NodeKey;               ## Root node hash
-    dbItems: var seq[RLeafSpecs];   ## List of path and leaf items
-    bootstrap = false;              ## Can create root node on-the-fly
+    db: HexaryTreeDbRef;            # Database
+    rootKey: NodeKey;               # Root node hash
+    dbItems: var seq[RLeafSpecs];   # List of path and leaf items
+    bootstrap = false;              # Can create root node on-the-fly
       ): Result[void,HexaryError]
       {.gcsafe, raises: [Defect,KeyError]} =
   ## From the argument list `dbItems`, leaf nodes will be added to the hexary
|
||||
|
|
|
@ -87,9 +87,9 @@ template noRlpErrorOops(info: static[string]; code: untyped) =
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyRightImpl(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
|
||||
## Wrapper
|
||||
|
@ -107,9 +107,9 @@ proc hexaryNearbyRightImpl(
|
|||
err(NearbyLeafExpected)
|
||||
|
||||
proc hexaryNearbyLeftImpl(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError,RlpError]} =
|
||||
## Wrapper
|
||||
|
@ -347,8 +347,8 @@ proc completeMost(
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
path: RPath; ## Partially expanded path
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
path: RPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): Result[RPath,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Extends the maximally extended argument nodes `path` to the right (i.e.
|
||||
|
@ -366,53 +366,72 @@ proc hexaryNearbyRight*(
|
|||
if path.path[^1].node.kind == Leaf:
|
||||
return ok(path)
|
||||
|
||||
var rPath = path
|
||||
var
|
||||
rPath = path
|
||||
start = true
|
||||
while 0 < rPath.path.len:
|
||||
let top = rPath.path[^1]
|
||||
if top.node.kind != Branch or
|
||||
top.nibble < 0 or
|
||||
rPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode) # error
|
||||
case top.node.kind:
|
||||
of Leaf:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Branch:
|
||||
if top.nibble < 0 or rPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Extension:
|
||||
rPath.tail = top.node.ePfx & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
continue
|
||||
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||
return err(NearbyDanglingLink) # error
|
||||
var
|
||||
step = top
|
||||
let
|
||||
rPathLen = rPath.path.len # in case of backtracking
|
||||
rPathTail = rPath.tail # in case of backtracking
|
||||
|
||||
let nextNibble = rPath.tail[0].int8
|
||||
if nextNibble < 15:
|
||||
let
|
||||
nextNode = db.tab[topLink]
|
||||
rPathLen = rPath.path.len # in case of backtracking
|
||||
rPathTail = rPath.tail
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
if rPath.tail <= nextNode.lPfx:
|
||||
return rPath.completeLeast(topLink, db)
|
||||
of Extension:
|
||||
if rPath.tail <= nextNode.ePfx:
|
||||
return rPath.completeLeast(topLink, db)
|
||||
of Branch:
|
||||
# Step down and complete with a branch link on the child node
|
||||
rPath.path = rPath.path & RPathStep(
|
||||
key: topLink,
|
||||
node: nextNode,
|
||||
nibble: nextNibble)
|
||||
# Look ahead checking next node
|
||||
if start:
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||
return err(NearbyDanglingLink) # error
|
||||
|
||||
# Find the next item to the right of the new top entry
|
||||
let step = rPath.path[^1]
|
||||
for inx in (step.nibble + 1) .. 15:
|
||||
let link = step.node.bLink[inx]
|
||||
if not link.isZero:
|
||||
rPath.path[^1].nibble = inx.int8
|
||||
return rPath.completeLeast(link, db)
|
||||
let nextNibble = rPath.tail[0].int8
|
||||
if start and nextNibble < 15:
|
||||
let nextNode = db.tab[topLink]
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
if rPath.tail <= nextNode.lPfx:
|
||||
return rPath.completeLeast(topLink, db)
|
||||
of Extension:
|
||||
if rPath.tail <= nextNode.ePfx:
|
||||
return rPath.completeLeast(topLink, db)
|
||||
of Branch:
|
||||
# Step down and complete with a branch link on the child node
|
||||
step = RPathStep(
|
||||
key: topLink,
|
||||
node: nextNode,
|
||||
nibble: nextNibble)
|
||||
rPath.path &= step
|
||||
|
||||
# Restore `rPath` and backtrack
|
||||
rPath.path.setLen(rPathLen)
|
||||
rPath.tail = rPathTail
|
||||
# Find the next item to the right of the current top entry
|
||||
for inx in (step.nibble + 1) .. 15:
|
||||
let link = step.node.bLink[inx]
|
||||
if not link.isZero:
|
||||
rPath.path[^1].nibble = inx.int8
|
||||
return rPath.completeLeast(link, db)
|
||||
|
||||
# Pop `Branch` node on top and append nibble to `tail`
|
||||
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
if start:
|
||||
# Retry without look ahead
|
||||
start = false
|
||||
|
||||
# Restore `rPath` (pop temporary extra step)
|
||||
if rPathLen < rPath.path.len:
|
||||
rPath.path.setLen(rPathLen)
|
||||
rPath.tail = rPathTail
|
||||
else:
|
||||
# Pop current `Branch` node on top and append nibble to `tail`
|
||||
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
# End while
|
||||
|
||||
# Pathological case: nfffff.. for n < f
|
||||
var step = path.path[0]
|
||||
|
@ -425,10 +444,9 @@ proc hexaryNearbyRight*(
|
|||
|
||||
err(NearbyFailed) # error
|
||||
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
path: XPath; ## Partially expanded path
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
path: XPath; # Partially expanded path
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[XPath,HexaryError]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryNearbyRight()` for persistent database
|
||||
|
@ -439,52 +457,71 @@ proc hexaryNearbyRight*(
|
|||
if path.path[^1].node.kind == Leaf:
|
||||
return ok(path)
|
||||
|
||||
var xPath = path
|
||||
var
|
||||
xPath = path
|
||||
start = true
|
||||
while 0 < xPath.path.len:
|
||||
let top = xPath.path[^1]
|
||||
if top.node.kind != Branch or
|
||||
top.nibble < 0 or
|
||||
xPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode) # error
|
||||
case top.node.kind:
|
||||
of Leaf:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Branch:
|
||||
if top.nibble < 0 or xPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Extension:
|
||||
xPath.tail = top.node.ePfx & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
continue
|
||||
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||
return err(NearbyDanglingLink) # error
|
||||
var
|
||||
step = top
|
||||
let
|
||||
xPathLen = xPath.path.len # in case of backtracking
|
||||
xPathTail = xPath.tail # in case of backtracking
|
||||
|
||||
let nextNibble = xPath.tail[0].int8
|
||||
if nextNibble < 15:
|
||||
let
|
||||
nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||
xPathLen = xPath.path.len # in case of backtracking
|
||||
xPathTail = xPath.tail
|
||||
case nextNodeRlp.listLen:
|
||||
of 2:
|
||||
if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]:
|
||||
return xPath.completeLeast(topLink, getFn)
|
||||
of 17:
|
||||
# Step down and complete with a branch link on the child node
|
||||
xPath.path = xPath.path & XPathStep(
|
||||
key: topLink,
|
||||
node: nextNodeRlp.toBranchNode,
|
||||
nibble: nextNibble)
|
||||
else:
|
||||
return err(NearbyGarbledNode) # error
|
||||
# Look ahead checking next node
|
||||
if start:
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||
return err(NearbyDanglingLink) # error
|
||||
|
||||
# Find the next item to the right of the new top entry
|
||||
let step = xPath.path[^1]
|
||||
for inx in (step.nibble + 1) .. 15:
|
||||
let link = step.node.bLink[inx]
|
||||
if 0 < link.len:
|
||||
xPath.path[^1].nibble = inx.int8
|
||||
return xPath.completeLeast(link, getFn)
|
||||
let nextNibble = xPath.tail[0].int8
|
||||
if nextNibble < 15:
|
||||
let nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||
case nextNodeRlp.listLen:
|
||||
of 2:
|
||||
if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]:
|
||||
return xPath.completeLeast(topLink, getFn)
|
||||
of 17:
|
||||
# Step down and complete with a branch link on the child node
|
||||
step = XPathStep(
|
||||
key: topLink,
|
||||
node: nextNodeRlp.toBranchNode,
|
||||
nibble: nextNibble)
|
||||
xPath.path &= step
|
||||
else:
|
||||
return err(NearbyGarbledNode) # error
|
||||
|
||||
# Restore `xPath` and backtrack
|
||||
xPath.path.setLen(xPathLen)
|
||||
xPath.tail = xPathTail
|
||||
# Find the next item to the right of the current top entry
|
||||
for inx in (step.nibble + 1) .. 15:
|
||||
let link = step.node.bLink[inx]
|
||||
if 0 < link.len:
|
||||
xPath.path[^1].nibble = inx.int8
|
||||
return xPath.completeLeast(link, getFn)
|
||||
|
||||
# Pop `Branch` node on top and append nibble to `tail`
|
||||
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
if start:
|
||||
# Retry without look ahead
|
||||
start = false
|
||||
|
||||
# Restore `xPath` (pop temporary extra step)
|
||||
if xPathLen < xPath.path.len:
|
||||
xPath.path.setLen(xPathLen)
|
||||
xPath.tail = xPathTail
|
||||
else:
|
||||
# Pop current `Branch` node on top and append nibble to `tail`
|
||||
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
# End while
|
||||
|
||||
# Pathological case: nfffff.. for n < f
|
||||
var step = path.path[0]
|
||||
|
@ -537,8 +574,8 @@ proc hexaryNearbyRightMissing*(
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
path: RPath; ## Partially expanded path
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
path: RPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): Result[RPath,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Similar to `hexaryNearbyRight()`.
|
||||
|
@ -552,53 +589,75 @@ proc hexaryNearbyLeft*(
|
|||
if path.path[^1].node.kind == Leaf:
|
||||
return ok(path)
|
||||
|
||||
var rPath = path
|
||||
var
|
||||
rPath = path
|
||||
start = true
|
||||
while 0 < rPath.path.len:
|
||||
let top = rPath.path[^1]
|
||||
if top.node.kind != Branch or
|
||||
top.nibble < 0 or
|
||||
rPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode) # error
|
||||
case top.node.kind:
|
||||
of Leaf:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Branch:
|
||||
if top.nibble < 0 or rPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Extension:
|
||||
rPath.tail = top.node.ePfx & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
continue
|
||||
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||
return err(NearbyDanglingLink) # error
|
||||
var
|
||||
step = top
|
||||
let
|
||||
rPathLen = rPath.path.len # in case of backtracking
|
||||
rPathTail = rPath.tail # in case of backtracking
|
||||
|
||||
let nextNibble = rPath.tail[0].int8
|
||||
if 0 < nextNibble:
|
||||
let
|
||||
nextNode = db.tab[topLink]
|
||||
rPathLen = rPath.path.len # in case of backtracking
|
||||
rPathTail = rPath.tail
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
if nextNode.lPfx <= rPath.tail:
|
||||
return rPath.completeMost(topLink, db)
|
||||
of Extension:
|
||||
if nextNode.ePfx <= rPath.tail:
|
||||
return rPath.completeMost(topLink, db)
|
||||
of Branch:
|
||||
# Step down and complete with a branch link on the child node
|
||||
rPath.path = rPath.path & RPathStep(
|
||||
key: topLink,
|
||||
node: nextNode,
|
||||
nibble: nextNibble)
|
||||
# Look ahead checking next node
|
||||
if start:
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.isZero or not db.tab.hasKey(topLink):
|
||||
return err(NearbyDanglingLink) # error
|
||||
|
||||
# Find the next item to the right of the new top entry
|
||||
let step = rPath.path[^1]
|
||||
for inx in (step.nibble - 1).countDown(0):
|
||||
let link = step.node.bLink[inx]
|
||||
if not link.isZero:
|
||||
rPath.path[^1].nibble = inx.int8
|
||||
return rPath.completeMost(link, db)
|
||||
let nextNibble = rPath.tail[0].int8
|
||||
if 0 < nextNibble:
|
||||
let
|
||||
nextNode = db.tab[topLink]
|
||||
rPathLen = rPath.path.len # in case of backtracking
|
||||
rPathTail = rPath.tail
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
if nextNode.lPfx <= rPath.tail:
|
||||
return rPath.completeMost(topLink, db)
|
||||
of Extension:
|
||||
if nextNode.ePfx <= rPath.tail:
|
||||
return rPath.completeMost(topLink, db)
|
||||
of Branch:
|
||||
# Step down and complete with a branch link on the child node
|
||||
step = RPathStep(
|
||||
key: topLink,
|
||||
node: nextNode,
|
||||
nibble: nextNibble)
|
||||
rPath.path &= step
|
||||
|
||||
# Restore `rPath` and backtrack
|
||||
rPath.path.setLen(rPathLen)
|
||||
rPath.tail = rPathTail
|
||||
# Find the next item to the right of the new top entry
|
||||
for inx in (step.nibble - 1).countDown(0):
|
||||
let link = step.node.bLink[inx]
|
||||
if not link.isZero:
|
||||
rPath.path[^1].nibble = inx.int8
|
||||
return rPath.completeMost(link, db)
|
||||
|
||||
# Pop `Branch` node on top and append nibble to `tail`
|
||||
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
if start:
|
||||
# Retry without look ahead
|
||||
start = false
|
||||
|
||||
# Restore `rPath` (pop temporary extra step)
|
||||
if rPathLen < rPath.path.len:
|
||||
rPath.path.setLen(rPathLen)
|
||||
rPath.tail = rPathTail
|
||||
else:
|
||||
# Pop current `Branch` node on top and append nibble to `tail`
|
||||
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
|
||||
rPath.path.setLen(rPath.path.len - 1)
|
||||
# End while
|
||||
|
||||
# Pathological case: n0000.. for 0 < n
|
||||
var step = path.path[0]
|
||||
|
@ -613,8 +672,8 @@ proc hexaryNearbyLeft*(
|
|||
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
path: XPath; ## Partially expanded path
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
path: XPath; # Partially expanded path
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[XPath,HexaryError]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryNearbyLeft()` for persistent database
|
||||
|
@ -625,52 +684,74 @@ proc hexaryNearbyLeft*(
|
|||
if path.path[^1].node.kind == Leaf:
|
||||
return ok(path)
|
||||
|
||||
var xPath = path
|
||||
var
|
||||
xPath = path
|
||||
start = true
|
||||
while 0 < xPath.path.len:
|
||||
let top = xPath.path[^1]
|
||||
if top.node.kind != Branch or
|
||||
top.nibble < 0 or
|
||||
xPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode) # error
|
||||
case top.node.kind:
|
||||
of Leaf:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Branch:
|
||||
if top.nibble < 0 or xPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Extension:
|
||||
xPath.tail = top.node.ePfx & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
continue
|
||||
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||
return err(NearbyDanglingLink) # error
|
||||
var
|
||||
step = top
|
||||
let
|
||||
xPathLen = xPath.path.len # in case of backtracking
|
||||
xPathTail = xPath.tail # in case of backtracking
|
||||
|
||||
let nextNibble = xPath.tail[0].int8
|
||||
if 0 < nextNibble:
|
||||
let
|
||||
nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||
xPathLen = xPath.path.len # in case of backtracking
|
||||
xPathTail = xPath.tail
|
||||
case nextNodeRlp.listLen:
|
||||
of 2:
|
||||
if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail:
|
||||
return xPath.completeMost(topLink, getFn)
|
||||
of 17:
|
||||
# Step down and complete with a branch link on the child node
|
||||
xPath.path = xPath.path & XPathStep(
|
||||
key: topLink,
|
||||
node: nextNodeRlp.toBranchNode,
|
||||
nibble: nextNibble)
|
||||
else:
|
||||
return err(NearbyGarbledNode) # error
|
||||
# Look ahead checking next node
|
||||
if start:
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.len == 0 or topLink.getFn().len == 0:
|
||||
return err(NearbyDanglingLink) # error
|
||||
|
||||
# Find the next item to the right of the new top entry
|
||||
let step = xPath.path[^1]
|
||||
for inx in (step.nibble - 1).countDown(0):
|
||||
let link = step.node.bLink[inx]
|
||||
if 0 < link.len:
|
||||
xPath.path[^1].nibble = inx.int8
|
||||
return xPath.completeMost(link, getFn)
|
||||
let nextNibble = xPath.tail[0].int8
|
||||
if 0 < nextNibble:
|
||||
let
|
||||
nextNodeRlp = rlpFromBytes topLink.getFn()
|
||||
xPathLen = xPath.path.len # in case of backtracking
|
||||
xPathTail = xPath.tail
|
||||
case nextNodeRlp.listLen:
|
||||
of 2:
|
||||
if nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1] <= xPath.tail:
|
||||
return xPath.completeMost(topLink, getFn)
|
||||
of 17:
|
||||
# Step down and complete with a branch link on the child node
|
||||
step = XPathStep(
|
||||
key: topLink,
|
||||
node: nextNodeRlp.toBranchNode,
|
||||
nibble: nextNibble)
|
||||
xPath.path &= step
|
||||
else:
|
||||
return err(NearbyGarbledNode) # error
|
||||
|
||||
# Restore `xPath` and backtrack
|
||||
xPath.path.setLen(xPathLen)
|
||||
xPath.tail = xPathTail
|
||||
# Find the next item to the right of the new top entry
|
||||
for inx in (step.nibble - 1).countDown(0):
|
||||
let link = step.node.bLink[inx]
|
||||
if 0 < link.len:
|
||||
xPath.path[^1].nibble = inx.int8
|
||||
return xPath.completeMost(link, getFn)
|
||||
|
||||
# Pop `Branch` node on top and append nibble to `tail`
|
||||
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
if start:
|
||||
# Retry without look ahead
|
||||
start = false
|
||||
|
||||
# Restore `xPath` (pop temporary extra step)
|
||||
if xPathLen < xPath.path.len:
|
||||
xPath.path.setLen(xPathLen)
|
||||
xPath.tail = xPathTail
|
||||
else:
|
||||
# Pop `Branch` node on top and append nibble to `tail`
|
||||
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
|
||||
xPath.path.setLen(xPath.path.len - 1)
|
||||
# End while
|
||||
|
||||
# Pathological case: n00000.. for 0 < n
|
||||
var step = path.path[0]
|
||||
|
@ -688,9 +769,9 @@ proc hexaryNearbyLeft*(
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
|
||||
|
@ -699,9 +780,9 @@ proc hexaryNearbyRight*(
|
|||
return baseTag.hexaryNearbyRightImpl(rootKey, db)
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryNearbyRight()` for persistent database
|
||||
|
@ -710,9 +791,9 @@ proc hexaryNearbyRight*(
|
|||
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
|
||||
|
@ -720,9 +801,9 @@ proc hexaryNearbyLeft*(
|
|||
return baseTag.hexaryNearbyLeftImpl(rootKey, db)
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
baseTag: NodeTag; ## Some node
|
||||
rootKey: NodeKey; ## State root
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryNearbyLeft()` for persistent database
|
||||
|
|
|
@ -420,9 +420,9 @@ proc leafData*(path: RPath): Blob =
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: NibblesSeq; ## partial path to resolve
|
||||
rootKey: NodeKey|RepairKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
partialPath: NibblesSeq; # partial path to resolve
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): RPath
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Compute the longest possible repair tree `db` path matching the `nodeKey`
|
||||
|
@ -460,9 +460,9 @@ proc hexaryPath*(
|
|||
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: NibblesSeq; ## partial path to resolve
|
||||
rootKey: NodeKey; ## State root
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
partialPath: NibblesSeq; # partial path to resolve
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): XPath
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Compute the longest possible path on an arbitrary hexary trie.
|
||||
|
@ -500,10 +500,10 @@ proc hexaryPath*(
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: NibblesSeq; ## Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
missingOk = false; ## Also return key for missing node
|
||||
partialPath: NibblesSeq; # Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Returns the `NodeKey` equivalent for the argment `partialPath` if this
|
||||
|
@ -524,10 +524,10 @@ proc hexaryPathNodeKey*(
|
|||
err()
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: Blob; ## Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
missingOk = false; ## Also return key for missing node
|
||||
partialPath: Blob; # Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for hex encoded partial path.
|
||||
|
@ -535,10 +535,10 @@ proc hexaryPathNodeKey*(
|
|||
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: NibblesSeq; ## Hex encoded partial path
|
||||
rootKey: NodeKey; ## State root
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
missingOk = false; ## Also return key for missing node
|
||||
partialPath: NibblesSeq; # Hex encoded partial path
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for persistent database.
|
||||
|
@ -556,10 +556,10 @@ proc hexaryPathNodeKey*(
|
|||
err()
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: Blob; ## Partial database path
|
||||
rootKey: NodeKey; ## State root
|
||||
getFn: HexaryGetFn; ## Database abstraction
|
||||
missingOk = false; ## Also return key for missing node
|
||||
partialPath: Blob; # Partial database path
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [Defect,RlpError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for persistent database and
|
||||
|
@ -568,10 +568,10 @@ proc hexaryPathNodeKey*(
|
|||
|
||||
|
||||
proc hexaryPathNodeKeys*(
|
||||
partialPaths: seq[Blob]; ## Partial paths segments
|
||||
rootKey: NodeKey|RepairKey; ## State root
|
||||
db: HexaryTreeDbRef; ## Database
|
||||
missingOk = false; ## Also return key for missing node
|
||||
partialPaths: seq[Blob]; # Partial paths segments
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): HashSet[NodeKey]
|
||||
{.gcsafe, raises: [Defect,KeyError]} =
|
||||
## Convert a list of path segments to a set of node keys
|
||||
|
|
|
@ -0,0 +1,173 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  std/[sequtils, sets, tables],
  chronicles,
  eth/[common, p2p, rlp, trie/nibbles],
  stew/[byteutils, interval_set],
  ../../range_desc,
  "."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]

{.push raises: [Defect].}

type
  RangeLeaf* = object
    key*: NodeKey                    ## Leaf node path
    data*: Blob                      ## Leaf node data

  RangeProof* = object
    leafs*: seq[RangeLeaf]
    proof*: seq[Blob]

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc convertTo(key: RepairKey; T: type NodeKey): T =
  ## Might be lossy, check before use (if at all, unless debugging)
  (addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

template collectLeafs(
    iv: NodeTagRange;                # Proofed range of leaf paths
    rootKey: NodeKey|RepairKey;      # State root
    db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
    nLeafs: int;                     # Implies maximal data size
      ): auto =
  ## Collect trie database leafs prototype. This directive is provided as
  ## `template` for avoiding varying exception annotations.
  var rc: Result[seq[RangeLeaf],HexaryError]

  block body:
    var
      nodeTag = iv.minPt
      prevTag: NodeTag
      rls: seq[RangeLeaf]

    # Fill at most `nLeafs` leaf nodes from interval range
    while rls.len < nLeafs and nodeTag <= iv.maxPt:
      # The following logic might be sub-optimal. A strict version of the
      # `next()` function that stops with an error at dangling links could
      # be faster if the leaf nodes are not too far apart on the hexary trie.
      var
        xPath = block:
          let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyRight(db)
          if rx.isErr:
            rc = typeof(rc).err(rx.error)
            break body
          rx.value
        rightKey = xPath.getPartialPath.convertTo(NodeKey)
        rightTag = rightKey.to(NodeTag)

      # Prevents from semi-endless looping
      if rightTag <= prevTag and 0 < rls.len:
        # Oops, should have been tackled by `hexaryNearbyRight()`
        rc = typeof(rc).err(FailedNextNode)
        break body # stop here

      rls.add RangeLeaf(
        key: rightKey,
        data: xPath.leafData)

      prevTag = nodeTag
      nodeTag = rightTag + 1.u256

    rc = typeof(rc).ok(rls)
    # End body

  rc


template updateProof(
    baseTag: NodeTag;                # Left boundary
    leafList: seq[RangeLeaf];        # Set of collected leafs
    rootKey: NodeKey|RepairKey;      # State root
    db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
      ): auto =
  ## Update leafs list by adding proof nodes. This directive is provided as
  ## `template` for avoiding varying exception annotations.
  var proof = baseTag.hexaryPath(rootKey, db)
                .path
                .mapIt(it.node)
                .filterIt(it.kind != Leaf)
                .mapIt(it.convertTo(Blob))
                .toHashSet
  if 0 < leafList.len:
    proof.incl leafList[^1].key.to(NodeTag).hexaryPath(rootKey, db)
                .path
                .mapIt(it.node)
                .filterIt(it.kind != Leaf)
                .mapIt(it.convertTo(Blob))
                .toHashSet

  RangeProof(
    leafs: leafList,
    proof: proof.toSeq)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc hexaryRangeLeafsProof*(
    iv: NodeTagRange;                # Proofed range of leaf paths
    rootKey: NodeKey;                # State root
    db: HexaryGetFn;                 # Database abstraction
    nLeafs = high(int);              # Implies maximal data size
      ): Result[RangeProof,HexaryError]
      {.gcsafe, raises: [Defect,RlpError]} =
  ## ...
  let rc = iv.collectLeafs(rootKey, db, nLeafs)
  if rc.isErr:
    err(rc.error)
  else:
    ok(iv.minPt.updateProof(rc.value, rootKey, db))

proc hexaryRangeLeafsProof*(
    baseTag: NodeTag;                # Left boundary
    leafList: seq[RangeLeaf];        # Set of already collected leafs
    rootKey: NodeKey;                # State root
    db: HexaryGetFn;                 # Database abstraction
      ): RangeProof
      {.gcsafe, raises: [Defect,RlpError]} =
  ## ...
  baseTag.updateProof(leafList, rootKey, db)


proc hexaryRangeLeafsProof*(
    iv: NodeTagRange;                # Proofed range of leaf paths
    rootKey: NodeKey;                # State root
    db: HexaryTreeDbRef;             # Database abstraction
    nLeafs = high(int);              # Implies maximal data size
      ): Result[RangeProof,HexaryError]
      {.gcsafe, raises: [Defect,KeyError]} =
  ## ...
  let rc = iv.collectLeafs(rootKey, db, nLeafs)
  if rc.isErr:
    err(rc.error)
  else:
    ok(iv.minPt.updateProof(rc.value, rootKey, db))

proc hexaryRangeLeafsProof*(
    baseTag: NodeTag;                # Left boundary
    leafList: seq[RangeLeaf];        # Set of already collected leafs
    rootKey: NodeKey;                # State root
    db: HexaryTreeDbRef;             # Database abstraction
      ): RangeProof
      {.gcsafe, raises: [Defect,KeyError]} =
  ## ...
  baseTag.updateProof(leafList, rootKey, db)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
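The second overload family above (taking a `baseTag` plus an already collected `leafList`) lets a caller shorten a leaf list after the fact, e.g. to fit a reply size budget, and re-attach matching boundary proofs. A hedged sketch of such a use; the budget policy, the proc name and the surrounding imports (the same as for `hexary_range` itself plus `stew/results`) are assumptions:

proc trimmedRangeProof(
    iv: NodeTagRange;                    # requested leaf path interval
    rootKey: NodeKey;                    # state root
    getFn: HexaryGetFn;                  # persistent database accessor
    maxLeafs: int;                       # illustrative reply budget
      ): Result[RangeProof,HexaryError]
      {.gcsafe, raises: [Defect,RlpError]} =
  let rc = iv.hexaryRangeLeafsProof(rootKey, getFn)
  if rc.isErr:
    return err(rc.error)
  if rc.value.leafs.len <= maxLeafs:
    return ok(rc.value)
  # Drop surplus leafs, then rebuild the proof for the shorter list; the left
  # boundary is unchanged, so the same base tag `iv.minPt` is passed along.
  let shorter = rc.value.leafs[0 ..< maxLeafs]
  ok(iv.minPt.hexaryRangeLeafsProof(shorter, rootKey, getFn))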
|
|
@ -322,7 +322,7 @@ proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] =
|
|||
let rPath= key.hexaryPath(ps.root, ps.hexaDb)
|
||||
result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"]
|
||||
|
||||
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
|
||||
proc dumpHexaDB*(xDb: HexaryTreeDbRef; root: NodeKey; indent = 4): string =
|
||||
## Dump the entries from the a generic accounts trie. These are
|
||||
## key value pairs for
|
||||
## ::
|
||||
|
@ -348,7 +348,11 @@ proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
|
|||
## added later (typically these nodes are update `Mutable` nodes.)
|
||||
##
|
||||
## Beware: dumping a large database is not recommended
|
||||
ps.hexaDb.pp(ps.root,indent)
|
||||
xDb.pp(root, indent)
|
||||
|
||||
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
|
||||
## Ditto
|
||||
ps.hexaDb.pp(ps.root, indent)
|
||||
|
||||
proc hexaryPpFn*(ps: SnapDbBaseRef): HexaryPpFn =
|
||||
## Key mapping function used in `HexaryTreeDB`
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
# except according to those terms.
|
||||
|
||||
import
|
||||
std/[math, sets, sequtils, strutils],
|
||||
std/[math, sets, sequtils],
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p, trie/trie_defs],
|
||||
|
|
|
@ -119,7 +119,7 @@ proc open*(state: var GUnzip; fileName: string):
   state.gzIn = fileName.open(fmRead)
   state.gzOpenOK = true
   state.gzMax = state.gzIn.getFileSize
-  state.gzCount = state.gzIn.readChars(strBuf, 0, strBuf.len)
+  state.gzCount = state.gzIn.readChars(toOpenArray(strBuf, 0, strBuf.len-1))

   # Parse GZIP header (RFC 1952)
   doAssert 18 < state.gzCount
|
||||
|
@ -157,7 +157,7 @@ proc nextChunk*(state: var GUnzip):
   result = ok("")

   while state.gzCount < state.gzMax:
-    var strLen = state.gzIn.readChars(strBuf, 0, strBuf.len)
+    var strLen = state.gzIn.readChars(toOpenArray(strBuf, 0, strBuf.len-1))
     if state.gzMax < state.gzCount + strLen:
       strLen = (state.gzMax - state.gzCount).int
     state.gzCount += strLen
|
||||
|
|
|
@ -27,8 +27,8 @@ import
|
|||
./replay/[pp, undump_accounts, undump_storages],
|
||||
./test_sync_snap/[
|
||||
bulk_test_xx, snap_test_xx,
|
||||
test_accounts, test_node_range, test_inspect, test_pivot, test_storage,
|
||||
test_db_timing, test_types]
|
||||
test_accounts, test_helpers, test_node_range, test_inspect, test_pivot,
|
||||
test_storage, test_db_timing, test_types]
|
||||
|
||||
const
|
||||
baseDir = [".", "..", ".."/"..", $DirSep]
|
||||
|
@ -61,9 +61,6 @@ else:
|
|||
const isUbuntu32bit = false
|
||||
|
||||
let
|
||||
# Forces `check()` to print the error (as opposed when using `isOk()`)
|
||||
OkHexDb = Result[void,HexaryError].ok()
|
||||
|
||||
# There was a problem with the Github/CI which results in spurious crashes
|
||||
# when leaving the `runner()` if the persistent ChainDBRef initialisation
|
||||
# was present, see `test_custom_network` for more details.
|
||||
|
@ -92,15 +89,6 @@ proc findFilePath(file: string;
|
|||
proc getTmpDir(sampleDir = sampleDirRefFile): string =
|
||||
sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir
|
||||
|
||||
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
|
||||
if noisy:
|
||||
if args.len == 0:
|
||||
echo "*** ", pfx
|
||||
elif 0 < pfx.len and pfx[^1] != ' ':
|
||||
echo pfx, " ", args.toSeq.join
|
||||
else:
|
||||
echo pfx, args.toSeq.join
|
||||
|
||||
proc setTraceLevel =
|
||||
discard
|
||||
when defined(chronicles_runtime_filtering) and loggingEnabled:
|
||||
|
@ -176,9 +164,6 @@ proc testDbs(workDir = ""; subDir = ""; instances = nTestDbInstances): TestDbs =
|
|||
for n in 0 ..< min(result.cdb.len, instances):
|
||||
result.cdb[n] = (result.dbDir / $n).newChainDB
|
||||
|
||||
proc lastTwo(a: openArray[string]): seq[string] =
|
||||
if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
|
||||
|
||||
proc snapDbRef(cdb: ChainDb; pers: bool): SnapDbRef =
|
||||
if pers: SnapDbRef.init(cdb) else: SnapDbRef.init(newMemoryDB())
|
||||
|
||||
|
@ -209,22 +194,43 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
|
|||
tmpDir.flushDbDir(sample.name)
|
||||
|
||||
suite &"SyncSnap: {fileInfo} accounts and proofs for {info}":
|
||||
test &"Proofing {accLst.len} items for state root ..{root.pp}":
|
||||
let desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
|
||||
accLst.test_accountsImport(desc, db.persistent)
|
||||
|
||||
var accKeys: seq[NodeKey]
|
||||
|
||||
block:
|
||||
# New common descriptor for this sub-group of tests
|
||||
let
|
||||
# Common descriptor for this group of tests
|
||||
desc = db.cdb[1].snapDbAccountsRef(root, db.persistent)
|
||||
desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
|
||||
hexaDb = desc.hexaDb
|
||||
getFn = desc.getAccountFn
|
||||
dbg = if noisy: hexaDb else: nil
|
||||
|
||||
# Database abstractions
|
||||
getFn = desc.getAccountFn # persistent case
|
||||
hexaDB = desc.hexaDB # in-memory, and debugging setup
|
||||
desc.assignPrettyKeys() # debugging, make sure that state root ~ "$0"
|
||||
|
||||
test &"Merging {accLst.len} proofs for state root ..{root.pp}":
|
||||
test &"Proofing {accLst.len} list items for state root ..{root.pp}":
|
||||
accLst.test_accountsImport(desc, db.persistent)
|
||||
|
||||
test &"Retrieve accounts & proofs for previous account ranges":
|
||||
let nPart = 3
|
||||
if db.persistent:
|
||||
accLst.test_NodeRangeRightProofs(getFn, nPart, dbg)
|
||||
else:
|
||||
accLst.test_NodeRangeRightProofs(hexaDB, nPart, dbg)
|
||||
|
||||
test &"Verify left boundary checks":
|
||||
if db.persistent:
|
||||
accLst.test_NodeRangeLeftBoundary(getFn, dbg)
|
||||
else:
|
||||
accLst.test_NodeRangeLeftBoundary(hexaDB, dbg)
|
||||
|
||||
block:
|
||||
# List of keys to be shared by sub-group
|
||||
var accKeys: seq[NodeKey]
|
||||
|
||||
# New common descriptor for this sub-group of tests
|
||||
let
|
||||
cdb = db.cdb[1]
|
||||
desc = cdb.snapDbAccountsRef(root, db.persistent)
|
||||
|
||||
test &"Merging {accLst.len} accounts/proofs lists into single list":
|
||||
accLst.test_accountsMergeProofs(desc, accKeys) # set up `accKeys`
|
||||
|
||||
test &"Revisiting {accKeys.len} stored items on ChainDBRef":
|
||||
|
@ -233,17 +239,19 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
|
|||
# true.say "***", "database dump\n ", desc.dumpHexaDB()
|
||||
|
||||
test &"Decompose path prefix envelopes on {info}":
|
||||
let hexaDb = desc.hexaDb
|
||||
if db.persistent:
|
||||
accKeys.test_NodeRangeDecompose(root, getFn, hexaDB)
|
||||
accKeys.test_NodeRangeDecompose(root, desc.getAccountFn, hexaDb)
|
||||
else:
|
||||
accKeys.test_NodeRangeDecompose(root, hexaDB, hexaDB)
|
||||
accKeys.test_NodeRangeDecompose(root, hexaDb, hexaDb)
|
||||
|
||||
test &"Storing/retrieving {accKeys.len} stored items " &
|
||||
"on persistent pivot/checkpoint registry":
|
||||
if db.persistent:
|
||||
accKeys.test_pivotStoreRead(cdb)
|
||||
else:
|
||||
skip()
|
||||
|
||||
test &"Storing/retrieving {accKeys.len} items " &
|
||||
"on persistent pivot/checkpoint registry":
|
||||
if db.persistent:
|
||||
accKeys.test_pivotStoreRead(db.cdb[0])
|
||||
else:
|
||||
skip()
|
||||
|
||||
proc storagesRunner(
|
||||
noisy = true;
|
||||
|
@ -539,14 +547,14 @@ when isMainModule:
|
|||
#
|
||||
|
||||
# This one uses dumps from the external `nimbus-eth1-blob` repo
|
||||
when true and false:
|
||||
when true: # and false:
|
||||
import ./test_sync_snap/snap_other_xx
|
||||
noisy.showElapsed("accountsRunner()"):
|
||||
for n,sam in snapOtherList:
|
||||
false.accountsRunner(persistent=true, sam)
|
||||
noisy.showElapsed("inspectRunner()"):
|
||||
for n,sam in snapOtherHealingList:
|
||||
false.inspectionRunner(persistent=true, cascaded=false, sam)
|
||||
#noisy.showElapsed("inspectRunner()"):
|
||||
# for n,sam in snapOtherHealingList:
|
||||
# false.inspectionRunner(persistent=true, cascaded=false, sam)
|
||||
|
||||
# This one uses dumps from the external `nimbus-eth1-blob` repo
|
||||
when true and false:
|
||||
|
@ -564,14 +572,14 @@ when isMainModule:
|
|||
|
||||
# This one uses readily available dumps
|
||||
when true: # and false:
|
||||
false.inspectionRunner()
|
||||
# false.inspectionRunner()
|
||||
for n,sam in snapTestList:
|
||||
false.accountsRunner(persistent=false, sam)
|
||||
false.accountsRunner(persistent=true, sam)
|
||||
for n,sam in snapTestStorageList:
|
||||
false.accountsRunner(persistent=false, sam)
|
||||
false.accountsRunner(persistent=true, sam)
|
||||
false.storagesRunner(persistent=true, sam)
|
||||
# false.storagesRunner(persistent=true, sam)
|
||||
|
||||
# This one uses readily available dumps
|
||||
when true and false:
|
||||
|
|
|
@ -12,8 +12,8 @@
|
|||
## Snap sync components tester and TDD environment
|
||||
|
||||
import
|
||||
std/[algorithm, sequtils, strformat, strutils, tables],
|
||||
eth/[common, p2p, trie/db],
|
||||
std/algorithm,
|
||||
eth/[common, p2p],
|
||||
unittest2,
|
||||
../../nimbus/db/select_backend,
|
||||
../../nimbus/sync/snap/range_desc,
|
||||
|
@ -36,7 +36,7 @@ proc flatten(list: openArray[seq[Blob]]): seq[Blob] =
|
|||
proc test_accountsImport*(
|
||||
inList: seq[UndumpAccounts];
|
||||
desc: SnapDbAccountsRef;
|
||||
persistent: bool
|
||||
persistent: bool;
|
||||
) =
|
||||
## Import accounts
|
||||
for n,w in inList:
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
## Snap sync components tester and TDD environment
|
||||
|
||||
import
|
||||
std/[algorithm, math, sequtils, strformat, strutils, times],
|
||||
std/[algorithm, math, sequtils, strformat, times],
|
||||
stew/byteutils,
|
||||
rocksdb,
|
||||
unittest2,
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
import
|
||||
std/times,
|
||||
eth/common,
|
||||
stew/results,
|
||||
stew/[interval_set, results],
|
||||
unittest2,
|
||||
../../nimbus/sync/snap/range_desc,
|
||||
../../nimbus/sync/snap/worker/db/hexary_error,
|
||||
|
@ -31,6 +31,9 @@ proc isImportOk*(rc: Result[SnapAccountsGaps,HexaryError]): bool =
|
|||
else:
|
||||
return true
|
||||
|
||||
proc lastTwo*(a: openArray[string]): seq[string] =
|
||||
if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public type conversions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -63,6 +66,12 @@ proc to*(w: (byte, NodeTag); T: type Blob): T =
|
|||
proc to*(t: NodeTag; T: type Blob): T =
|
||||
toSeq(t.UInt256.toBytesBE)
|
||||
|
||||
# ----------
|
||||
|
||||
proc convertTo*(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use (if at all, unless debugging)
|
||||
(addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, pretty printing
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -95,6 +104,27 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
   else:
     echo pfx, args.toSeq.join

+# ------------------------------------------------------------------------------
+# Public free parking
+# ------------------------------------------------------------------------------
+
+proc rangeAccountSizeMax*(n: int): int =
+  ## Max number of bytes needed to store `n` RLP encoded `Account()` type
+  ## entries. Note that this is an upper bound.
+  ##
+  ## The maximum size of a single RLP encoded account item can be determined
+  ## by setting every field of `Account()` to `high()` or `0xff`.
+  if 127 < n:
+    3 + n * 110
+  elif 0 < n:
+    2 + n * 110
+  else:
+    1
+
+proc rangeNumAccounts*(size: int): int =
+  ## ..
+  (size - 3) div 110
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
## Snap sync components tester and TDD environment
|
||||
|
||||
import
|
||||
std/[sequtils, strformat, strutils],
|
||||
std/[sequtils],
|
||||
eth/[common, p2p, trie/db],
|
||||
unittest2,
|
||||
../../nimbus/db/select_backend,
|
||||
|
|
|
@ -13,12 +13,16 @@
|
|||
|
||||
import
|
||||
std/[sequtils, strformat, strutils],
|
||||
eth/[common, p2p, trie/nibbles],
|
||||
eth/[common, p2p, rlp, trie/nibbles],
|
||||
stew/[byteutils, interval_set, results],
|
||||
unittest2,
|
||||
../../nimbus/sync/types,
|
||||
../../nimbus/sync/snap/range_desc,
|
||||
../../nimbus/sync/snap/worker/db/[
|
||||
hexary_desc, hexary_envelope, hexary_nearby, hexary_paths]
|
||||
hexary_desc, hexary_envelope, hexary_error, hexary_nearby, hexary_paths,
|
||||
hexary_range, snapdb_accounts, snapdb_desc],
|
||||
../replay/[pp, undump_accounts],
|
||||
./test_helpers
|
||||
|
||||
const
|
||||
cmaNlSp0 = ",\n" & repeat(" ",12)
|
||||
|
@ -78,6 +82,125 @@ proc print_data(
|
|||
"\n pfxMax=", pfx.hexaryEnvelope.maxPt,
|
||||
"\n ", pfx.hexaryEnvelope.maxPt.hexaryPath(rootKey,db).pp(dbg)
|
||||
|
||||
|
||||
proc printCompareRightLeafs(
|
||||
rootKey: NodeKey;
|
||||
baseTag: NodeTag;
|
||||
accounts: seq[PackedAccount];
|
||||
leafs: seq[RangeLeaf];
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
dbg: HexaryTreeDbRef; ## Debugging env
|
||||
) =
|
||||
let
|
||||
noisy = not dbg.isNil
|
||||
var
|
||||
top = 0
|
||||
nMax = min(accounts.len, leafs.len)
|
||||
step = nMax div 2
|
||||
|
||||
while top < nMax:
|
||||
while 1 < step and accounts[top+step].accKey != leafs[top+step].key:
|
||||
#noisy.say "***", "i=", top+step, " fail"
|
||||
step = max(1, step div 2)
|
||||
|
||||
if accounts[top+step].accKey == leafs[top+step].key:
|
||||
top += step
|
||||
step = max(1, step div 2)
|
||||
noisy.say "***", "i=", top, " step=", step, " ok"
|
||||
continue
|
||||
|
||||
let start = top
|
||||
top = nMax
|
||||
for i in start ..< top:
|
||||
if accounts[i].accKey == leafs[i].key:
|
||||
noisy.say "***", "i=", i, " skip, ok"
|
||||
continue
|
||||
|
||||
# Diagnostics and return
|
||||
check (i,accounts[i].accKey) == (i,leafs[i].key)
|
||||
|
||||
let
|
||||
lfsKey = leafs[i].key
|
||||
accKey = accounts[i].accKey
|
||||
prdKey = if 0 < i: accounts[i-1].accKey else: baseTag.to(NodeKey)
|
||||
nxtTag = if 0 < i: prdKey.to(NodeTag) + 1.u256 else: baseTag
|
||||
nxtPath = nxtTag.hexaryPath(rootKey,db)
|
||||
rightRc = nxtPath.hexaryNearbyRight(db)
|
||||
|
||||
if rightRc.isOk:
|
||||
check lfsKey == rightRc.value.getPartialPath.convertTo(NodeKey)
|
||||
else:
|
||||
check rightRc.error == HexaryError(0) # force error printing
|
||||
|
||||
noisy.say "\n***", "i=", i, "/", accounts.len,
|
||||
"\n",
|
||||
"\n prdKey=", prdKey,
|
||||
"\n ", prdKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n",
|
||||
"\n nxtKey=", nxtTag,
|
||||
"\n ", nxtPath.pp(dbg),
|
||||
"\n",
|
||||
"\n accKey=", accKey,
|
||||
"\n ", accKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n",
|
||||
"\n lfsKey=", lfsKey,
|
||||
"\n ", lfsKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n"
|
||||
return
|
||||
|
||||
|
||||
proc printCompareLeftNearby(
|
||||
rootKey: NodeKey;
|
||||
leftKey: NodeKey;
|
||||
rightKey: NodeKey;
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
dbg: HexaryTreeDbRef; ## Debugging env
|
||||
) =
|
||||
let
|
||||
noisy = not dbg.isNil
|
||||
rightPath = rightKey.hexaryPath(rootKey,db)
|
||||
toLeftRc = rightPath.hexaryNearbyLeft(db)
|
||||
var
|
||||
toLeftKey: NodeKey
|
||||
|
||||
if toLeftRc.isErr:
|
||||
check toLeftRc.error == HexaryError(0) # force error printing
|
||||
else:
|
||||
toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
|
||||
if toLeftKey == leftKey:
|
||||
return
|
||||
|
||||
noisy.say "\n***",
|
||||
" rightKey=", rightKey,
|
||||
"\n ", rightKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n",
|
||||
"\n leftKey=", leftKey,
|
||||
"\n ", leftKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n",
|
||||
"\n toLeftKey=", toLeftKey,
|
||||
"\n ", toLeftKey.hexaryPath(rootKey,db).pp(dbg),
|
||||
"\n"
|
||||
|
||||
|
||||
proc verifyAccountListSizes() =
|
||||
## RLP does not allow static check ..
|
||||
for n in [0, 1, 128, 129, 200]:
|
||||
check n.rangeAccountSizeMax == Account(
|
||||
storageRoot: Hash256(data: high(UInt256).toBytesBE),
|
||||
codeHash: Hash256(data: high(UInt256).toBytesBE),
|
||||
nonce: high(uint64),
|
||||
balance: high(UInt256)).repeat(n).encode.len
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, pretty printing
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pp(a: NodeTag; collapse = true): string =
|
||||
a.to(NodeKey).pp(collapse)
|
||||
|
||||
proc pp(iv: NodeTagRange; collapse = false): string =
|
||||
"(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public test function
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -198,6 +321,78 @@ proc test_NodeRangeDecompose*(
|
|||
|
||||
if true: quit()
|
||||
|
||||
|
||||
proc test_NodeRangeRightProofs*(
|
||||
inLst: seq[UndumpAccounts];
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
nSplit = 0; ## Also split intervals (unused)
|
||||
dbg = HexaryTreeDbRef(nil); ## Debugging env
|
||||
) =
|
||||
## Partition range and provide proofs suitable for `GetAccountRange` message
|
||||
## from `snap/1` protocol.
|
||||
let
|
||||
rootKey = inLst[0].root.to(NodeKey)
|
||||
noisy = not dbg.isNil
|
||||
|
||||
# RLP does not allow static check
|
||||
verifyAccountListSizes()
|
||||
|
||||
# Assuming the `inLst` entries have been stored in the DB already
|
||||
for n,w in inLst:
|
||||
let
|
||||
iv = NodeTagRange.new(w.base, w.data.accounts[^1].accKey.to(NodeTag))
|
||||
rc = iv.hexaryRangeLeafsProof(rootKey, db, high(int))
|
||||
check rc.isOk
|
||||
if rc.isErr:
|
||||
return
|
||||
|
||||
let
|
||||
leafs = rc.value.leafs
|
||||
accounts = w.data.accounts
|
||||
if leafs.len != accounts.len or accounts[^1].accKey != leafs[^1].key:
|
||||
noisy.say "***", "n=", n, " something went wrong .."
|
||||
check (n,leafs.len) == (n,accounts.len)
|
||||
rootKey.printCompareRightLeafs(w.base, accounts, leafs, db, dbg)
|
||||
return
|
||||
|
||||
# FIXME: verify that proof nodes are complete
|
||||
|
||||
check rc.value.proof.len <= w.data.proof.len
|
||||
check leafs[^1].key.to(NodeTag) <= iv.maxPt
|
||||
noisy.say "***", "n=", n,
|
||||
" leafs=", leafs.len,
|
||||
" proof=", rc.value.proof.len, "/", w.data.proof.len
|
||||
|
||||
|
||||
proc test_NodeRangeLeftBoundary*(
|
||||
inLst: seq[UndumpAccounts];
|
||||
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
|
||||
dbg = HexaryTreeDbRef(nil); ## Debugging env
|
||||
) =
|
||||
## Verify left side boundary checks
|
||||
let
|
||||
rootKey = inLst[0].root.to(NodeKey)
|
||||
noisy = not dbg.isNil
|
||||
|
||||
# Assuming the `inLst` entries have been stored in the DB already
|
||||
for n,w in inLst:
|
||||
let accounts = w.data.accounts
|
||||
for i in 1 ..< accounts.len:
|
||||
let
|
||||
leftKey = accounts[i-1].accKey
|
||||
rightKey = (accounts[i].accKey.to(NodeTag) - 1.u256).to(NodeKey)
|
||||
toLeftRc = rightKey.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
|
||||
if toLeftRc.isErr:
|
||||
check toLeftRc.error == HexaryError(0) # force error printing
|
||||
return
|
||||
let toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
|
||||
if leftKey != toLeftKey:
|
||||
let j = i-1
|
||||
check (n, j, leftKey) == (n, j, toLeftKey)
|
||||
rootKey.printCompareLeftNearby(leftKey, rightKey, db, dbg)
|
||||
return
|
||||
noisy.say "***", "n=", n, " accounts=", accounts.len
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
## Snap sync components tester and TDD environment
|
||||
|
||||
import
|
||||
std/[sequtils, strformat, strutils],
|
||||
eth/[common, p2p],
|
||||
unittest2,
|
||||
../../nimbus/db/select_backend,
|
||||
|
@ -34,7 +33,8 @@ proc test_pivotStoreRead*(
|
|||
(4.to(NodeTag),5.to(NodeTag)),
|
||||
(6.to(NodeTag),7.to(NodeTag))]
|
||||
slotAccounts = seq[NodeKey].default
|
||||
for n,w in accKeys:
|
||||
for n in 0 ..< accKeys.len:
|
||||
let w = accKeys[n]
|
||||
check dbBase.savePivot(
|
||||
SnapDbPivotRegistry(
|
||||
header: BlockHeader(stateRoot: w.to(Hash256)),
|
||||
|
@ -50,7 +50,13 @@ proc test_pivotStoreRead*(
|
|||
check rc.value.nAccounts == n.uint64
|
||||
check rc.value.nSlotLists == n.uint64
|
||||
check rc.value.processed == processed
|
||||
for n,w in accKeys:
|
||||
# Stop gossiping (happens with corrupted database)
|
||||
if rc.value.nAccounts != n.uint64 or
|
||||
rc.value.nSlotLists != n.uint64 or
|
||||
rc.value.processed != processed:
|
||||
return
|
||||
for n in 0 ..< accKeys.len:
|
||||
let w = accKeys[n]
|
||||
block:
|
||||
let rc = dbBase.recoverPivot(w)
|
||||
check rc.isOk
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
## Snap sync components tester and TDD environment
|
||||
|
||||
import
|
||||
std/[sequtils, strformat, strutils, tables],
|
||||
std/[sequtils, tables],
|
||||
eth/[common, p2p],
|
||||
unittest2,
|
||||
../../nimbus/db/select_backend,
|
||||
|
|