Snap sync interval range extractor (#1449)

* Update comments and test noise

* Fix boundary proofs

why:
  These were neither used in production nor unit tested. For production,
  other methods apply that test leaf range integrity directly based on the
  proof nodes.

* Added `hexary_range()`: interval range + proof extractor

details:
+ Will be used for `snap/1` protocol handler
+ Unit tests added (also for testing left boundary proof)

todo:
  Need to verify completeness of proof nodes

* Reduce some nim 1.6 compiler noise

* Stop unit test gossip for CI tests
Jordan Hrycaj 2023-01-30 17:50:58 +00:00 committed by GitHub
parent d65bb18ad2
commit 197d2b16dd
22 changed files with 837 additions and 327 deletions


@ -11,9 +11,9 @@
import import
chronicles, chronicles,
chronos, chronos,
eth/[p2p, p2p/peer_pool], eth/p2p,
../protocol, ../protocol,
../protocol/[snap/snap_types, trace_config], ../protocol/snap/snap_types,
../../core/chain ../../core/chain
{.push raises: [Defect].} {.push raises: [Defect].}


@ -10,7 +10,7 @@
import import
chronicles, chronicles,
eth/[common, p2p, p2p/private/p2p_types] eth/[common, p2p/private/p2p_types]
# ../../types # ../../types
type type


@ -9,7 +9,7 @@
# except according to those terms. # except according to those terms.
import import
std/[hashes, options, sets, strutils], std/[options, sets, strutils],
chronicles, chronicles,
chronos, chronos,
eth/[common, p2p], eth/[common, p2p],


@ -407,6 +407,24 @@ proc convertTo*(node: RNodeRef; T: type Blob): T =
writer.finish() writer.finish()
proc convertTo*(node: XNodeObj; T: type Blob): T =
## Variant of above `convertTo()` for `XNodeObj` nodes.
var writer = initRlpWriter()
case node.kind:
of Branch:
writer.append(node.bLink)
of Extension:
writer.startList(2)
writer.append(node.ePfx.hexPrefixEncode(isleaf = false))
writer.append(node.eLink)
of Leaf:
writer.startList(2)
writer.append(node.lPfx.hexPrefixEncode(isleaf = true))
writer.append(node.lData)
writer.finish()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
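
A minimal usage sketch for the new `convertTo()` overload above: it hand-builds
a `Leaf` variant of `XNodeObj` and serialises it to RLP. The example values and
the assumption that `hexary_desc` exports `XNodeObj`, `Leaf` and `Blob` are
illustrative only, not part of the commit.

    # Sketch only -- field names follow the `convertTo()` body above
    import eth/trie/nibbles
    let leaf = XNodeObj(
      kind:  Leaf,
      lPfx:  @[byte 0x12, 0x34].initNibbleRange,  # partial path nibbles
      lData: @[byte 0xde, 0xad])                  # leaf payload
    let blob = leaf.convertTo(Blob)  # RLP: [hexPrefixEncode(lPfx, true), lData]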


@ -70,15 +70,6 @@
## * then there is a ``w = partialPath & w-ext`` in ``W`` with ## * then there is a ``w = partialPath & w-ext`` in ``W`` with
## ``p-ext = w-ext & some-ext``. ## ``p-ext = w-ext & some-ext``.
## ##
## Relation to boundary proofs
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^
## Consider the decomposition of an empty *partial path* (the envelope of which
## representing the whole leaf node path range) for a leaf node range `iv`.
## This result is then a `boundary proof` for `iv` according to the definition
## above though it is highly redundant. All *partial path* bottom level nodes
## with envelopes disjunct to `iv` can be removed from `W` for a `boundary
## proof`.
##
import import
std/[algorithm, sequtils, tables], std/[algorithm, sequtils, tables],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
@ -160,8 +151,8 @@ proc padPartialPath(pfx: NibblesSeq; dblNibble: byte): NodeKey =
proc doDecomposeLeft( proc doDecomposeLeft(
envPt: RPath|XPath; envQ: RPath|XPath;
ivPt: RPath|XPath; ivQ: RPath|XPath;
): Result[seq[NodeSpecs],HexaryError] = ): Result[seq[NodeSpecs],HexaryError] =
## Helper for `hexaryEnvelopeDecompose()` for handling left side of ## Helper for `hexaryEnvelopeDecompose()` for handling left side of
## envelope from partial path argument ## envelope from partial path argument
@ -169,32 +160,32 @@ proc doDecomposeLeft(
# partialPath # partialPath
# / \ # / \
# / \ # / \
# envPt.. -- envelope left end of partial path # ivQ[x]==envQ[x] \ -- envelope left end of partial path
# | # | \
# ivPt.. -- `iv`, not fully covering left of `env` # ivQ[x+1] -- `iv`, not fully covering left of `env`
# :
# #
var collect: seq[NodeSpecs] var collect: seq[NodeSpecs]
block rightCurbEnvelope: block rightCurbEnvelope:
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len): for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]: if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
# #
# At this point, the `node` entries of either `path[n]` step are # At this point, the `node` entries of either `.path[n]` step are
# the same. This is so because the predecessor steps were the same # the same. This is so because the predecessor steps were the same
# or were the `rootKey` in case n == 0. # or were the `rootKey` in case n == 0.
# #
# But then (`node` entries being equal) the only way for the # But then (`node` entries being equal) the only way for the `.path[n]`
# `path[n]` steps to differ is in the entry selector `nibble` for # steps to differ is in the entry selector `nibble` for a branch node.
# a branch node.
# #
for m in n ..< ivPt.path.len: for m in n ..< ivQ.path.len:
let let
pfx = ivPt.getNibbles(0, m) # common path segment pfx = ivQ.getNibbles(0, m) # common path segment
top = ivPt.path[m].nibble # need nibbles smaller than top top = ivQ.path[m].nibble # need nibbles smaller than top
# #
# Incidentally for a non-`Branch` node, the value `top` becomes # Incidentally for a non-`Branch` node, the value `top` becomes
# `-1` and the `for`- loop will be ignored (which is correct) # `-1` and the `for`- loop will be ignored (which is correct)
for nibble in 0 ..< top: for nibble in 0 ..< top:
let nodeKey = ivPt.path[m].node.bLink[nibble] let nodeKey = ivQ.path[m].node.bLink[nibble]
if not nodeKey.isZeroLink: if not nodeKey.isZeroLink:
collect.add nodeKey.toNodeSpecs hexPrefixEncode( collect.add nodeKey.toNodeSpecs hexPrefixEncode(
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false) pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
@ -210,8 +201,8 @@ proc doDecomposeLeft(
ok(collect) ok(collect)
proc doDecomposeRight( proc doDecomposeRight(
envPt: RPath|XPath; envQ: RPath|XPath;
ivPt: RPath|XPath; ivQ: RPath|XPath;
): Result[seq[NodeSpecs],HexaryError] = ): Result[seq[NodeSpecs],HexaryError] =
## Helper for `hexaryEnvelopeDecompose()` for handling right side of ## Helper for `hexaryEnvelopeDecompose()` for handling right side of
## envelope from partial path argument ## envelope from partial path argument
@ -219,21 +210,22 @@ proc doDecomposeRight(
# partialPath # partialPath
# / \ # / \
# / \ # / \
# .. envPt -- envelope right end of partial path # / ivQ[x]==envQ[^1] -- envelope right end of partial path
# | # / |
# .. ivPt -- `iv`, not fully covering right of `env` # ivQ[x+1] -- `iv`, not fully covering right of `env`
# :
# #
var collect: seq[NodeSpecs] var collect: seq[NodeSpecs]
block leftCurbEnvelope: block leftCurbEnvelope:
for n in 0 ..< min(envPt.path.len+1, ivPt.path.len): for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
if n == envPt.path.len or envPt.path[n] != ivPt.path[n]: if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
for m in n ..< ivPt.path.len: for m in n ..< ivQ.path.len:
let let
pfx = ivPt.getNibbles(0, m) # common path segment pfx = ivQ.getNibbles(0, m) # common path segment
base = ivPt.path[m].nibble # need nibbles greater/equal base = ivQ.path[m].nibble # need nibbles greater/equal
if 0 <= base: if 0 <= base:
for nibble in base+1 .. 15: for nibble in base+1 .. 15:
let nodeKey = ivPt.path[m].node.bLink[nibble] let nodeKey = ivQ.path[m].node.bLink[nibble]
if not nodeKey.isZeroLink: if not nodeKey.isZeroLink:
collect.add nodeKey.toNodeSpecs hexPrefixEncode( collect.add nodeKey.toNodeSpecs hexPrefixEncode(
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false) pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
@ -258,15 +250,15 @@ proc decomposeLeftImpl(
# non-matching of the below if clause. # non-matching of the below if clause.
if env.minPt < iv.minPt: if env.minPt < iv.minPt:
let let
envPt = env.minPt.hexaryPath(rootKey, db) envQ = env.minPt.hexaryPath(rootKey, db)
# Make sure that the min point is the nearest node to the right # Make sure that the min point is the nearest node to the right
ivPt = block: ivQ = block:
let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db) let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db)
if rc.isErr: if rc.isErr:
return err(rc.error) return err(rc.error)
rc.value rc.value
block: block:
let rc = envPt.doDecomposeLeft ivPt let rc = envQ.doDecomposeLeft ivQ
if rc.isErr: if rc.isErr:
return err(rc.error) return err(rc.error)
nodeSpex &= rc.value nodeSpex &= rc.value
@ -285,14 +277,14 @@ proc decomposeRightImpl(
var nodeSpex: seq[NodeSpecs] var nodeSpex: seq[NodeSpecs]
if iv.maxPt < env.maxPt: if iv.maxPt < env.maxPt:
let let
envPt = env.maxPt.hexaryPath(rootKey, db) envQ = env.maxPt.hexaryPath(rootKey, db)
ivPt = block: ivQ = block:
let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db) let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
if rc.isErr: if rc.isErr:
return err(rc.error) return err(rc.error)
rc.value rc.value
block: block:
let rc = envPt.doDecomposeRight ivPt let rc = envQ.doDecomposeRight ivQ
if rc.isErr: if rc.isErr:
return err(rc.error) return err(rc.error)
nodeSpex &= rc.value nodeSpex &= rc.value


@ -27,6 +27,10 @@ type
TooManySlotAccounts TooManySlotAccounts
NoAccountsYet NoAccountsYet
# range
LeafNodeExpected
FailedNextNode
# nearby/boundary proofs # nearby/boundary proofs
NearbyExtensionError NearbyExtensionError
NearbyBranchError NearbyBranchError


@ -11,7 +11,6 @@
import import
std/[sequtils, sets, strutils, tables], std/[sequtils, sets, strutils, tables],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
stew/results,
../../range_desc, ../../range_desc,
"."/[hexary_desc, hexary_error] "."/[hexary_desc, hexary_error]


@ -15,7 +15,7 @@
## re-factored database layer. ## re-factored database layer.
import import
std/[sequtils, sets, strutils, tables], std/[sequtils, strutils, tables],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
stew/results, stew/results,
../../range_desc, ../../range_desc,
@ -508,10 +508,10 @@ proc rTreeSquashRootNode(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryInterpolate*( proc hexaryInterpolate*(
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
rootKey: NodeKey; ## Root node hash rootKey: NodeKey; # Root node hash
dbItems: var seq[RLeafSpecs]; ## List of path and leaf items dbItems: var seq[RLeafSpecs]; # List of path and leaf items
bootstrap = false; ## Can create root node on-the-fly bootstrap = false; # Can create root node on-the-fly
): Result[void,HexaryError] ): Result[void,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## From the argument list `dbItems`, leaf nodes will be added to the hexary ## From the argument list `dbItems`, leaf nodes will be added to the hexary


@ -87,9 +87,9 @@ template noRlpErrorOops(info: static[string]; code: untyped) =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryNearbyRightImpl( proc hexaryNearbyRightImpl(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError,RlpError]} = {.gcsafe, raises: [Defect,KeyError,RlpError]} =
## Wrapper ## Wrapper
@ -107,9 +107,9 @@ proc hexaryNearbyRightImpl(
err(NearbyLeafExpected) err(NearbyLeafExpected)
proc hexaryNearbyLeftImpl( proc hexaryNearbyLeftImpl(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError,RlpError]} = {.gcsafe, raises: [Defect,KeyError,RlpError]} =
## Wrapper ## Wrapper
@ -347,8 +347,8 @@ proc completeMost(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryNearbyRight*( proc hexaryNearbyRight*(
path: RPath; ## Partially expanded path path: RPath; # Partially expanded path
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
): Result[RPath,HexaryError] ): Result[RPath,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Extends the maximally extended argument nodes `path` to the right (i.e. ## Extends the maximally extended argument nodes `path` to the right (i.e.
@ -366,24 +366,37 @@ proc hexaryNearbyRight*(
if path.path[^1].node.kind == Leaf: if path.path[^1].node.kind == Leaf:
return ok(path) return ok(path)
var rPath = path var
rPath = path
start = true
while 0 < rPath.path.len: while 0 < rPath.path.len:
let top = rPath.path[^1] let top = rPath.path[^1]
if top.node.kind != Branch or case top.node.kind:
top.nibble < 0 or of Leaf:
rPath.tail.len == 0: return err(NearbyUnexpectedNode)
return err(NearbyUnexpectedNode) # error of Branch:
if top.nibble < 0 or rPath.tail.len == 0:
return err(NearbyUnexpectedNode)
of Extension:
rPath.tail = top.node.ePfx & rPath.tail
rPath.path.setLen(rPath.path.len - 1)
continue
var
step = top
let
rPathLen = rPath.path.len # in case of backtracking
rPathTail = rPath.tail # in case of backtracking
# Look ahead checking next node
if start:
let topLink = top.node.bLink[top.nibble] let topLink = top.node.bLink[top.nibble]
if topLink.isZero or not db.tab.hasKey(topLink): if topLink.isZero or not db.tab.hasKey(topLink):
return err(NearbyDanglingLink) # error return err(NearbyDanglingLink) # error
let nextNibble = rPath.tail[0].int8 let nextNibble = rPath.tail[0].int8
if nextNibble < 15: if start and nextNibble < 15:
let let nextNode = db.tab[topLink]
nextNode = db.tab[topLink]
rPathLen = rPath.path.len # in case of backtracking
rPathTail = rPath.tail
case nextNode.kind case nextNode.kind
of Leaf: of Leaf:
if rPath.tail <= nextNode.lPfx: if rPath.tail <= nextNode.lPfx:
@ -393,26 +406,32 @@ proc hexaryNearbyRight*(
return rPath.completeLeast(topLink, db) return rPath.completeLeast(topLink, db)
of Branch: of Branch:
# Step down and complete with a branch link on the child node # Step down and complete with a branch link on the child node
rPath.path = rPath.path & RPathStep( step = RPathStep(
key: topLink, key: topLink,
node: nextNode, node: nextNode,
nibble: nextNibble) nibble: nextNibble)
rPath.path &= step
# Find the next item to the right of the new top entry # Find the next item to the right of the current top entry
let step = rPath.path[^1]
for inx in (step.nibble + 1) .. 15: for inx in (step.nibble + 1) .. 15:
let link = step.node.bLink[inx] let link = step.node.bLink[inx]
if not link.isZero: if not link.isZero:
rPath.path[^1].nibble = inx.int8 rPath.path[^1].nibble = inx.int8
return rPath.completeLeast(link, db) return rPath.completeLeast(link, db)
# Restore `rPath` and backtrack if start:
# Retry without look ahead
start = false
# Restore `rPath` (pop temporary extra step)
if rPathLen < rPath.path.len:
rPath.path.setLen(rPathLen) rPath.path.setLen(rPathLen)
rPath.tail = rPathTail rPath.tail = rPathTail
else:
# Pop `Branch` node on top and append nibble to `tail` # Pop current `Branch` node on top and append nibble to `tail`
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
rPath.path.setLen(rPath.path.len - 1) rPath.path.setLen(rPath.path.len - 1)
# End while
# Pathological case: nfffff.. for n < f # Pathological case: nfffff.. for n < f
var step = path.path[0] var step = path.path[0]
@ -425,10 +444,9 @@ proc hexaryNearbyRight*(
err(NearbyFailed) # error err(NearbyFailed) # error
proc hexaryNearbyRight*( proc hexaryNearbyRight*(
path: XPath; ## Partially expanded path path: XPath; # Partially expanded path
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
): Result[XPath,HexaryError] ): Result[XPath,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryNearbyRight()` for persistant database ## Variant of `hexaryNearbyRight()` for persistant database
@ -439,52 +457,71 @@ proc hexaryNearbyRight*(
if path.path[^1].node.kind == Leaf: if path.path[^1].node.kind == Leaf:
return ok(path) return ok(path)
var xPath = path var
xPath = path
start = true
while 0 < xPath.path.len: while 0 < xPath.path.len:
let top = xPath.path[^1] let top = xPath.path[^1]
if top.node.kind != Branch or case top.node.kind:
top.nibble < 0 or of Leaf:
xPath.tail.len == 0: return err(NearbyUnexpectedNode)
return err(NearbyUnexpectedNode) # error of Branch:
if top.nibble < 0 or xPath.tail.len == 0:
return err(NearbyUnexpectedNode)
of Extension:
xPath.tail = top.node.ePfx & xPath.tail
xPath.path.setLen(xPath.path.len - 1)
continue
var
step = top
let
xPathLen = xPath.path.len # in case of backtracking
xPathTail = xPath.tail # in case of backtracking
# Look ahead checking next node
if start:
let topLink = top.node.bLink[top.nibble] let topLink = top.node.bLink[top.nibble]
if topLink.len == 0 or topLink.getFn().len == 0: if topLink.len == 0 or topLink.getFn().len == 0:
return err(NearbyDanglingLink) # error return err(NearbyDanglingLink) # error
let nextNibble = xPath.tail[0].int8 let nextNibble = xPath.tail[0].int8
if nextNibble < 15: if nextNibble < 15:
let let nextNodeRlp = rlpFromBytes topLink.getFn()
nextNodeRlp = rlpFromBytes topLink.getFn()
xPathLen = xPath.path.len # in case of backtracking
xPathTail = xPath.tail
case nextNodeRlp.listLen: case nextNodeRlp.listLen:
of 2: of 2:
if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]: if xPath.tail <= nextNodeRlp.listElem(0).toBytes.hexPrefixDecode[1]:
return xPath.completeLeast(topLink, getFn) return xPath.completeLeast(topLink, getFn)
of 17: of 17:
# Step down and complete with a branch link on the child node # Step down and complete with a branch link on the child node
xPath.path = xPath.path & XPathStep( step = XPathStep(
key: topLink, key: topLink,
node: nextNodeRlp.toBranchNode, node: nextNodeRlp.toBranchNode,
nibble: nextNibble) nibble: nextNibble)
xPath.path &= step
else: else:
return err(NearbyGarbledNode) # error return err(NearbyGarbledNode) # error
# Find the next item to the right of the new top entry # Find the next item to the right of the current top entry
let step = xPath.path[^1]
for inx in (step.nibble + 1) .. 15: for inx in (step.nibble + 1) .. 15:
let link = step.node.bLink[inx] let link = step.node.bLink[inx]
if 0 < link.len: if 0 < link.len:
xPath.path[^1].nibble = inx.int8 xPath.path[^1].nibble = inx.int8
return xPath.completeLeast(link, getFn) return xPath.completeLeast(link, getFn)
# Restore `xPath` and backtrack if start:
# Retry without look ahead
start = false
# Restore `xPath` (pop temporary extra step)
if xPathLen < xPath.path.len:
xPath.path.setLen(xPathLen) xPath.path.setLen(xPathLen)
xPath.tail = xPathTail xPath.tail = xPathTail
else:
# Pop `Branch` node on top and append nibble to `tail` # Pop current `Branch` node on top and append nibble to `tail`
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
xPath.path.setLen(xPath.path.len - 1) xPath.path.setLen(xPath.path.len - 1)
# End while
# Pathological case: nfffff.. for n < f # Pathological case: nfffff.. for n < f
var step = path.path[0] var step = path.path[0]
@ -537,8 +574,8 @@ proc hexaryNearbyRightMissing*(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryNearbyLeft*( proc hexaryNearbyLeft*(
path: RPath; ## Partially expanded path path: RPath; # Partially expanded path
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
): Result[RPath,HexaryError] ): Result[RPath,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Similar to `hexaryNearbyRight()`. ## Similar to `hexaryNearbyRight()`.
@ -552,14 +589,30 @@ proc hexaryNearbyLeft*(
if path.path[^1].node.kind == Leaf: if path.path[^1].node.kind == Leaf:
return ok(path) return ok(path)
var rPath = path var
rPath = path
start = true
while 0 < rPath.path.len: while 0 < rPath.path.len:
let top = rPath.path[^1] let top = rPath.path[^1]
if top.node.kind != Branch or case top.node.kind:
top.nibble < 0 or of Leaf:
rPath.tail.len == 0: return err(NearbyUnexpectedNode)
return err(NearbyUnexpectedNode) # error of Branch:
if top.nibble < 0 or rPath.tail.len == 0:
return err(NearbyUnexpectedNode)
of Extension:
rPath.tail = top.node.ePfx & rPath.tail
rPath.path.setLen(rPath.path.len - 1)
continue
var
step = top
let
rPathLen = rPath.path.len # in case of backtracking
rPathTail = rPath.tail # in case of backtracking
# Look ahead checking next node
if start:
let topLink = top.node.bLink[top.nibble] let topLink = top.node.bLink[top.nibble]
if topLink.isZero or not db.tab.hasKey(topLink): if topLink.isZero or not db.tab.hasKey(topLink):
return err(NearbyDanglingLink) # error return err(NearbyDanglingLink) # error
@ -579,26 +632,32 @@ proc hexaryNearbyLeft*(
return rPath.completeMost(topLink, db) return rPath.completeMost(topLink, db)
of Branch: of Branch:
# Step down and complete with a branch link on the child node # Step down and complete with a branch link on the child node
rPath.path = rPath.path & RPathStep( step = RPathStep(
key: topLink, key: topLink,
node: nextNode, node: nextNode,
nibble: nextNibble) nibble: nextNibble)
rPath.path &= step
# Find the next item to the right of the new top entry # Find the next item to the right of the new top entry
let step = rPath.path[^1]
for inx in (step.nibble - 1).countDown(0): for inx in (step.nibble - 1).countDown(0):
let link = step.node.bLink[inx] let link = step.node.bLink[inx]
if not link.isZero: if not link.isZero:
rPath.path[^1].nibble = inx.int8 rPath.path[^1].nibble = inx.int8
return rPath.completeMost(link, db) return rPath.completeMost(link, db)
# Restore `rPath` and backtrack if start:
# Retry without look ahead
start = false
# Restore `rPath` (pop temporary extra step)
if rPathLen < rPath.path.len:
rPath.path.setLen(rPathLen) rPath.path.setLen(rPathLen)
rPath.tail = rPathTail rPath.tail = rPathTail
else:
# Pop `Branch` node on top and append nibble to `tail` # Pop current `Branch` node on top and append nibble to `tail`
rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail rPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & rPath.tail
rPath.path.setLen(rPath.path.len - 1) rPath.path.setLen(rPath.path.len - 1)
# End while
# Pathological case: n0000.. for 0 < n # Pathological case: n0000.. for 0 < n
var step = path.path[0] var step = path.path[0]
@ -613,8 +672,8 @@ proc hexaryNearbyLeft*(
proc hexaryNearbyLeft*( proc hexaryNearbyLeft*(
path: XPath; ## Partially expanded path path: XPath; # Partially expanded path
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
): Result[XPath,HexaryError] ): Result[XPath,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryNearbyLeft()` for persistant database ## Variant of `hexaryNearbyLeft()` for persistant database
@ -625,14 +684,30 @@ proc hexaryNearbyLeft*(
if path.path[^1].node.kind == Leaf: if path.path[^1].node.kind == Leaf:
return ok(path) return ok(path)
var xPath = path var
xPath = path
start = true
while 0 < xPath.path.len: while 0 < xPath.path.len:
let top = xPath.path[^1] let top = xPath.path[^1]
if top.node.kind != Branch or case top.node.kind:
top.nibble < 0 or of Leaf:
xPath.tail.len == 0: return err(NearbyUnexpectedNode)
return err(NearbyUnexpectedNode) # error of Branch:
if top.nibble < 0 or xPath.tail.len == 0:
return err(NearbyUnexpectedNode)
of Extension:
xPath.tail = top.node.ePfx & xPath.tail
xPath.path.setLen(xPath.path.len - 1)
continue
var
step = top
let
xPathLen = xPath.path.len # in case of backtracking
xPathTail = xPath.tail # in case of backtracking
# Look ahead checking next node
if start:
let topLink = top.node.bLink[top.nibble] let topLink = top.node.bLink[top.nibble]
if topLink.len == 0 or topLink.getFn().len == 0: if topLink.len == 0 or topLink.getFn().len == 0:
return err(NearbyDanglingLink) # error return err(NearbyDanglingLink) # error
@ -649,28 +724,34 @@ proc hexaryNearbyLeft*(
return xPath.completeMost(topLink, getFn) return xPath.completeMost(topLink, getFn)
of 17: of 17:
# Step down and complete with a branch link on the child node # Step down and complete with a branch link on the child node
xPath.path = xPath.path & XPathStep( step = XPathStep(
key: topLink, key: topLink,
node: nextNodeRlp.toBranchNode, node: nextNodeRlp.toBranchNode,
nibble: nextNibble) nibble: nextNibble)
xPath.path &= step
else: else:
return err(NearbyGarbledNode) # error return err(NearbyGarbledNode) # error
# Find the next item to the right of the new top entry # Find the next item to the right of the new top entry
let step = xPath.path[^1]
for inx in (step.nibble - 1).countDown(0): for inx in (step.nibble - 1).countDown(0):
let link = step.node.bLink[inx] let link = step.node.bLink[inx]
if 0 < link.len: if 0 < link.len:
xPath.path[^1].nibble = inx.int8 xPath.path[^1].nibble = inx.int8
return xPath.completeMost(link, getFn) return xPath.completeMost(link, getFn)
# Restore `xPath` and backtrack if start:
# Retry without look ahead
start = false
# Restore `xPath` (pop temporary extra step)
if xPathLen < xPath.path.len:
xPath.path.setLen(xPathLen) xPath.path.setLen(xPathLen)
xPath.tail = xPathTail xPath.tail = xPathTail
else:
# Pop `Branch` node on top and append nibble to `tail` # Pop `Branch` node on top and append nibble to `tail`
xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail xPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & xPath.tail
xPath.path.setLen(xPath.path.len - 1) xPath.path.setLen(xPath.path.len - 1)
# End while
# Pathological case: n00000.. for 0 < n # Pathological case: n00000.. for 0 < n
var step = path.path[0] var step = path.path[0]
@ -688,9 +769,9 @@ proc hexaryNearbyLeft*(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryNearbyRight*( proc hexaryNearbyRight*(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather ## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
@ -699,9 +780,9 @@ proc hexaryNearbyRight*(
return baseTag.hexaryNearbyRightImpl(rootKey, db) return baseTag.hexaryNearbyRightImpl(rootKey, db)
proc hexaryNearbyRight*( proc hexaryNearbyRight*(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryNearbyRight()` for persistant database ## Variant of `hexaryNearbyRight()` for persistant database
@ -710,9 +791,9 @@ proc hexaryNearbyRight*(
proc hexaryNearbyLeft*( proc hexaryNearbyLeft*(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Similar to `hexaryNearbyRight()` for `NodeKey` arguments. ## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
@ -720,9 +801,9 @@ proc hexaryNearbyLeft*(
return baseTag.hexaryNearbyLeftImpl(rootKey, db) return baseTag.hexaryNearbyLeftImpl(rootKey, db)
proc hexaryNearbyLeft*( proc hexaryNearbyLeft*(
baseTag: NodeTag; ## Some node baseTag: NodeTag; # Some node
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
): Result[NodeTag,HexaryError] ): Result[NodeTag,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryNearbyLeft()` for persistant database ## Variant of `hexaryNearbyLeft()` for persistant database


@ -420,9 +420,9 @@ proc leafData*(path: RPath): Blob =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryPath*( proc hexaryPath*(
partialPath: NibblesSeq; ## partial path to resolve partialPath: NibblesSeq; # partial path to resolve
rootKey: NodeKey|RepairKey; ## State root rootKey: NodeKey|RepairKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
): RPath ): RPath
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Compute the longest possible repair tree `db` path matching the `nodeKey` ## Compute the longest possible repair tree `db` path matching the `nodeKey`
@ -460,9 +460,9 @@ proc hexaryPath*(
proc hexaryPath*( proc hexaryPath*(
partialPath: NibblesSeq; ## partial path to resolve partialPath: NibblesSeq; # partial path to resolve
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
): XPath ): XPath
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Compute the longest possible path on an arbitrary hexary trie. ## Compute the longest possible path on an arbitrary hexary trie.
@ -500,10 +500,10 @@ proc hexaryPath*(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hexaryPathNodeKey*( proc hexaryPathNodeKey*(
partialPath: NibblesSeq; ## Hex encoded partial path partialPath: NibblesSeq; # Hex encoded partial path
rootKey: NodeKey|RepairKey; ## State root rootKey: NodeKey|RepairKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
missingOk = false; ## Also return key for missing node missingOk = false; # Also return key for missing node
): Result[NodeKey,void] ): Result[NodeKey,void]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Returns the `NodeKey` equivalent for the argment `partialPath` if this ## Returns the `NodeKey` equivalent for the argment `partialPath` if this
@ -524,10 +524,10 @@ proc hexaryPathNodeKey*(
err() err()
proc hexaryPathNodeKey*( proc hexaryPathNodeKey*(
partialPath: Blob; ## Hex encoded partial path partialPath: Blob; # Hex encoded partial path
rootKey: NodeKey|RepairKey; ## State root rootKey: NodeKey|RepairKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
missingOk = false; ## Also return key for missing node missingOk = false; # Also return key for missing node
): Result[NodeKey,void] ): Result[NodeKey,void]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Variant of `hexaryPathNodeKey()` for hex encoded partial path. ## Variant of `hexaryPathNodeKey()` for hex encoded partial path.
@ -535,10 +535,10 @@ proc hexaryPathNodeKey*(
proc hexaryPathNodeKey*( proc hexaryPathNodeKey*(
partialPath: NibblesSeq; ## Hex encoded partial path partialPath: NibblesSeq; # Hex encoded partial path
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
missingOk = false; ## Also return key for missing node missingOk = false; # Also return key for missing node
): Result[NodeKey,void] ): Result[NodeKey,void]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryPathNodeKey()` for persistent database. ## Variant of `hexaryPathNodeKey()` for persistent database.
@ -556,10 +556,10 @@ proc hexaryPathNodeKey*(
err() err()
proc hexaryPathNodeKey*( proc hexaryPathNodeKey*(
partialPath: Blob; ## Partial database path partialPath: Blob; # Partial database path
rootKey: NodeKey; ## State root rootKey: NodeKey; # State root
getFn: HexaryGetFn; ## Database abstraction getFn: HexaryGetFn; # Database abstraction
missingOk = false; ## Also return key for missing node missingOk = false; # Also return key for missing node
): Result[NodeKey,void] ): Result[NodeKey,void]
{.gcsafe, raises: [Defect,RlpError]} = {.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryPathNodeKey()` for persistent database and ## Variant of `hexaryPathNodeKey()` for persistent database and
@ -568,10 +568,10 @@ proc hexaryPathNodeKey*(
proc hexaryPathNodeKeys*( proc hexaryPathNodeKeys*(
partialPaths: seq[Blob]; ## Partial paths segments partialPaths: seq[Blob]; # Partial paths segments
rootKey: NodeKey|RepairKey; ## State root rootKey: NodeKey|RepairKey; # State root
db: HexaryTreeDbRef; ## Database db: HexaryTreeDbRef; # Database
missingOk = false; ## Also return key for missing node missingOk = false; # Also return key for missing node
): HashSet[NodeKey] ): HashSet[NodeKey]
{.gcsafe, raises: [Defect,KeyError]} = {.gcsafe, raises: [Defect,KeyError]} =
## Convert a list of path segments to a set of node keys ## Convert a list of path segments to a set of node keys


@ -0,0 +1,173 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/[sequtils, sets, tables],
chronicles,
eth/[common, p2p, rlp, trie/nibbles],
stew/[byteutils, interval_set],
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
{.push raises: [Defect].}
type
RangeLeaf* = object
key*: NodeKey ## Leaf node path
data*: Blob ## Leaf node data
RangeProof* = object
leafs*: seq[RangeLeaf]
proof*: seq[Blob]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc convertTo(key: RepairKey; T: type NodeKey): T =
## Might be lossy; check before use (unless only used for debugging)
(addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
template collectLeafs(
iv: NodeTagRange; # Proofed range of leaf paths
rootKey: NodeKey|RepairKey; # State root
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
nLeafs: int; # Implies maximal data size
): auto =
## Collect trie database leafs prototype. This directive is provided as
## `template` for avoiding varying exception annotations.
var rc: Result[seq[RangeLeaf],HexaryError]
block body:
var
nodeTag = iv.minPt
prevTag: NodeTag
rls: seq[RangeLeaf]
# Fill at most `nLeafs` leaf nodes from interval range
while rls.len < nLeafs and nodeTag <= iv.maxPt:
# The following logic might be sub-optimal. A strict version of the
# `next()` function that stops with an error at dangling links could
# be faster if the leaf nodes are not too far apart on the hexary trie.
var
xPath = block:
let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyRight(db)
if rx.isErr:
rc = typeof(rc).err(rx.error)
break body
rx.value
rightKey = xPath.getPartialPath.convertTo(NodeKey)
rightTag = rightKey.to(NodeTag)
# Prevents semi-endless looping
if rightTag <= prevTag and 0 < rls.len:
# Oops, should have been tackled by `hexaryNearbyRight()`
rc = typeof(rc).err(FailedNextNode)
break body # stop here
rls.add RangeLeaf(
key: rightKey,
data: xPath.leafData)
prevTag = nodeTag
nodeTag = rightTag + 1.u256
rc = typeof(rc).ok(rls)
# End body
rc
template updateProof(
baseTag: NodeTag; # Left boundary
leafList: seq[RangeLeaf]; # Set of collected leafs
rootKey: NodeKey|RepairKey; # State root
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
): auto =
## Update leafs list by adding proof nodes. This directive is provided as
## `template` for avoiding varying exception annotations.
var proof = baseTag.hexaryPath(rootKey, db)
.path
.mapIt(it.node)
.filterIt(it.kind != Leaf)
.mapIt(it.convertTo(Blob))
.toHashSet
if 0 < leafList.len:
proof.incl leafList[^1].key.to(NodeTag).hexaryPath(rootKey, db)
.path
.mapIt(it.node)
.filterIt(it.kind != Leaf)
.mapIt(it.convertTo(Blob))
.toHashSet
RangeProof(
leafs: leafList,
proof: proof.toSeq)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc hexaryRangeLeafsProof*(
iv: NodeTagRange; # Proofed range of leaf paths
rootKey: NodeKey; # State root
db: HexaryGetFn; # Database abstraction
nLeafs = high(int); # Implies maximal data size
): Result[RangeProof,HexaryError]
{.gcsafe, raises: [Defect,RlpError]} =
## Collect at most `nLeafs` leaf nodes in the range `iv` plus boundary proof nodes, using the persistent database `db`.
let rc = iv.collectLeafs(rootKey, db, nLeafs)
if rc.isErr:
err(rc.error)
else:
ok(iv.minPt.updateProof(rc.value, rootKey, db))
proc hexaryRangeLeafsProof*(
baseTag: NodeTag; # Left boundary
leafList: seq[RangeLeaf]; # Set of already collected leafs
rootKey: NodeKey; # State root
db: HexaryGetFn; # Database abstraction
): RangeProof
{.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryRangeLeafsProof()` that adds proof nodes to an already collected list of leafs.
baseTag.updateProof(leafList, rootKey, db)
proc hexaryRangeLeafsProof*(
iv: NodeTagRange; # Proofed range of leaf paths
rootKey: NodeKey; # State root
db: HexaryTreeDbRef; # Database abstraction
nLeafs = high(int); # Implies maximal data size
): Result[RangeProof,HexaryError]
{.gcsafe, raises: [Defect,KeyError]} =
## Variant of `hexaryRangeLeafsProof()` for the in-memory repair database.
let rc = iv.collectLeafs(rootKey, db, nLeafs)
if rc.isErr:
err(rc.error)
else:
ok(iv.minPt.updateProof(rc.value, rootKey, db))
proc hexaryRangeLeafsProof*(
baseTag: NodeTag; # Left boundary
leafList: seq[RangeLeaf]; # Set of already collected leafs
rootKey: NodeKey; # State root
db: HexaryTreeDbRef; # Database abstraction
): RangeProof
{.gcsafe, raises: [Defect,KeyError]} =
## Variant of `hexaryRangeLeafsProof()` for the in-memory repair database, adding proof nodes to an already collected list of leafs.
baseTag.updateProof(leafList, rootKey, db)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
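
A minimal caller sketch for the new `hexaryRangeLeafsProof()` API, assembling a
`GetAccountRange`-style reply from a path interval. The proc name, the
`replyLeafsMax` cap and the import paths are assumptions made for illustration
only; they are not part of this commit.

    import
      stew/[interval_set, results],
      ../../range_desc,
      "."/[hexary_desc, hexary_error, hexary_range]

    proc assembleAccountRangeReply(
        rootKey: NodeKey;           # State root the range refers to
        iv: NodeTagRange;           # Requested interval of leaf paths
        getFn: HexaryGetFn;         # Persistent database abstraction
          ): Result[RangeProof,HexaryError]
          {.gcsafe, raises: [Defect,RlpError].} =
      ## Collect at most `replyLeafsMax` leafs from `iv`, plus boundary proof
      const replyLeafsMax = 2048    # arbitrary cap, for the sketch only
      iv.hexaryRangeLeafsProof(rootKey, getFn, replyLeafsMax)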


@ -322,7 +322,7 @@ proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] =
let rPath= key.hexaryPath(ps.root, ps.hexaDb) let rPath= key.hexaryPath(ps.root, ps.hexaDb)
result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"] result = rPath.path.mapIt(it.pp(ps.hexaDb)) & @["(" & rPath.tail.pp & ")"]
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string = proc dumpHexaDB*(xDb: HexaryTreeDbRef; root: NodeKey; indent = 4): string =
## Dump the entries from the a generic accounts trie. These are ## Dump the entries from the a generic accounts trie. These are
## key value pairs for ## key value pairs for
## :: ## ::
@ -348,7 +348,11 @@ proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
## added later (typically these nodes are update `Mutable` nodes.) ## added later (typically these nodes are update `Mutable` nodes.)
## ##
## Beware: dumping a large database is not recommended ## Beware: dumping a large database is not recommended
ps.hexaDb.pp(ps.root,indent) xDb.pp(root, indent)
proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
## Ditto
ps.hexaDb.pp(ps.root, indent)
proc hexaryPpFn*(ps: SnapDbBaseRef): HexaryPpFn = proc hexaryPpFn*(ps: SnapDbBaseRef): HexaryPpFn =
## Key mapping function used in `HexaryTreeDB` ## Key mapping function used in `HexaryTreeDB`


@ -9,7 +9,7 @@
# except according to those terms. # except according to those terms.
import import
std/[math, sets, sequtils, strutils], std/[math, sets, sequtils],
chronicles, chronicles,
chronos, chronos,
eth/[common, p2p, trie/trie_defs], eth/[common, p2p, trie/trie_defs],


@ -119,7 +119,7 @@ proc open*(state: var GUnzip; fileName: string):
state.gzIn = fileName.open(fmRead) state.gzIn = fileName.open(fmRead)
state.gzOpenOK = true state.gzOpenOK = true
state.gzMax = state.gzIn.getFileSize state.gzMax = state.gzIn.getFileSize
state.gzCount = state.gzIn.readChars(strBuf, 0, strBuf.len) state.gzCount = state.gzIn.readChars(toOpenArray(strBuf, 0, strBuf.len-1))
# Parse GZIP header (RFC 1952) # Parse GZIP header (RFC 1952)
doAssert 18 < state.gzCount doAssert 18 < state.gzCount
@ -157,7 +157,7 @@ proc nextChunk*(state: var GUnzip):
result = ok("") result = ok("")
while state.gzCount < state.gzMax: while state.gzCount < state.gzMax:
var strLen = state.gzIn.readChars(strBuf, 0, strBuf.len) var strLen = state.gzIn.readChars(toOpenArray(strBuf, 0, strBuf.len-1))
if state.gzMax < state.gzCount + strLen: if state.gzMax < state.gzCount + strLen:
strLen = (state.gzMax - state.gzCount).int strLen = (state.gzMax - state.gzCount).int
state.gzCount += strLen state.gzCount += strLen


@ -27,8 +27,8 @@ import
./replay/[pp, undump_accounts, undump_storages], ./replay/[pp, undump_accounts, undump_storages],
./test_sync_snap/[ ./test_sync_snap/[
bulk_test_xx, snap_test_xx, bulk_test_xx, snap_test_xx,
test_accounts, test_node_range, test_inspect, test_pivot, test_storage, test_accounts, test_helpers, test_node_range, test_inspect, test_pivot,
test_db_timing, test_types] test_storage, test_db_timing, test_types]
const const
baseDir = [".", "..", ".."/"..", $DirSep] baseDir = [".", "..", ".."/"..", $DirSep]
@ -61,9 +61,6 @@ else:
const isUbuntu32bit = false const isUbuntu32bit = false
let let
# Forces `check()` to print the error (as opposed when using `isOk()`)
OkHexDb = Result[void,HexaryError].ok()
# There was a problem with the Github/CI which results in spurious crashes # There was a problem with the Github/CI which results in spurious crashes
# when leaving the `runner()` if the persistent ChainDBRef initialisation # when leaving the `runner()` if the persistent ChainDBRef initialisation
# was present, see `test_custom_network` for more details. # was present, see `test_custom_network` for more details.
@ -92,15 +89,6 @@ proc findFilePath(file: string;
proc getTmpDir(sampleDir = sampleDirRefFile): string = proc getTmpDir(sampleDir = sampleDirRefFile): string =
sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
if noisy:
if args.len == 0:
echo "*** ", pfx
elif 0 < pfx.len and pfx[^1] != ' ':
echo pfx, " ", args.toSeq.join
else:
echo pfx, args.toSeq.join
proc setTraceLevel = proc setTraceLevel =
discard discard
when defined(chronicles_runtime_filtering) and loggingEnabled: when defined(chronicles_runtime_filtering) and loggingEnabled:
@ -176,9 +164,6 @@ proc testDbs(workDir = ""; subDir = ""; instances = nTestDbInstances): TestDbs =
for n in 0 ..< min(result.cdb.len, instances): for n in 0 ..< min(result.cdb.len, instances):
result.cdb[n] = (result.dbDir / $n).newChainDB result.cdb[n] = (result.dbDir / $n).newChainDB
proc lastTwo(a: openArray[string]): seq[string] =
if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
proc snapDbRef(cdb: ChainDb; pers: bool): SnapDbRef = proc snapDbRef(cdb: ChainDb; pers: bool): SnapDbRef =
if pers: SnapDbRef.init(cdb) else: SnapDbRef.init(newMemoryDB()) if pers: SnapDbRef.init(cdb) else: SnapDbRef.init(newMemoryDB())
@ -209,22 +194,43 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
tmpDir.flushDbDir(sample.name) tmpDir.flushDbDir(sample.name)
suite &"SyncSnap: {fileInfo} accounts and proofs for {info}": suite &"SyncSnap: {fileInfo} accounts and proofs for {info}":
test &"Proofing {accLst.len} items for state root ..{root.pp}":
let desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
accLst.test_accountsImport(desc, db.persistent)
var accKeys: seq[NodeKey]
block: block:
# New common descriptor for this sub-group of tests
let let
# Common descriptor for this group of tests desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
desc = db.cdb[1].snapDbAccountsRef(root, db.persistent) hexaDb = desc.hexaDb
getFn = desc.getAccountFn
dbg = if noisy: hexaDb else: nil
# Database abstractions desc.assignPrettyKeys() # debugging, make sure that state root ~ "$0"
getFn = desc.getAccountFn # pestistent case
hexaDB = desc.hexaDB # in-memory, and debugging setup
test &"Merging {accLst.len} proofs for state root ..{root.pp}": test &"Proofing {accLst.len} list items for state root ..{root.pp}":
accLst.test_accountsImport(desc, db.persistent)
test &"Retrieve accounts & proofs for previous account ranges":
let nPart = 3
if db.persistent:
accLst.test_NodeRangeRightProofs(getFn, nPart, dbg)
else:
accLst.test_NodeRangeRightProofs(hexaDB, nPart, dbg)
test &"Verify left boundary checks":
if db.persistent:
accLst.test_NodeRangeLeftBoundary(getFn, dbg)
else:
accLst.test_NodeRangeLeftBoundary(hexaDB, dbg)
block:
# List of keys to be shared by sub-group
var accKeys: seq[NodeKey]
# New common descriptor for this sub-group of tests
let
cdb = db.cdb[1]
desc = cdb.snapDbAccountsRef(root, db.persistent)
test &"Merging {accLst.len} accounts/proofs lists into single list":
accLst.test_accountsMergeProofs(desc, accKeys) # set up `accKeys` accLst.test_accountsMergeProofs(desc, accKeys) # set up `accKeys`
test &"Revisiting {accKeys.len} stored items on ChainDBRef": test &"Revisiting {accKeys.len} stored items on ChainDBRef":
@ -233,18 +239,20 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
# true.say "***", "database dump\n ", desc.dumpHexaDB() # true.say "***", "database dump\n ", desc.dumpHexaDB()
test &"Decompose path prefix envelopes on {info}": test &"Decompose path prefix envelopes on {info}":
let hexaDb = desc.hexaDb
if db.persistent: if db.persistent:
accKeys.test_NodeRangeDecompose(root, getFn, hexaDB) accKeys.test_NodeRangeDecompose(root, desc.getAccountFn, hexaDb)
else: else:
accKeys.test_NodeRangeDecompose(root, hexaDB, hexaDB) accKeys.test_NodeRangeDecompose(root, hexaDb, hexaDb)
test &"Storing/retrieving {accKeys.len} items " & test &"Storing/retrieving {accKeys.len} stored items " &
"on persistent pivot/checkpoint registry": "on persistent pivot/checkpoint registry":
if db.persistent: if db.persistent:
accKeys.test_pivotStoreRead(db.cdb[0]) accKeys.test_pivotStoreRead(cdb)
else: else:
skip() skip()
proc storagesRunner( proc storagesRunner(
noisy = true; noisy = true;
persistent = true; persistent = true;
@ -539,14 +547,14 @@ when isMainModule:
# #
# This one uses dumps from the external `nimbus-eth1-blob` repo # This one uses dumps from the external `nimbus-eth1-blob` repo
when true and false: when true: # and false:
import ./test_sync_snap/snap_other_xx import ./test_sync_snap/snap_other_xx
noisy.showElapsed("accountsRunner()"): noisy.showElapsed("accountsRunner()"):
for n,sam in snapOtherList: for n,sam in snapOtherList:
false.accountsRunner(persistent=true, sam) false.accountsRunner(persistent=true, sam)
noisy.showElapsed("inspectRunner()"): #noisy.showElapsed("inspectRunner()"):
for n,sam in snapOtherHealingList: # for n,sam in snapOtherHealingList:
false.inspectionRunner(persistent=true, cascaded=false, sam) # false.inspectionRunner(persistent=true, cascaded=false, sam)
# This one usues dumps from the external `nimbus-eth1-blob` repo # This one usues dumps from the external `nimbus-eth1-blob` repo
when true and false: when true and false:
@ -564,14 +572,14 @@ when isMainModule:
# This one uses readily available dumps # This one uses readily available dumps
when true: # and false: when true: # and false:
false.inspectionRunner() # false.inspectionRunner()
for n,sam in snapTestList: for n,sam in snapTestList:
false.accountsRunner(persistent=false, sam) false.accountsRunner(persistent=false, sam)
false.accountsRunner(persistent=true, sam) false.accountsRunner(persistent=true, sam)
for n,sam in snapTestStorageList: for n,sam in snapTestStorageList:
false.accountsRunner(persistent=false, sam) false.accountsRunner(persistent=false, sam)
false.accountsRunner(persistent=true, sam) false.accountsRunner(persistent=true, sam)
false.storagesRunner(persistent=true, sam) # false.storagesRunner(persistent=true, sam)
# This one uses readily available dumps # This one uses readily available dumps
when true and false: when true and false:


@ -12,8 +12,8 @@
## Snap sync components tester and TDD environment ## Snap sync components tester and TDD environment
import import
std/[algorithm, sequtils, strformat, strutils, tables], std/algorithm,
eth/[common, p2p, trie/db], eth/[common, p2p],
unittest2, unittest2,
../../nimbus/db/select_backend, ../../nimbus/db/select_backend,
../../nimbus/sync/snap/range_desc, ../../nimbus/sync/snap/range_desc,
@ -36,7 +36,7 @@ proc flatten(list: openArray[seq[Blob]]): seq[Blob] =
proc test_accountsImport*( proc test_accountsImport*(
inList: seq[UndumpAccounts]; inList: seq[UndumpAccounts];
desc: SnapDbAccountsRef; desc: SnapDbAccountsRef;
persistent: bool persistent: bool;
) = ) =
## Import accounts ## Import accounts
for n,w in inList: for n,w in inList:


@ -12,7 +12,7 @@
## Snap sync components tester and TDD environment ## Snap sync components tester and TDD environment
import import
std/[algorithm, math, sequtils, strformat, strutils, times], std/[algorithm, math, sequtils, strformat, times],
stew/byteutils, stew/byteutils,
rocksdb, rocksdb,
unittest2, unittest2,


@ -12,7 +12,7 @@
import import
std/times, std/times,
eth/common, eth/common,
stew/results, stew/[interval_set, results],
unittest2, unittest2,
../../nimbus/sync/snap/range_desc, ../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/hexary_error, ../../nimbus/sync/snap/worker/db/hexary_error,
@ -31,6 +31,9 @@ proc isImportOk*(rc: Result[SnapAccountsGaps,HexaryError]): bool =
else: else:
return true return true
proc lastTwo*(a: openArray[string]): seq[string] =
if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public type conversions # Public type conversions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -63,6 +66,12 @@ proc to*(w: (byte, NodeTag); T: type Blob): T =
proc to*(t: NodeTag; T: type Blob): T = proc to*(t: NodeTag; T: type Blob): T =
toSeq(t.UInt256.toBytesBE) toSeq(t.UInt256.toBytesBE)
# ----------
proc convertTo*(key: RepairKey; T: type NodeKey): T =
## Might be lossy; check before use (unless only used for debugging)
(addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, pretty printing # Public functions, pretty printing
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -95,6 +104,27 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
else: else:
echo pfx, args.toSeq.join echo pfx, args.toSeq.join
# ------------------------------------------------------------------------------
# Public free parking
# ------------------------------------------------------------------------------
proc rangeAccountSizeMax*(n: int): int =
## Max number of bytes needed to store `n` RLP encoded `Account()` type
## entries. Note that this is an upper bound.
##
## The maximum size of a single RLP encoded account item can be determined
## by setting every field of `Account()` to `high()` or `0xff`.
if 127 < n:
3 + n * 110
elif 0 < n:
2 + n * 110
else:
1
proc rangeNumAccounts*(size: int): int =
## Approximate number of accounts that fit into `size` bytes, the rough inverse of `rangeAccountSizeMax()`.
(size - 3) div 110
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
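
A quick arithmetic sketch of where the constant 110 in `rangeAccountSizeMax()`
above comes from: a maximal `Account` encodes to 9 (nonce) + 33 (balance) +
33 (storageRoot) + 33 (codeHash) = 108 payload bytes, plus a 2 byte RLP list
header. The explicit imports are assumptions for a standalone snippet; the test
module itself relies on re-exports.

    import eth/[common, rlp], stint

    let maxAccount = Account(
      nonce:       high(uint64),
      balance:     high(UInt256),
      storageRoot: Hash256(data: high(UInt256).toBytesBE),
      codeHash:    Hash256(data: high(UInt256).toBytesBE))
    doAssert rlp.encode(maxAccount).len == 110  # single item incl. list header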


@ -12,7 +12,7 @@
## Snap sync components tester and TDD environment ## Snap sync components tester and TDD environment
import import
std/[sequtils, strformat, strutils], std/[sequtils],
eth/[common, p2p, trie/db], eth/[common, p2p, trie/db],
unittest2, unittest2,
../../nimbus/db/select_backend, ../../nimbus/db/select_backend,


@ -13,12 +13,16 @@
import import
std/[sequtils, strformat, strutils], std/[sequtils, strformat, strutils],
eth/[common, p2p, trie/nibbles], eth/[common, p2p, rlp, trie/nibbles],
stew/[byteutils, interval_set, results], stew/[byteutils, interval_set, results],
unittest2, unittest2,
../../nimbus/sync/types,
../../nimbus/sync/snap/range_desc, ../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[ ../../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_envelope, hexary_nearby, hexary_paths] hexary_desc, hexary_envelope, hexary_error, hexary_nearby, hexary_paths,
hexary_range, snapdb_accounts, snapdb_desc],
../replay/[pp, undump_accounts],
./test_helpers
const const
cmaNlSp0 = ",\n" & repeat(" ",12) cmaNlSp0 = ",\n" & repeat(" ",12)
@ -78,6 +82,125 @@ proc print_data(
"\n pfxMax=", pfx.hexaryEnvelope.maxPt, "\n pfxMax=", pfx.hexaryEnvelope.maxPt,
"\n ", pfx.hexaryEnvelope.maxPt.hexaryPath(rootKey,db).pp(dbg) "\n ", pfx.hexaryEnvelope.maxPt.hexaryPath(rootKey,db).pp(dbg)
proc printCompareRightLeafs(
rootKey: NodeKey;
baseTag: NodeTag;
accounts: seq[PackedAccount];
leafs: seq[RangeLeaf];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg: HexaryTreeDbRef; ## Debugging env
) =
let
noisy = not dbg.isNil
var
top = 0
nMax = min(accounts.len, leafs.len)
step = nMax div 2
while top < nMax:
while 1 < step and accounts[top+step].accKey != leafs[top+step].key:
#noisy.say "***", "i=", top+step, " fail"
step = max(1, step div 2)
if accounts[top+step].accKey == leafs[top+step].key:
top += step
step = max(1, step div 2)
noisy.say "***", "i=", top, " step=", step, " ok"
continue
let start = top
top = nMax
for i in start ..< top:
if accounts[i].accKey == leafs[i].key:
noisy.say "***", "i=", i, " skip, ok"
continue
# Diagnostics and return
check (i,accounts[i].accKey) == (i,leafs[i].key)
let
lfsKey = leafs[i].key
accKey = accounts[i].accKey
prdKey = if 0 < i: accounts[i-1].accKey else: baseTag.to(NodeKey)
nxtTag = if 0 < i: prdKey.to(NodeTag) + 1.u256 else: baseTag
nxtPath = nxtTag.hexaryPath(rootKey,db)
rightRc = nxtPath.hexaryNearbyRight(db)
if rightRc.isOk:
check lfsKey == rightRc.value.getPartialPath.convertTo(NodeKey)
else:
check rightRc.error == HexaryError(0) # force error printing
noisy.say "\n***", "i=", i, "/", accounts.len,
"\n",
"\n prdKey=", prdKey,
"\n ", prdKey.hexaryPath(rootKey,db).pp(dbg),
"\n",
"\n nxtKey=", nxtTag,
"\n ", nxtPath.pp(dbg),
"\n",
"\n accKey=", accKey,
"\n ", accKey.hexaryPath(rootKey,db).pp(dbg),
"\n",
"\n lfsKey=", lfsKey,
"\n ", lfsKey.hexaryPath(rootKey,db).pp(dbg),
"\n"
return
proc printCompareLeftNearby(
rootKey: NodeKey;
leftKey: NodeKey;
rightKey: NodeKey;
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg: HexaryTreeDbRef; ## Debugging env
) =
let
noisy = not dbg.isNil
rightPath = rightKey.hexaryPath(rootKey,db)
toLeftRc = rightPath.hexaryNearbyLeft(db)
var
toLeftKey: NodeKey
if toLeftRc.isErr:
check toLeftRc.error == HexaryError(0) # force error printing
else:
toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
if toLeftKey == leftKey:
return
noisy.say "\n***",
" rightKey=", rightKey,
"\n ", rightKey.hexaryPath(rootKey,db).pp(dbg),
"\n",
"\n leftKey=", leftKey,
"\n ", leftKey.hexaryPath(rootKey,db).pp(dbg),
"\n",
"\n toLeftKey=", toLeftKey,
"\n ", toLeftKey.hexaryPath(rootKey,db).pp(dbg),
"\n"
proc verifyAccountListSizes() =
## RLP does not allow a static check, so verify `rangeAccountSizeMax()` against actual encodings at run time
for n in [0, 1, 128, 129, 200]:
check n.rangeAccountSizeMax == Account(
storageRoot: Hash256(data: high(UInt256).toBytesBE),
codeHash: Hash256(data: high(UInt256).toBytesBE),
nonce: high(uint64),
balance: high(UInt256)).repeat(n).encode.len
# ------------------------------------------------------------------------------
# Private functions, pretty printing
# ------------------------------------------------------------------------------
proc pp(a: NodeTag; collapse = true): string =
a.to(NodeKey).pp(collapse)
proc pp(iv: NodeTagRange; collapse = false): string =
"(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public test function # Public test function
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -198,6 +321,78 @@ proc test_NodeRangeDecompose*(
if true: quit() if true: quit()
proc test_NodeRangeRightProofs*(
inLst: seq[UndumpAccounts];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
nSplit = 0; ## Also split intervals (unused)
dbg = HexaryTreeDbRef(nil); ## Debugging env
) =
## Partition range and provide proofs suitable for `GetAccountRange` message
## from `snap/1` protocol.
let
rootKey = inLst[0].root.to(NodeKey)
noisy = not dbg.isNil
# RLP does not allow static check
verifyAccountListSizes()
# Assuming the `inLst` entries have been stored in the DB already
for n,w in inLst:
let
iv = NodeTagRange.new(w.base, w.data.accounts[^1].accKey.to(NodeTag))
rc = iv.hexaryRangeLeafsProof(rootKey, db, high(int))
check rc.isOk
if rc.isErr:
return
let
leafs = rc.value.leafs
accounts = w.data.accounts
if leafs.len != accounts.len or accounts[^1].accKey != leafs[^1].key:
noisy.say "***", "n=", n, " something went wrong .."
check (n,leafs.len) == (n,accounts.len)
rootKey.printCompareRightLeafs(w.base, accounts, leafs, db, dbg)
return
# FIXME: verify that proof nodes are complete
check rc.value.proof.len <= w.data.proof.len
check leafs[^1].key.to(NodeTag) <= iv.maxPt
noisy.say "***", "n=", n,
" leafs=", leafs.len,
" proof=", rc.value.proof.len, "/", w.data.proof.len
proc test_NodeRangeLeftBoundary*(
inLst: seq[UndumpAccounts];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg = HexaryTreeDbRef(nil); ## Debugging env
) =
## Verify left side boundary checks
let
rootKey = inLst[0].root.to(NodeKey)
noisy = not dbg.isNil
# Assuming the `inLst` entries have been stored in the DB already
for n,w in inLst:
let accounts = w.data.accounts
for i in 1 ..< accounts.len:
let
leftKey = accounts[i-1].accKey
rightKey = (accounts[i].accKey.to(NodeTag) - 1.u256).to(NodeKey)
toLeftRc = rightKey.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
if toLeftRc.isErr:
check toLeftRc.error == HexaryError(0) # force error printing
return
let toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
if leftKey != toLeftKey:
let j = i-1
check (n, j, leftKey) == (n, j, toLeftKey)
rootKey.printCompareLeftNearby(leftKey, rightKey, db, dbg)
return
noisy.say "***", "n=", n, " accounts=", accounts.len
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------


@ -12,7 +12,6 @@
## Snap sync components tester and TDD environment ## Snap sync components tester and TDD environment
import import
std/[sequtils, strformat, strutils],
eth/[common, p2p], eth/[common, p2p],
unittest2, unittest2,
../../nimbus/db/select_backend, ../../nimbus/db/select_backend,
@ -34,7 +33,8 @@ proc test_pivotStoreRead*(
(4.to(NodeTag),5.to(NodeTag)), (4.to(NodeTag),5.to(NodeTag)),
(6.to(NodeTag),7.to(NodeTag))] (6.to(NodeTag),7.to(NodeTag))]
slotAccounts = seq[NodeKey].default slotAccounts = seq[NodeKey].default
for n,w in accKeys: for n in 0 ..< accKeys.len:
let w = accKeys[n]
check dbBase.savePivot( check dbBase.savePivot(
SnapDbPivotRegistry( SnapDbPivotRegistry(
header: BlockHeader(stateRoot: w.to(Hash256)), header: BlockHeader(stateRoot: w.to(Hash256)),
@ -50,7 +50,13 @@ proc test_pivotStoreRead*(
check rc.value.nAccounts == n.uint64 check rc.value.nAccounts == n.uint64
check rc.value.nSlotLists == n.uint64 check rc.value.nSlotLists == n.uint64
check rc.value.processed == processed check rc.value.processed == processed
for n,w in accKeys: # Stop gossiping (happens whith corrupted database)
if rc.value.nAccounts != n.uint64 or
rc.value.nSlotLists != n.uint64 or
rc.value.processed != processed:
return
for n in 0 ..< accKeys.len:
let w = accKeys[n]
block: block:
let rc = dbBase.recoverPivot(w) let rc = dbBase.recoverPivot(w)
check rc.isOk check rc.isOk


@ -12,7 +12,7 @@
## Snap sync components tester and TDD environment ## Snap sync components tester and TDD environment
import import
std/[sequtils, strformat, strutils, tables], std/[sequtils, tables],
eth/[common, p2p], eth/[common, p2p],
unittest2, unittest2,
../../nimbus/db/select_backend, ../../nimbus/db/select_backend,