Prepare snap server client test scenario cont2 (#1487)
* Clean up some function prototypes

  why:
    Simplify polymorphic prototype variances for easier maintenance.

* Fix fringe condition crash when importing bogus RLP node

  why:
    Accessing a non-list RLP entry as a list causes a `Defect`.

* Fix left boundary proof at range extractor

  why:
    Was insufficient. The main problem was that there was no unit test for
    the validity of the generated left boundary.

* Handle incomplete left boundary proofs early

  why:
    Attempting to do it later leads to overly complex code in order to
    prevent looping when the same peer repeatedly sends the same incomplete
    proof. By contrast, gaps in the leaf sequence can be handled gracefully
    by registering the gaps.

* Implement a manual pivot setup mechanism for snap sync

  why:
    For a test scenario it is convenient to set the pivot to something lower
    than the beacon header from the consensus layer. This does not need to
    rely on any RPC mechanism.

  details:
    The file containing the pivot specs is specified by the
    `--sync-ctrl-file` option. It is regularly parsed for updates.

* Fix calculation error

  why:
    Prevent calculating the square root of a negative number.
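For illustration: a hypothetical pivot spec file passed via `--sync-ctrl-file` (say, `./pivot.txt`) holds a single data line, either a decimal block number such as `640000` or a block hash written as `0x` followed by 64 hex digits. Since the file is re-parsed regularly, the pivot can be moved while the client keeps running.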
This commit is contained in: parent fe04b50fef, commit fe3a6d67c6
@@ -179,10 +179,10 @@ type
       name: "sync-mode" .}: SyncMode

     syncCtrlFile* {.
-      desc: "Specify a file that is regularly checked for updates. It " &
-            "contains extra information specific to the type of sync " &
-            "process. This option is primaily intended only for sync " &
-            "testing and debugging."
+      desc: "Specify a file that is regularly checked for updates. If it " &
+            "exists it is checked for whether it contains extra information " &
+            "specific to the type of sync process. This option is primarily " &
+            "intended only for sync testing and debugging."
       abbr: "z"
       name: "sync-ctrl-file" }: Option[string]

@@ -19,7 +19,7 @@ import
 {.push raises: [].}

 logScope:
-  topics = "full-ticker"
+  topics = "full-tick"

 type
   TickerStats* = object
@@ -145,6 +145,17 @@ proc processStaged(buddy: FullBuddyRef): bool =

   return false


+proc suspendDownload(buddy: FullBuddyRef): bool =
+  ## Check whether downloading should be suspended
+  let ctx = buddy.ctx
+  if ctx.exCtrlFile.isSome:
+    let rc = ctx.exCtrlFile.syncCtrlBlockNumberFromFile
+    if rc.isOk:
+      ctx.pool.suspendAt = rc.value
+  if 0 < ctx.pool.suspendAt:
+    return ctx.pool.suspendAt < buddy.only.bQueue.topAccepted
+
 # ------------------------------------------------------------------------------
 # Public start/stop and admin functions
 # ------------------------------------------------------------------------------

@@ -378,7 +389,6 @@ proc runPool*(buddy: FullBuddyRef; last: bool): bool =
   buddy.ctx.poolMode = false
   true

-
 proc runMulti*(buddy: FullBuddyRef) {.async.} =
   ## This peer worker is invoked if the `buddy.ctrl.multiOk` flag is set
   ## `true` which is typically done after finishing `runSingle()`. This

@@ -388,15 +398,10 @@ proc runMulti*(buddy: FullBuddyRef) {.async.} =
     ctx = buddy.ctx
     bq = buddy.only.bQueue

-  if ctx.exCtrlFile.isSome:
-    let rc = ctx.exCtrlFile.syncCtrlBlockNumberFromFile
-    if rc.isOk:
-      ctx.pool.suspendAt = rc.value
-    if 0 < ctx.pool.suspendAt:
-      if ctx.pool.suspendAt < buddy.only.bQueue.topAccepted:
-        # Sleep for a while, then leave
-        await sleepAsync(10.seconds)
-        return
+  if buddy.suspendDownload:
+    # Sleep for a while, then leave
+    await sleepAsync(10.seconds)
+    return

   # Fetch work item
   let rc = await bq.blockQueueWorker()
@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

+{.used, push raises: [].}
+
 import
   eth/p2p,
   ../../core/[chain, tx_pool],

@@ -15,8 +17,6 @@ import
   ./eth as handlers_eth,
   ./snap as handlers_snap

-{.used, push raises: [].}
-
 # ------------------------------------------------------------------------------
 # Public functions: convenience mappings for `eth`
 # ------------------------------------------------------------------------------
@@ -51,7 +51,7 @@ proc addSnapHandlerCapability*(
     node: var EthereumNode;
     peerPool: PeerPool;
     chain = ChainRef(nil);
       ) =
   ## Install `snap` handlers. Passing `chain` as `nil` installs the handler
   ## in minimal/outbound mode.
   if chain.isNil:
@@ -74,36 +74,37 @@ proc fetchLeafRange(
   # on wire. So the `sizeMax` is the argument size `replySizeMax` with some
   # space removed to accommodate for the proof nodes.
   let
-    sizeMax =replySizeMax - estimatedProofSize
+    sizeMax = replySizeMax - estimatedProofSize
     rc = db.hexaryRangeLeafsProof(rootKey, iv, sizeMax)
   if rc.isErr:
     error logTxt "fetchLeafRange(): database problem",
       iv, replySizeMax, error=rc.error
     return err() # database error
   let sizeOnWire = rc.value.leafsSize + rc.value.proofSize

   if sizeOnWire <= replySizeMax:
     return ok(rc.value)

   # Strip parts of leafs result and amend remainder by adding proof nodes
   var
-    leafs = rc.value.leafs
-    leafsTop = leafs.len - 1
+    rpl = rc.value
+    leafsTop = rpl.leafs.len - 1
     tailSize = 0
     tailItems = 0
     reduceBy = replySizeMax - sizeOnWire
   while tailSize <= reduceBy and tailItems < leafsTop:
     # Estimate the size on wire needed for the tail item
     const extraSize = (sizeof RangeLeaf()) - (sizeof newSeq[Blob](0))
-    tailSize += leafs[leafsTop - tailItems].data.len + extraSize
+    tailSize += rpl.leafs[leafsTop - tailItems].data.len + extraSize
     tailItems.inc
   if leafsTop <= tailItems:
     trace logTxt "fetchLeafRange(): stripping leaf list failed",
       iv, replySizeMax, leafsTop, tailItems
     return err() # package size too small

-  leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack
+  rpl.leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack
   let
-    leafProof = db.hexaryRangeLeafsProof(rootKey, iv.minPt, leafs)
+    leafProof = db.hexaryRangeLeafsProof(rootKey, rpl)
     strippedSizeOnWire = leafProof.leafsSize + leafProof.proofSize
   if strippedSizeOnWire <= replySizeMax:
     return ok(leafProof)
@@ -51,6 +51,37 @@ proc syncCtrlBlockNumberFromFile*(
       debug "Exception while parsing block number", file, name, msg
     err()

+proc syncCtrlHashOrBlockNumFromFile*(
+    fileName: Option[string];
+      ): Result[HashOrNum,void] =
+  ## Returns a block number or a hash from the file name argument `fileName`.
+  ## A block number is decimal encoded and a hash is expected to be a 66 hex
+  ## digits string starting with `0x`.
+  if fileName.isSome:
+    let file = fileName.get
+
+    # Parse value dump and fetch a header from the peer (if any)
+    try:
+      let data = file.getDataLine
+      if 0 < data.len:
+        if 66 == data.len:
+          let hash = HashOrNum(
+            isHash: true,
+            hash: Hash256(
+              data: UInt256.fromHex(data).toBytesBE))
+          return ok(hash)
+        else:
+          let num = HashOrNum(
+            isHash: false,
+            number: parse(data,UInt256))
+          return ok(num)
+    except CatchableError as e:
+      let
+        name {.used.} = $e.name
+        msg {.used.} = e.msg
+      debug "Exception while parsing hash or block number", file, name, msg
+  err()
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
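The classification rule used above is small enough to state on its own: a 66 character data line (`0x` plus 64 hex digits) is read as a block hash, anything else as a decimal block number. A minimal standalone sketch of that rule, substituting plain standard-library types for the `HashOrNum`/`Hash256`/`UInt256` types used in the patch:

  import std/strutils

  type PivotSpec = object
    isHash: bool
    hash: string     # stand-in for `Hash256`
    number: uint64   # stand-in for `UInt256`

  proc parsePivotLine(data: string): PivotSpec =
    ## Mirrors `syncCtrlHashOrBlockNumFromFile()` above:
    ## 66 characters => block hash, otherwise decimal block number.
    if data.len == 66:
      PivotSpec(isHash: true, hash: data)
    else:
      PivotSpec(isHash: false, number: data.parseBiggestUInt.uint64)

  when isMainModule:
    doAssert not parsePivotLine("640000").isHash
    doAssert parsePivotLine("0x" & '0'.repeat(64)).isHash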
@@ -0,0 +1,76 @@
+# Nimbus
+# Copyright (c) 2021 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+{.push raises: [].}
+
+import
+  chronicles,
+  chronos,
+  eth/[common, p2p],
+  ../sync_desc,
+  ../misc/sync_ctrl,
+  ./worker_desc,
+  ./worker/com/[com_error, get_block_header]
+
+logScope:
+  topics = "snap-ctrl"
+
+# ------------------------------------------------------------------------------
+# Public functions
+# ------------------------------------------------------------------------------
+
+proc updateBeaconHeaderFromFile*(
+    buddy: SnapBuddyRef;               # Worker peer
+      ) {.async.} =
+  ## This function updates the beacon header cache by import from the file name
+  ## argument `fileName`. The first line of the contents of the file looks like
+  ## * `0x<hex-number>`   -- hash of block header
+  ## * `<decimal-number>` -- block number
+  ## This function is typically used for testing and debugging.
+  let
+    ctx = buddy.ctx
+
+    hashOrNum = block:
+      let rc = ctx.exCtrlFile.syncCtrlHashOrBlockNumFromFile
+      if rc.isErr:
+        return
+      rc.value
+
+    peer = buddy.peer
+
+  var
+    rc = Result[BlockHeader,ComError].err(ComError(0))
+    isHash = hashOrNum.isHash # so that the value can be logged
+
+  # Parse value dump and fetch a header from the peer (if any)
+  try:
+    if isHash:
+      let hash = hashOrNum.hash
+      trace "External beacon info", peer, hash
+      if hash != ctx.pool.beaconHeader.hash:
+        rc = await buddy.getBlockHeader(hash)
+    else:
+      let num = hashOrNum.number
+      trace "External beacon info", peer, num
+      if ctx.pool.beaconHeader.blockNumber < num:
+        rc = await buddy.getBlockHeader(num)
+  except CatchableError as e:
+    let
+      name {.used.} = $e.name
+      msg {.used.} = e.msg
+    trace "Exception while parsing beacon info", peer, isHash, name, msg
+
+  if rc.isOk:
+    if ctx.pool.beaconHeader.blockNumber < rc.value.blockNumber:
+      ctx.pool.beaconHeader = rc.value
+
+# ------------------------------------------------------------------------------
+# End
+# ------------------------------------------------------------------------------
@@ -21,7 +21,7 @@ import
   ./worker/[pivot, ticker],
   ./worker/com/com_error,
   ./worker/db/[hexary_desc, snapdb_desc, snapdb_pivot],
-  "."/[range_desc, worker_desc]
+  "."/[range_desc, update_beacon_header, worker_desc]

 {.push raises: [].}

@@ -126,6 +126,10 @@ proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool =
       checkpoint=("#" & $ctx.pool.pivotTable.topNumber() & "(0)")
     if not ctx.pool.ticker.isNil:
       ctx.pool.ticker.startRecovery()
+
+  if ctx.exCtrlFile.isSome:
+    warn "Snap sync accepts pivot block number or hash",
+      syncCtrlFile=ctx.exCtrlFile.get
   true

 proc release*(ctx: SnapCtxRef) =

@@ -179,6 +183,11 @@ proc runSingle*(buddy: SnapBuddyRef) {.async.} =
   ## * `buddy.ctrl.multiOk` is `false`
   ## * `buddy.ctrl.poolMode` is `false`
   ##
+  let ctx = buddy.ctx
+
+  # External beacon header updater
+  await buddy.updateBeaconHeaderFromFile()
+
   await buddy.pivotApprovePeer()
   buddy.ctrl.multiOk = true
@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

+{.push raises: [].}
+
 import
   std/options,
   chronos,

@@ -17,8 +19,6 @@ import
   ../../worker_desc,
   ./com_error

-{.push raises: [Defect].}
-
 logScope:
   topics = "snap-fetch"
@@ -26,49 +26,49 @@ logScope:
 # Public functions
 # ------------------------------------------------------------------------------

-# proc getBlockHeader*(
-#     buddy: SnapBuddyRef;
-#     num: BlockNumber;
-#       ): Future[Result[BlockHeader,ComError]]
-#       {.async.} =
-#   ## Get single block header
-#   let
-#     peer = buddy.peer
-#     reqLen = 1u
-#     hdrReq = BlocksRequest(
-#       startBlock: HashOrNum(
-#         isHash:   false,
-#         number:   num),
-#       maxResults: reqLen,
-#       skip:       0,
-#       reverse:    false)
-#
-#   trace trEthSendSendingGetBlockHeaders, peer, header=("#" & $num), reqLen
-#
-#   var hdrResp: Option[blockHeadersObj]
-#   try:
-#     hdrResp = await peer.getBlockHeaders(hdrReq)
-#   except CatchableError as e:
-#     trace trSnapRecvError & "waiting for GetByteCodes reply", peer,
-#       error=e.msg
-#     return err(ComNetworkProblem)
-#
-#   var hdrRespLen = 0
-#   if hdrResp.isSome:
-#     hdrRespLen = hdrResp.get.headers.len
-#   if hdrRespLen == 0:
-#     trace trEthRecvReceivedBlockHeaders, peer, reqLen, respose="n/a"
-#     return err(ComNoHeaderAvailable)
-#
-#   if hdrRespLen == 1:
-#     let
-#       header = hdrResp.get.headers[0]
-#       blockNumber = header.blockNumber
-#     trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
-#     return ok(header)
-#
-#   trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen
-#   return err(ComTooManyHeaders)
+proc getBlockHeader*(
+    buddy: SnapBuddyRef;
+    num: BlockNumber;
+      ): Future[Result[BlockHeader,ComError]]
+      {.async.} =
+  ## Get single block header
+  let
+    peer = buddy.peer
+    reqLen = 1u
+    hdrReq = BlocksRequest(
+      startBlock: HashOrNum(
+        isHash:   false,
+        number:   num),
+      maxResults: reqLen,
+      skip:       0,
+      reverse:    false)
+
+  trace trEthSendSendingGetBlockHeaders, peer, header=("#" & $num), reqLen
+
+  var hdrResp: Option[blockHeadersObj]
+  try:
+    hdrResp = await peer.getBlockHeaders(hdrReq)
+  except CatchableError as e:
+    trace trSnapRecvError & "waiting for GetByteCodes reply", peer,
+      error=e.msg
+    return err(ComNetworkProblem)
+
+  var hdrRespLen = 0
+  if hdrResp.isSome:
+    hdrRespLen = hdrResp.get.headers.len
+  if hdrRespLen == 0:
+    trace trEthRecvReceivedBlockHeaders, peer, reqLen, respose="n/a"
+    return err(ComNoHeaderAvailable)
+
+  if hdrRespLen == 1:
+    let
+      header = hdrResp.get.headers[0]
+      blockNumber = header.blockNumber
+    trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
+    return ok(header)
+
+  trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen
+  return err(ComTooManyHeaders)


 proc getBlockHeader*(
@@ -8,14 +8,14 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

+{.push raises: [].}
+
 import
   std/[sets, tables],
   eth/[common, trie/nibbles],
   ../../range_desc,
   "."/[hexary_desc, hexary_error]

-{.push raises: [].}
-
 # ------------------------------------------------------------------------------
 # Private debugging helpers
 # ------------------------------------------------------------------------------

@@ -49,6 +49,10 @@ proc hexaryImport*(
     top = 0                            # count entries
     rNode: RNodeRef                    # repair tree node

+  if not rlp.isList:
+    # Otherwise `rlp.items` will raise a `Defect`
+    return HexaryNodeReport(error: Rlp2Or17ListEntries)
+
   # Collect lists of either 2 or 17 blob entries.
   for w in rlp.items:
     case top

@@ -145,6 +149,10 @@ proc hexaryImport*(
     top = 0                            # count entries
     rNode: RNodeRef                    # repair tree node

+  if not rlp.isList:
+    # Otherwise `rlp.items` will raise a `Defect`
+    return HexaryNodeReport(error: Rlp2Or17ListEntries)
+
   # Collect lists of either 2 or 17 blob entries.
   for w in rlp.items:
     case top
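The crash fixed by this guard comes from iterating a non-list RLP item. A minimal sketch of the same pattern, assuming the usual nim-eth `eth/rlp` entry points (`rlpFromBytes`, `isList`, `items`); a single RLP blob such as the encoding of `42` is a valid item but not a list, so iterating it without the check would raise:

  import eth/rlp

  proc countListEntries(blob: seq[byte]): int =
    ## Number of top-level list entries, or -1 for a non-list item.
    var r = rlpFromBytes blob
    if not r.isList:
      return -1          # reject early instead of raising inside `items`
    for _ in r.items:
      result.inc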
@@ -12,7 +12,6 @@

 import
   std/[sequtils, sets, tables],
-  chronicles,
   eth/[common, p2p, trie/nibbles],
   stew/[byteutils, interval_set],
   ../../../protocol,

@@ -25,10 +24,11 @@ type
     data*: Blob                        ## Leaf node data

   RangeProof* = object
-    leafs*: seq[RangeLeaf]
-    leafsSize*: int
-    proof*: seq[SnapProof]
-    proofSize*: int
+    base*: NodeTag                     ## No node between `base` and `leafs[0]`
+    leafs*: seq[RangeLeaf]             ## List of consecutive leaf nodes
+    leafsSize*: int                    ## RLP encoded size of `leafs` on wire
+    proof*: seq[SnapProof]             ## Boundary proof
+    proofSize*: int                    ## RLP encoded size of `proof` on wire

 proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) {.gcsafe.}
 proc hexaryRangeRlpSize*(blobLen: int): int {.gcsafe.}
@@ -51,13 +51,13 @@ proc rlpPairSize(aLen: int; bRlpLen: int): int =
     high(int)

 proc nonLeafPathNodes(
-    baseTag: NodeTag;                  # Left boundary
+    nodeTag: NodeTag;                  # Left boundary
     rootKey: NodeKey|RepairKey;        # State root
     db: HexaryGetFn|HexaryTreeDbRef;   # Database abstraction
       ): HashSet[SnapProof]
       {.gcsafe, raises: [CatchableError]} =
   ## Helper for `updateProof()`
-  baseTag
+  nodeTag
     .hexaryPath(rootKey, db)
     .path
     .mapIt(it.node)

@@ -65,6 +65,20 @@ proc nonLeafPathNodes(
     .mapIt(it.convertTo(Blob).to(SnapProof))
     .toHashSet

+proc allPathNodes(
+    nodeTag: NodeTag;                  # Left boundary
+    rootKey: NodeKey|RepairKey;        # State root
+    db: HexaryGetFn|HexaryTreeDbRef;   # Database abstraction
+      ): HashSet[SnapProof]
+      {.gcsafe, raises: [CatchableError]} =
+  ## Helper for `updateProof()`
+  nodeTag
+    .hexaryPath(rootKey, db)
+    .path
+    .mapIt(it.node)
+    .mapIt(it.convertTo(Blob).to(SnapProof))
+    .toHashSet
+
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -74,20 +88,30 @@ template collectLeafs(
     rootKey: NodeKey|RepairKey;        # State root
     iv: NodeTagRange;                  # Proofed range of leaf paths
     nSizeLimit: int;                   # List of RLP encoded data must be smaller
-    nSizeUsed: var int;                # Updated size counter for the raw list
       ): auto =
   ## Collect trie database leafs prototype. This directive is provided as
   ## `template` for avoiding varying exception annotations.
-  var rc: Result[seq[RangeLeaf],HexaryError]
+  var rc: Result[RangeProof,HexaryError]

   block body:
+    let
+      nodeMax = maxPt(iv) # `inject` is for debugging (if any)
     var
       nodeTag = minPt(iv)
       prevTag: NodeTag
-      rls: seq[RangeLeaf]
+      rls: RangeProof

+    # Set up base node, the nearest node before `iv.minPt`
+    block:
+      let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
+      if rx.isOk:
+        rls.base = getPartialPath(rx.value).convertTo(NodeKey).to(NodeTag)
+      elif rx.error != NearbyFailed:
+        rc = typeof(rc).err(rx.error)
+        break body
+
     # Fill leaf nodes from interval range unless size reached
-    while nodeTag <= maxPt(iv):
+    while nodeTag <= nodeMax:
       # The following logic might be sub-optimal. A strict version of the
       # `next()` function that stops with an error at dangling links could
       # be faster if the leaf nodes are not too far apart on the hexary trie.

@@ -102,24 +126,30 @@ template collectLeafs(
         rightTag = rightKey.to(NodeTag)

       # Prevents from semi-endless looping
-      if rightTag <= prevTag and 0 < rls.len:
+      if rightTag <= prevTag and 0 < rls.leafs.len:
         # Oops, should have been tackled by `hexaryNearbyRight()`
         rc = typeof(rc).err(FailedNextNode)
         break body # stop here

       let (pairLen,listLen) =
-        hexaryRangeRlpLeafListSize(xPath.leafData.len, nSizeUsed)
+        hexaryRangeRlpLeafListSize(xPath.leafData.len, rls.leafsSize)

       if listLen < nSizeLimit:
-        nSizeUsed += pairLen
+        rls.leafsSize += pairLen
       else:
         break

-      rls.add RangeLeaf(
+      rls.leafs.add RangeLeaf(
         key: rightKey,
         data: xPath.leafData)

       prevTag = nodeTag
       nodeTag = rightTag + 1.u256
+      # End loop
+
+    # Count outer RLP wrapper
+    if 0 < rls.leafs.len:
+      rls.leafsSize = hexaryRangeRlpSize rls.leafsSize

     rc = typeof(rc).ok(rls)
     # End body
@@ -130,24 +160,17 @@

 template updateProof(
     db: HexaryGetFn|HexaryTreeDbRef;   # Database abstraction
     rootKey: NodeKey|RepairKey;        # State root
-    baseTag: NodeTag;                  # Left boundary
-    leafList: seq[RangeLeaf];          # Set of collected leafs
-    nSizeUsed: int;                    # To be stored into the result
+    rls: RangeProof;                   # Set of collected leafs and a `base`
       ): auto =
   ## Complement leafs list by adding proof nodes. This directive is provided as
   ## `template` for avoiding varying exception annotations.
-  var proof = nonLeafPathNodes(baseTag, rootKey, db)
-  if 0 < leafList.len:
-    proof.incl nonLeafPathNodes(leafList[^1].key.to(NodeTag), rootKey, db)
+  var proof = allPathNodes(rls.base, rootKey, db)
+  if 0 < rls.leafs.len:
+    proof.incl nonLeafPathNodes(rls.leafs[^1].key.to(NodeTag), rootKey, db)

-  var rp = RangeProof(
-    leafs: leafList,
-    proof: toSeq(proof))
-
-  if 0 < nSizeUsed:
-    rp.leafsSize = hexaryRangeRlpSize nSizeUsed
-  if 0 < rp.proof.len:
-    rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0)
+  var rp = rls
+  rp.proof = toSeq(proof)
+  rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0)

   rp
@@ -163,23 +186,21 @@ proc hexaryRangeLeafsProof*(
       ): Result[RangeProof,HexaryError]
       {.gcsafe, raises: [CatchableError]} =
   ## Collect trie database leafs prototype and add proof.
-  var accSize = 0
-  let rc = db.collectLeafs(rootKey, iv, nSizeLimit, accSize)
+  let rc = db.collectLeafs(rootKey, iv, nSizeLimit)
   if rc.isErr:
     err(rc.error)
   else:
-    ok(db.updateProof(rootKey, iv.minPt, rc.value, accSize))
+    ok(db.updateProof(rootKey, rc.value))

 proc hexaryRangeLeafsProof*(
     db: HexaryGetFn|HexaryTreeDbRef;   # Database abstraction
     rootKey: NodeKey;                  # State root
-    baseTag: NodeTag;                  # Left boundary
-    leafList: seq[RangeLeaf];          # Set of already collected leafs
+    rp: RangeProof;                    # Set of collected leafs and a `base`
       ): RangeProof
       {.gcsafe, raises: [CatchableError]} =
   ## Complement leafs list by adding proof nodes to the argument list
   ## `leafList`.
-  db.updateProof(rootKey, baseTag, leafList, 0)
+  db.updateProof(rootKey, rp)

 # ------------------------------------------------------------------------------
 # Public helpers
@@ -173,7 +173,6 @@ proc importAccounts*(
     base: NodeTag;                     ## Before or at first account entry in `data`
     data: PackedAccountRange;          ## Re-packed `snap/1 ` reply data
     persistent = false;                ## Store data on disk
-    noBaseBoundCheck = false;          ## Ignore left boundary proof check if `true`
       ): Result[SnapAccountsGaps,HexaryError] =
   ## Validate and import accounts (using proofs as received with the snap
   ## message `AccountRange`). This function accumulates data in a memory table

@@ -206,16 +205,6 @@ proc importAccounts*(
   ## Leaving out `(7&y,Y)` the boundary proofs still succeed but the
   ## return value will be @[`(7&y,c)`].
   ##
-  ## The left boundary proof might be omitted by passing `true` for the
-  ## `noBaseBoundCheck` argument. In this case, the boundary check must be
-  ## performed on the return code as
-  ## * if `data.accounts` is empty, the return value must be an empty list
-  ## * otherwise, all type `NodeSpecs` items `w` of the return code must
-  ##   satisfy
-  ##   ::
-  ##     let leastAccountPath = data.accounts[0].accKey.to(NodeTag)
-  ##     leastAccountPath <= w.partialPath.max(NodeKey).to(NodeTag)
-  ##
   ## Besides the inner gaps, the function also returns the dangling nodes left
   ## from the `proof` list.
   ##

@@ -227,7 +216,7 @@ proc importAccounts*(
     innerSubTrie: seq[NodeSpecs]       # internal, collect dangling links
   try:
     if 0 < data.proof.len:
-      let rc = ps.mergeProofs(ps.peer, data.proof)
+      let rc = ps.hexaDb.mergeProofs(ps.root, data.proof, ps.peer)
       if rc.isErr:
         return err(rc.error)
     block:

@@ -265,16 +254,17 @@ proc importAccounts*(
       let bottomTag = accounts[0].pathTag
       for w in innerSubTrie:
         if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)):
-          if not noBaseBoundCheck:
-            # Verify that `base` is to the left of the first account and there
-            # is nothing in between.
-            #
-            # Without `proof` data available there can only be a complete
-            # set/list of accounts so there are no dangling nodes in the first
-            # place. But there must be `proof` data for an empty list.
-            if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
-              return err(LowerBoundProofError)
-          # Otherwise register left over entry
+          # Verify that `base` is to the left of the first account and there
+          # is nothing in between. If there is an envelope to the left of
+          # the first account, then it might also cover a point before the
+          # first account.
+          #
+          # Without `proof` data available there can only be a complete
+          # set/list of accounts so there are no dangling nodes in the first
+          # place. But there must be `proof` data for an empty list.
+          if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
+            return err(LowerBoundProofError)
+          # Otherwise register left over entry, a gap in the accounts list
           gaps.innerGaps.add w

       if persistent:

@@ -287,10 +277,9 @@ proc importAccounts*(
           return err(LowerBoundProofError)

     else:
-      if not noBaseBoundCheck:
-        for w in proofStats.dangling:
-          if base <= w.partialPath.hexaryEnvelope.maxPt:
-            return err(LowerBoundProofError)
+      for w in proofStats.dangling:
+        if base <= w.partialPath.hexaryEnvelope.maxPt:
+          return err(LowerBoundProofError)
       gaps.dangling = proofStats.dangling

   except RlpError:

@@ -317,12 +306,10 @@ proc importAccounts*(
     root: Hash256;                     ## State root
     base: NodeTag;                     ## Before or at first account entry in `data`
     data: PackedAccountRange;          ## Re-packed `snap/1 ` reply data
-    noBaseBoundCheck = false;          ## Ignore left bound proof check if `true`
       ): Result[SnapAccountsGaps,HexaryError] =
   ## Variant of `importAccounts()` for persistent storage, only.
   SnapDbAccountsRef.init(
-    pv, root, peer).importAccounts(
-    base, data, persistent=true, noBaseBoundCheck)
+    pv, root, peer).importAccounts(base, data, persistent=true)

 proc importRawAccountsNodes*(
@@ -210,23 +210,22 @@ proc dbBackendRocksDb*(ps: SnapDbBaseRef): bool =
   not ps.base.rocky.isNil

 proc mergeProofs*(
-    ps: SnapDbBaseRef;                 ## Session database
-    peer: Peer;                        ## For log messages
+    xDb: HexaryTreeDbRef;              ## Session database
+    root: NodeKey;                     ## State root
     proof: seq[SnapProof];             ## Node records
+    peer = Peer();                     ## For log messages
     freeStandingOk = false;            ## Remove freestanding nodes
       ): Result[void,HexaryError]
       {.gcsafe, raises: [RlpError,KeyError].} =
   ## Import proof records (as received with snap message) into a hexary trie
   ## of the repair table. These hexary trie records can be extended to a full
   ## trie at a later stage and used for validating account data.
-  let
-    db = ps.hexaDb
   var
     nodes: HashSet[RepairKey]
-    refs = @[ps.root.to(RepairKey)].toHashSet
+    refs = @[root.to(RepairKey)].toHashSet

   for n,rlpRec in proof:
-    let report = db.hexaryImport(rlpRec.to(Blob), nodes, refs)
+    let report = xDb.hexaryImport(rlpRec.to(Blob), nodes, refs)
     if report.error != NothingSerious:
       let error = report.error
       trace "mergeProofs()", peer, item=n, proofs=proof.len, error

@@ -242,24 +241,25 @@ proc mergeProofs*(
   else:
     # Delete unreferenced nodes
     for nodeKey in nodes:
-      db.tab.del(nodeKey)
+      xDb.tab.del(nodeKey)
     trace "mergeProofs() ignoring unrelated nodes", peer, nodes=nodes.len

   ok()


 proc verifyLowerBound*(
-    ps: SnapDbBaseRef;                 ## Database session descriptor
-    peer: Peer;                        ## For log messages
+    xDb: HexaryTreeDbRef;              ## Session database
+    root: NodeKey;                     ## State root
     base: NodeTag;                     ## Before or at first account entry in `data`
-    first: NodeTag;                    ## First account key
+    first: NodeTag;                    ## First account/storage key
+    peer = Peer();                     ## For log messages
       ): Result[void,HexaryError]
       {.gcsafe, raises: [CatchableError].} =
   ## Verify that `base` is to the left of the first leaf entry and there is
   ## nothing in between.
   var error: HexaryError

-  let rc = base.hexaryNearbyRight(ps.root, ps.hexaDb)
+  let rc = base.hexaryNearbyRight(root, xDb)
   if rc.isErr:
     error = rc.error
   elif first == rc.value:

@@ -274,17 +274,18 @@ proc verifyLowerBound*(

 proc verifyNoMoreRight*(
-    ps: SnapDbBaseRef;                 ## Database session descriptor
-    peer: Peer;                        ## For log messages
+    xDb: HexaryTreeDbRef;              ## Session database
+    root: NodeKey;                     ## State root
     base: NodeTag;                     ## Before or at first account entry in `data`
+    peer = Peer();                     ## For log messages
       ): Result[void,HexaryError]
       {.gcsafe, raises: [CatchableError].} =
   ## Verify that there are no more leaf entries to the right of and
   ## including `base`.
   let
-    root = ps.root.to(RepairKey)
+    root = root.to(RepairKey)
     base = base.to(NodeKey)
-  if base.hexaryPath(root, ps.hexaDb).hexaryNearbyRightMissing(ps.hexaDb):
+  if base.hexaryPath(root, xDb).hexaryNearbyRightMissing(xDb):
     return ok()

   let error = LowerBoundProofError
@@ -315,10 +316,6 @@ proc assignPrettyKeys*(xDb: HexaryTreeDbRef; root: NodeKey) =
       of Extension: discard xDb.keyPp node.eLink
       of Leaf: discard

-proc assignPrettyKeys*(ps: SnapDbBaseRef) =
-  ## Variant of `assignPrettyKeys()`
-  ps.hexaDb.assignPrettyKeys(ps.root)
-
 proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] =
   ## Pretty print helper compiling the path into the repair tree for the
   ## argument `key`.

@@ -354,14 +351,6 @@ proc dumpHexaDB*(xDb: HexaryTreeDbRef; root: NodeKey; indent = 4): string =
   ## Beware: dumping a large database is not recommended
   xDb.pp(root, indent)

-proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string =
-  ## Ditto
-  ps.hexaDb.pp(ps.root, indent)
-
-proc hexaryPpFn*(ps: SnapDbBaseRef): HexaryPpFn =
-  ## Key mapping function used in `HexaryTreeDB`
-  ps.hexaDb.keyPp
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

+{.push raises: [].}
+
 import
   std/tables,
   chronicles,

@@ -19,8 +21,6 @@ import
     hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc,
     snapdb_persistent]

-{.push raises: [].}
-
 logScope:
   topics = "snap-db"
@@ -114,7 +114,6 @@ proc importStorageSlots(
     base: NodeTag;                     ## before or at first account entry in `data`
     data: AccountSlots;                ## Account storage descriptor
     proof: seq[SnapProof];             ## Storage slots proof data
-    noBaseBoundCheck = false;          ## Ignore left boundary proof check if `true`
       ): Result[seq[NodeSpecs],HexaryError]
       {.gcsafe, raises: [RlpError,KeyError].} =
   ## Process storage slots for a particular storage root. See `importAccounts()`

@@ -127,7 +126,7 @@ proc importStorageSlots(
     proofStats: TrieNodeStat           # `proof` data dangling links
     innerSubTrie: seq[NodeSpecs]       # internal, collect dangling links
   if 0 < proof.len:
-    let rc = tmpDb.mergeProofs(ps.peer, proof)
+    let rc = tmpDb.hexaDb.mergeProofs(tmpDb.root, proof, ps.peer)
     if rc.isErr:
       return err(rc.error)
   block:

@@ -161,15 +160,14 @@ proc importStorageSlots(
       let bottomTag = slots[0].pathTag
       for w in innerSubTrie:
         if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)):
-          if not noBaseBoundCheck:
-            # Verify that `base` is to the left of the first slot and there is
-            # nothing in between.
-            #
-            # Without `proof` data available there can only be a complete
-            # set/list of accounts so there are no dangling nodes in the first
-            # place. But there must be `proof` data for an empty list.
-            if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
-              return err(LowerBoundProofError)
+          # Verify that `base` is to the left of the first slot and there is
+          # nothing in between.
+          #
+          # Without `proof` data available there can only be a complete
+          # set/list of accounts so there are no dangling nodes in the first
+          # place. But there must be `proof` data for an empty list.
+          if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
+            return err(LowerBoundProofError)
           # Otherwise register left over entry
           dangling.add w

@@ -184,10 +182,9 @@ proc importStorageSlots(
           return err(LowerBoundProofError)

     else:
-      if not noBaseBoundCheck:
-        for w in proofStats.dangling:
-          if base <= w.partialPath.hexaryEnvelope.maxPt:
-            return err(LowerBoundProofError)
+      for w in proofStats.dangling:
+        if base <= w.partialPath.hexaryEnvelope.maxPt:
+          return err(LowerBoundProofError)
       dangling = proofStats.dangling

   ok(dangling)
@@ -233,7 +230,6 @@ proc importStorageSlots*(
     ps: SnapDbStorageSlotsRef;         ## Re-usable session descriptor
     data: AccountStorageRange;         ## Account storage reply from `snap/1` protocol
     persistent = false;                ## store data on disk
-    noBaseBoundCheck = false;          ## Ignore left boundary proof check if `true`
       ): seq[HexaryNodeReport] =
   ## Validate and import storage slots (using proofs as received with the snap
   ## message `StorageRanges`). This function accumulates data in a memory table

@@ -274,7 +270,7 @@ proc importStorageSlots*(
     block:
       itemInx = some(sTop)
       let rc = ps.importStorageSlots(
-        data.base, data.storages[sTop], data.proof, noBaseBoundCheck)
+        data.base, data.storages[sTop], data.proof)
       if rc.isErr:
         result.add HexaryNodeReport(slot: itemInx, error: rc.error)
         trace "Storage slots last item fails", peer, itemInx=sTop, nItems,

@@ -315,12 +311,11 @@ proc importStorageSlots*(
     pv: SnapDbRef;                     ## Base descriptor on `ChainDBRef`
     peer: Peer;                        ## For log messages, only
     data: AccountStorageRange;         ## Account storage reply from `snap/1` protocol
-    noBaseBoundCheck = false;          ## Ignore left boundary proof check if `true`
       ): seq[HexaryNodeReport] =
   ## Variant of `importStorages()`
   SnapDbStorageSlotsRef.init(
     pv, Hash256().to(NodeKey), Hash256(), peer).importStorageSlots(
-      data, persistent = true, noBaseBoundCheck)
+      data, persistent=true)

 proc importRawStorageSlotsNodes*(
@@ -128,7 +128,11 @@ proc tickerStats*(
   proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
     if 0 < length:
       result[0] = sum / length.float
-      result[1] = sqrt(sqSum / length.float - result[0] * result[0])
+      let
+        sqSumAv = sqSum / length.float
+        rSq = result[0] * result[0]
+      if rSq < sqSumAv:
+        result[1] = sqrt(sqSum / length.float - result[0] * result[0])

   result = proc: SnapTickerStats =
     var
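The guard protects the identity Var = E[x^2] - E[x]^2: with floating point rounding the difference can come out slightly negative for near-constant samples, and `sqrt` of a negative float yields NaN. A self-contained sketch of the patched helper, using the same names but no project types:

  import std/math

  proc meanStdDev(sum, sqSum: float; length: int): (float, float) =
    ## Mean and standard deviation from running sums; the guard keeps
    ## the radicand non-negative so the result is never NaN.
    if 0 < length:
      result[0] = sum / length.float
      let
        sqSumAv = sqSum / length.float   # E[x^2]
        rSq = result[0] * result[0]      # E[x]^2
      if rSq < sqSumAv:
        result[1] = sqrt(sqSumAv - rSq)

  when isMainModule:
    # three identical samples (1.0 each): deviation must be 0.0, not NaN
    doAssert meanStdDev(3.0, 3.0, 3) == (1.0, 0.0)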
@@ -163,8 +163,7 @@ proc accountsRangefetchImpl(
   let gaps = block:
     # No left boundary check needed. If there is a gap, the partial path for
     # that gap is returned by the import function to be registered, below.
-    let rc = db.importAccounts(
-      peer, stateRoot, iv.minPt, dd.data, noBaseBoundCheck = true)
+    let rc = db.importAccounts(peer, stateRoot, iv.minPt, dd.data)
     if rc.isErr:
       # Bad data, just try another peer
       buddy.ctrl.zombie = true
@@ -61,6 +61,9 @@
 ## In general, if an error occurs, the entry that caused the error is moved
 ## or re-stored onto the queue of partial requests `env.fetchStoragePart`.
 ##

+{.push raises: [].}
+
 import
   chronicles,
   chronos,

@@ -73,8 +76,6 @@ import
   ../db/[hexary_error, snapdb_storage_slots],
   ./storage_queue_helper

-{.push raises: [].}
-
 logScope:
   topics = "snap-range"
@@ -137,9 +138,7 @@ proc storeStoragesSingleBatch(
   if 0 < gotSlotLists:

     # Verify/process storages data and save it to disk
-    let report = ctx.pool.snapDb.importStorageSlots(
-      peer, stoRange.data, noBaseBoundCheck = true)
+    let report = ctx.pool.snapDb.importStorageSlots(peer, stoRange.data)

     if 0 < report.len:
       if report[^1].slot.isNone:
         # Failed to store on database, not much that can be done here
@@ -86,6 +86,8 @@
 ## * "."/[sync_desc, sync_sched, protocol]
 ##

+{.push raises: [].}
+
 import
   std/hashes,
   chronos,

@@ -93,8 +95,6 @@ import
   stew/keyed_queue,
   "."/[handlers, sync_desc]

-{.push raises: [].}
-
 static:
   # type `EthWireRef` is needed in `initSync()`
   type silenceUnusedhandlerComplaint = EthWireRef # dummy directive
@@ -230,22 +230,22 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
         accLst.test_accountsImport(desc, db.persistent)

         # debugging, make sure that state root ~ "$0"
-        desc.assignPrettyKeys()
+        hexaDb.assignPrettyKeys(root.to(NodeKey))

         # Beware: dumping a large database is not recommended
-        # true.say "***", "database dump\n ", desc.dumpHexaDB()
+        # true.say "***", "database dump\n ", hexaDb.dumpHexaDB(root)

       test &"Retrieve accounts & proofs for previous account ranges":
         if db.persistent:
           accLst.test_NodeRangeProof(getFn, dbg)
         else:
-          accLst.test_NodeRangeProof(hexaDB, dbg)
+          accLst.test_NodeRangeProof(hexaDb, dbg)

       test &"Verify left boundary checks":
         if db.persistent:
           accLst.test_NodeRangeLeftBoundary(getFn, dbg)
         else:
-          accLst.test_NodeRangeLeftBoundary(hexaDB, dbg)
+          accLst.test_NodeRangeLeftBoundary(hexaDb, dbg)

     block:
       # List of keys to be shared by sub-group
@@ -105,8 +105,8 @@ proc test_accountsMergeProofs*(
       # different from `.isImportOk`
       check desc.importAccounts(baseTag, packed, true).isOk

-      # check desc.merge(lowerBound, accounts) == OkHexDb
-      desc.assignPrettyKeys() # for debugging, make sure that state root ~ "$0"
+      # for debugging, make sure that state root ~ "$0"
+      desc.hexaDb.assignPrettyKeys(desc.root)

       # Update list of accounts. There might be additional accounts in the set
       # of proof nodes, typically before the `lowerBound` of each block. As
@ -20,8 +20,7 @@ import
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
  hexary_desc, hexary_envelope, hexary_error, hexary_interpolate,
  hexary_import, hexary_nearby, hexary_paths, hexary_range,
  snapdb_accounts, snapdb_desc],
  hexary_nearby, hexary_paths, hexary_range, snapdb_accounts, snapdb_desc],
../replay/[pp, undump_accounts],
./test_helpers
@ -30,10 +29,10 @@
cmaNlSpc = ",\n" & repeat(" ",13)

# ------------------------------------------------------------------------------
# Private helpers
# Private functions, pretty printing
# ------------------------------------------------------------------------------

proc ppNodeKeys(a: openArray[SnapProof], dbg = HexaryTreeDbRef(nil)): string =
proc ppNodeKeys(a: openArray[SnapProof]; dbg = HexaryTreeDbRef(nil)): string =
  result = "["
  if dbg.isNil:
    result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(collapse=true)).join(",")
@ -41,6 +40,18 @@ proc ppNodeKeys(a: openArray[SnapProof], dbg = HexaryTreeDbRef(nil)): string =
    result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(dbg)).join(",")
  result &= "]"

proc ppHexPath(p: RPath|XPath; dbg = HexaryTreeDbRef(nil)): string =
  if dbg.isNil:
    "*pretty printing disabled*"
  else:
    p.pp(dbg)

proc pp(a: NodeTag; collapse = true): string =
  a.to(NodeKey).pp(collapse)

proc pp(iv: NodeTagRange; collapse = false): string =
  "(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")"

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
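Note: `ppHexPath` is new; it guards the path pretty-printer against a missing debug handle instead of dereferencing nil. The same pattern in self-contained form (the `DebugDb` type is a stand-in for `HexaryTreeDbRef`):

  type DebugDb = ref object
    label: string

  proc ppHexPath(path: string; dbg: DebugDb = nil): string =
    ## Render `path` via the debug handle, or degrade gracefully.
    if dbg.isNil:
      "*pretty printing disabled*"
    else:
      dbg.label & ":" & path

  when isMainModule:
    doAssert ppHexPath("00ab") == "*pretty printing disabled*"
    doAssert ppHexPath("00ab", DebugDb(label: "dbg")) == "dbg:00ab"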
@ -191,6 +202,7 @@ proc printCompareLeftNearby(

proc verifyRangeProof(
  rootKey: NodeKey;
  baseTag: NodeTag;
  leafs: seq[RangeLeaf];
  proof: seq[SnapProof];
  dbg = HexaryTreeDbRef(nil);
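Note: the added `baseTag` parameter lets the verifier check the left boundary of the proved range: a proof for `[baseTag, lastLeaf]` is complete only if no leaf can exist between `baseTag` and the first returned leaf. A minimal sketch of that predicate over a sorted key list (hypothetical helper, not the nimbus API):

  import std/algorithm

  proc lowerBoundOk(sortedLeafs: seq[uint64]; baseTag, firstLeaf: uint64): bool =
    ## True iff the leftmost leaf at or after `baseTag` is exactly
    ## `firstLeaf`, i.e. the gap [baseTag, firstLeaf) is provably empty.
    let i = sortedLeafs.lowerBound(baseTag)
    i < sortedLeafs.len and sortedLeafs[i] == firstLeaf

  when isMainModule:
    doAssert lowerBoundOk(@[10'u64, 40, 90], 20, 40)      # gap (20..39) empty
    doAssert not lowerBoundOk(@[10'u64, 40, 90], 20, 90)  # leaf 40 omitted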
@ -203,24 +215,34 @@ proc verifyRangeProof(
if not dbg.isNil:
  xDb.keyPp = dbg.keyPp

# Import proof nodes
var unrefs, refs: HashSet[RepairKey] # values ignored
for rlpRec in proof:
  let importError = xDb.hexaryImport(rlpRec.to(Blob), unrefs, refs).error
  if importError != HexaryError(0):
    check importError == HexaryError(0)
    return err(importError)

# Build tree
var lItems = leafs.mapIt(RLeafSpecs(
  pathTag: it.key.to(NodeTag),
  payload: it.data))
let rc = xDb.hexaryInterpolate(rootKey, lItems)
if rc.isOk:
  return ok()

result = ok()
block verify:
  # Import proof nodes
  result = xDb.mergeProofs(rootKey, proof)
  if result.isErr:
    check result == Result[void,HexaryError].ok()
    break verify

  # Build tree
  var lItems = leafs.mapIt(RLeafSpecs(
    pathTag: it.key.to(NodeTag),
    payload: it.data))
  result = xDb.hexaryInterpolate(rootKey, lItems)
  if result.isErr:
    check result == Result[void,HexaryError].ok()
    break verify

  # Left proof
  result = xDb.verifyLowerBound(rootKey, baseTag, leafs[0].key.to(NodeTag))
  if result.isErr:
    check result == Result[void,HexaryError].ok()
    break verify

  return ok()

if noisy:
  true.say "\n***", "error=", rc.error,
  true.say "\n***", "error=", result.error,
    #"\n",
    #"\n unrefs=[", unrefs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
    #"\n refs=[", refs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
@ -232,17 +254,6 @@ proc verifyRangeProof(
    "\n\n database dump",
    "\n ", xDb.dumpHexaDB(rootKey),
    "\n"
rc

# ------------------------------------------------------------------------------
# Private functions, pretty printing
# ------------------------------------------------------------------------------

proc pp(a: NodeTag; collapse = true): string =
  a.to(NodeKey).pp(collapse)

proc pp(iv: NodeTagRange; collapse = false): string =
  "(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")"

# ------------------------------------------------------------------------------
# Public test function
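Note: the rewrite replaces per-stage early returns with one named block: each stage (proof import, tree interpolation, left-boundary check) stores its outcome in `result` and leaves via `break verify` on error, so the noisy diagnostics run on exactly one path. A self-contained sketch of that control flow:

  type Res = tuple[ok: bool, error: string]

  proc runStages(failAt: int): Res =
    result = (true, "")
    block verify:
      for stage in 1 .. 3:     # import proofs, build tree, left proof
        if stage == failAt:
          result = (false, "stage " & $stage & " failed")
          break verify         # single exit to the reporting code below
    # diagnostics would be printed here when `result.ok` is false

  when isMainModule:
    doAssert runStages(0).ok                         # no stage fails
    doAssert runStages(2).error == "stage 2 failed"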
@ -379,9 +390,15 @@ proc test_NodeRangeProof*(

# Assuming the `inLst` entries have been stored in the DB already
for n,w in inLst:
  doAssert 1 < w.data.accounts.len
  let
    accounts = w.data.accounts[0 ..< min(w.data.accounts.len,maxLen)]
    iv = NodeTagRange.new(w.base, accounts[^1].accKey.to(NodeTag))
    # Use the middle of the first two points as base
    delta = (w.data.accounts[1].accKey.to(NodeTag) -
             w.data.accounts[0].accKey.to(NodeTag)) div 2
    base = w.data.accounts[0].accKey.to(NodeTag) + delta
    # Assemble accounts list starting at the second item
    accounts = w.data.accounts[1 ..< min(w.data.accounts.len,maxLen)]
    iv = NodeTagRange.new(base, accounts[^1].accKey.to(NodeTag))
    rc = db.hexaryRangeLeafsProof(rootKey, iv)
  check rc.isOk
  if rc.isErr:
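Note: choosing the base halfway between the first two account keys makes the proved range start strictly inside a gap, exercising the left-boundary logic. The midpoint form `a + (b - a) div 2` cannot underflow for unsigned tags as long as `a <= b`; a sketch with `uint64` standing in for the 256-bit `NodeTag`:

  proc midPoint(a, b: uint64): uint64 =
    ## Midpoint of `a <= b`, safe against unsigned wrap-around.
    doAssert a <= b
    a + (b - a) div 2

  when isMainModule:
    doAssert midPoint(10, 20) == 15
    doAssert midPoint(0, high(uint64)) == high(uint64) div 2
    # a naive (a + b) div 2 would already wrap around here:
    doAssert midPoint(high(uint64) - 2, high(uint64)) == high(uint64) - 1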
@ -407,7 +424,7 @@ proc test_NodeRangeProof*(
if leafs.len != accounts.len or accounts[^1].accKey != leafs[^1].key:
  noisy.say "***", "n=", n, " something went wrong .."
  check (n,leafs.len) == (n,accounts.len)
  rootKey.printCompareRightLeafs(w.base, accounts, leafs, db, dbg)
  rootKey.printCompareRightLeafs(base, accounts, leafs, db, dbg)
  return
proof = rc.value.proof
@ -415,7 +432,7 @@ proc test_NodeRangeProof*(
check rc.value.proofSize == proof.proofEncode.len
check rc.value.leafsSize == leafsRlpLen
else:
  # Make sure that the size calculation deliver the expected number
  # Make sure that the size calculation delivers the expected number
  # of entries.
  let rx = db.hexaryRangeLeafsProof(rootKey, iv, leafsRlpLen + 1)
  check rx.isOk
@ -427,13 +444,13 @@ proc test_NodeRangeProof*(
check rx.value.proofSize == rx.value.proof.proofEncode.len

# Re-adjust proof
proof = db.hexaryRangeLeafsProof(rootKey, iv.minPt, leafs).proof
proof = db.hexaryRangeLeafsProof(rootKey, rx.value).proof

# Import proof nodes and build trie
block:
  var rx = rootKey.verifyRangeProof(leafs, proof)
  var rx = rootKey.verifyRangeProof(base, leafs, proof)
  if rx.isErr:
    rx = rootKey.verifyRangeProof(leafs, proof, dbg)
    rx = rootKey.verifyRangeProof(base, leafs, proof, dbg)
  let
    baseNbls = iv.minPt.to(NodeKey).to(NibblesSeq)
    lastNbls = iv.maxPt.to(NodeKey).to(NibblesSeq)
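Note: the `leafsRlpLen + 1` argument above caps the RLP-encoded size of the returned leaf list, so the reply is deliberately truncated and the proof has to be rebuilt for the shorter range. A sketch of the budgeting idea (hypothetical helper):

  proc takeWithinBudget(itemSizes: seq[int]; budget: int): int =
    ## Number of leading items whose accumulated size stays within `budget`.
    var total = 0
    for n, sz in itemSizes:
      total += sz
      if budget < total:
        return n
    itemSizes.len

  when isMainModule:
    doAssert takeWithinBudget(@[10, 10, 10], 25) == 2   # third item dropped
    doAssert takeWithinBudget(@[10, 10, 10], 35) == 3   # everything fits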
@ -445,11 +462,11 @@ proc test_NodeRangeProof*(
  " proof=", proof.ppNodeKeys(dbg),
  "\n\n ",
  " base=", iv.minPt,
  "\n ", iv.minPt.hexaryPath(rootKey,db).pp(dbg),
  "\n ", iv.minPt.hexaryPath(rootKey,db).ppHexpath(dbg),
  "\n\n ",
  " pfx=", pfxNbls,
  " nPfx=", nPfxNblsLen,
  "\n ", pfxNbls.hexaryPath(rootKey,db).pp(dbg),
  "\n ", pfxNbls.hexaryPath(rootKey,db).ppHexpath(dbg),
  "\n"

check rx == typeof(rx).ok()