From fe3a6d67c68ebec74de03f9016098ab71824924a Mon Sep 17 00:00:00 2001 From: Jordan Hrycaj Date: Tue, 7 Mar 2023 14:23:22 +0000 Subject: [PATCH] Prepare snap server client test scenario cont2 (#1487) * Clean up some function prototypes why: Simplify polymorphic prototype variances for easier maintenance. * Fix fringe condition crash when importing bogus RLP node why: Accessing non-list RLP entry as a list causes `Defect` * Fix left boundary proof at range extractor why: Was insufficient. The main problem was that there was no unit test for the validity of the generated left boundary. * Handle incomplete left boundary proofs early why: Attempt to do it later leads to overly complex code in order to prevent looping when the same peer repeats to send the same incomplete proof. Contrary, gaps in the leaf sequence can be handled gracefully with registering the gaps * Implement a manual pivot setup mechanism for snap sync why: For a test scenario it is convenient to set the pivot to something lower than the beacon header from the consensus layer. This does not need to rely on any RPC mechanism. details: The file containing the pivot specs is specified by the `--sync-ctrl-file` option. It is regularly parsed for updates. 
* Fix calculation error why: Prevent from calculating negative square root --- nimbus/config.nim | 8 +- nimbus/sync/full/ticker.nim | 2 +- nimbus/sync/full/worker.nim | 25 +++-- nimbus/sync/handlers/setup.nim | 6 +- nimbus/sync/handlers/snap.nim | 13 +-- nimbus/sync/misc/sync_ctrl.nim | 31 +++++++ nimbus/sync/snap/update_beacon_header.nim | 76 +++++++++++++++ nimbus/sync/snap/worker.nim | 11 ++- .../com/{notused => }/get_block_header.nim | 90 +++++++++--------- nimbus/sync/snap/worker/db/hexary_import.nim | 12 ++- nimbus/sync/snap/worker/db/hexary_range.nim | 91 +++++++++++------- .../sync/snap/worker/db/snapdb_accounts.nim | 45 ++++----- nimbus/sync/snap/worker/db/snapdb_desc.nim | 43 ++++----- .../snap/worker/db/snapdb_storage_slots.nim | 37 ++++---- nimbus/sync/snap/worker/pivot.nim | 6 +- .../worker/pivot/range_fetch_accounts.nim | 3 +- .../pivot/range_fetch_storage_slots.nim | 9 +- nimbus/sync/sync_sched.nim | 4 +- tests/test_sync_snap.nim | 8 +- tests/test_sync_snap/test_accounts.nim | 4 +- tests/test_sync_snap/test_node_range.nim | 93 +++++++++++-------- 21 files changed, 379 insertions(+), 238 deletions(-) create mode 100644 nimbus/sync/snap/update_beacon_header.nim rename nimbus/sync/snap/worker/com/{notused => }/get_block_header.nim (61%) diff --git a/nimbus/config.nim b/nimbus/config.nim index a3e14f5aa..b0b8325c4 100644 --- a/nimbus/config.nim +++ b/nimbus/config.nim @@ -179,10 +179,10 @@ type name: "sync-mode" .}: SyncMode syncCtrlFile* {. - desc: "Specify a file that is regularly checked for updates. It " & - "contains extra information specific to the type of sync " & - "process. This option is primaily intended only for sync " & - "testing and debugging." + desc: "Specify a file that is regularly checked for updates. If it " & + "exists it is checked for whether it contains extra information " & + "specific to the type of sync process. This option is primarily " & + "intended only for sync testing and debugging." 
abbr: "z" name: "sync-ctrl-file" }: Option[string] diff --git a/nimbus/sync/full/ticker.nim b/nimbus/sync/full/ticker.nim index 8023f7950..acc53ab25 100644 --- a/nimbus/sync/full/ticker.nim +++ b/nimbus/sync/full/ticker.nim @@ -19,7 +19,7 @@ import {.push raises: [].} logScope: - topics = "full-ticker" + topics = "full-tick" type TickerStats* = object diff --git a/nimbus/sync/full/worker.nim b/nimbus/sync/full/worker.nim index 0bb753e56..3515cfb57 100644 --- a/nimbus/sync/full/worker.nim +++ b/nimbus/sync/full/worker.nim @@ -145,6 +145,17 @@ proc processStaged(buddy: FullBuddyRef): bool = return false + +proc suspendDownload(buddy: FullBuddyRef): bool = + ## Check whether downloading should be suspended + let ctx = buddy.ctx + if ctx.exCtrlFile.isSome: + let rc = ctx.exCtrlFile.syncCtrlBlockNumberFromFile + if rc.isOk: + ctx.pool.suspendAt = rc.value + if 0 < ctx.pool.suspendAt: + return ctx.pool.suspendAt < buddy.only.bQueue.topAccepted + # ------------------------------------------------------------------------------ # Public start/stop and admin functions # ------------------------------------------------------------------------------ @@ -378,7 +389,6 @@ proc runPool*(buddy: FullBuddyRef; last: bool): bool = buddy.ctx.poolMode = false true - proc runMulti*(buddy: FullBuddyRef) {.async.} = ## This peer worker is invoked if the `buddy.ctrl.multiOk` flag is set ## `true` which is typically done after finishing `runSingle()`. 
This @@ -388,15 +398,10 @@ proc runMulti*(buddy: FullBuddyRef) {.async.} = ctx = buddy.ctx bq = buddy.only.bQueue - if ctx.exCtrlFile.isSome: - let rc = ctx.exCtrlFile.syncCtrlBlockNumberFromFile - if rc.isOk: - ctx.pool.suspendAt = rc.value - if 0 < ctx.pool.suspendAt: - if ctx.pool.suspendAt < buddy.only.bQueue.topAccepted: - # Sleep for a while, then leave - await sleepAsync(10.seconds) - return + if buddy.suspendDownload: + # Sleep for a while, then leave + await sleepAsync(10.seconds) + return # Fetch work item let rc = await bq.blockQueueWorker() diff --git a/nimbus/sync/handlers/setup.nim b/nimbus/sync/handlers/setup.nim index cb1a4c66f..966e9c39f 100644 --- a/nimbus/sync/handlers/setup.nim +++ b/nimbus/sync/handlers/setup.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. +{.used, push raises: [].} + import eth/p2p, ../../core/[chain, tx_pool], @@ -15,8 +17,6 @@ import ./eth as handlers_eth, ./snap as handlers_snap -{.used, push raises: [].} - # ------------------------------------------------------------------------------ # Public functions: convenience mappings for `eth` # ------------------------------------------------------------------------------ @@ -51,7 +51,7 @@ proc addSnapHandlerCapability*( node: var EthereumNode; peerPool: PeerPool; chain = ChainRef(nil); - ) = + ) = ## Install `snap` handlers,Passing `chein` as `nil` installs the handler ## in minimal/outbound mode. if chain.isNil: diff --git a/nimbus/sync/handlers/snap.nim b/nimbus/sync/handlers/snap.nim index 682e92798..c9ee36052 100644 --- a/nimbus/sync/handlers/snap.nim +++ b/nimbus/sync/handlers/snap.nim @@ -74,36 +74,37 @@ proc fetchLeafRange( # on wire. So the `sizeMax` is the argument size `replySizeMax` with some # space removed to accomodate for the proof nodes. 
let - sizeMax =replySizeMax - estimatedProofSize + sizeMax = replySizeMax - estimatedProofSize rc = db.hexaryRangeLeafsProof(rootKey, iv, sizeMax) if rc.isErr: error logTxt "fetchLeafRange(): database problem", iv, replySizeMax, error=rc.error return err() # database error let sizeOnWire = rc.value.leafsSize + rc.value.proofSize + if sizeOnWire <= replySizeMax: return ok(rc.value) # Strip parts of leafs result and amend remainder by adding proof nodes var - leafs = rc.value.leafs - leafsTop = leafs.len - 1 + rpl = rc.value + leafsTop = rpl.leafs.len - 1 tailSize = 0 tailItems = 0 reduceBy = replySizeMax - sizeOnWire while tailSize <= reduceBy and tailItems < leafsTop: # Estimate the size on wire needed for the tail item const extraSize = (sizeof RangeLeaf()) - (sizeof newSeq[Blob](0)) - tailSize += leafs[leafsTop - tailItems].data.len + extraSize + tailSize += rpl.leafs[leafsTop - tailItems].data.len + extraSize tailItems.inc if leafsTop <= tailItems: trace logTxt "fetchLeafRange(): stripping leaf list failed", iv, replySizeMax,leafsTop, tailItems return err() # package size too small - leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack + rpl.leafs.setLen(leafsTop - tailItems - 1) # chop off one more for slack let - leafProof = db.hexaryRangeLeafsProof(rootKey, iv.minPt, leafs) + leafProof = db.hexaryRangeLeafsProof(rootKey, rpl) strippedSizeOnWire = leafProof.leafsSize + leafProof.proofSize if strippedSizeOnWire <= replySizeMax: return ok(leafProof) diff --git a/nimbus/sync/misc/sync_ctrl.nim b/nimbus/sync/misc/sync_ctrl.nim index 6f05f2654..7a0446c65 100644 --- a/nimbus/sync/misc/sync_ctrl.nim +++ b/nimbus/sync/misc/sync_ctrl.nim @@ -51,6 +51,37 @@ proc syncCtrlBlockNumberFromFile*( debug "Exception while parsing block number", file, name, msg err() +proc syncCtrlHashOrBlockNumFromFile*( + fileName: Option[string]; + ): Result[HashOrNum,void] = + ## Returns a block number or a hash from the file name argument `fileName`. 
+ ## A block number is decimal encoded and a hash is expexted to be a 66 hex + ## digits string startnib wiyh `0x`. + if fileName.isSome: + let file = fileName.get + + # Parse value dump and fetch a header from the peer (if any) + try: + let data = file.getDataLine + if 0 < data.len: + if 66 == data.len: + let hash = HashOrNum( + isHash: true, + hash: Hash256( + data: UInt256.fromHex(data).toBytesBE)) + return ok(hash) + else: + let num = HashOrNum( + isHash: false, + number: parse(data,UInt256)) + return ok(num) + except CatchableError as e: + let + name {.used.} = $e.name + msg {.used.} = e.msg + debug "Exception while parsing hash or block number", file, name, msg + err() + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/update_beacon_header.nim b/nimbus/sync/snap/update_beacon_header.nim new file mode 100644 index 000000000..86f19514a --- /dev/null +++ b/nimbus/sync/snap/update_beacon_header.nim @@ -0,0 +1,76 @@ +# Nimbus +# Copyright (c) 2021 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +{.push raises: [].} + +import + chronicles, + chronos, + eth/[common, p2p], + ../sync_desc, + ../misc/sync_ctrl, + ./worker_desc, + ./worker/com/[com_error, get_block_header] + +logScope: + topics = "snap-ctrl" + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc updateBeaconHeaderFromFile*( + buddy: SnapBuddyRef; # Worker peer + ) {.async.} = + ## This function updates the beacon header cache by import from the file name + ## argument `fileName`. The first line of the contents of the file looks like + ## * `0x` -- hash of block header + ## * `` -- block number + ## This function is typically used for testing and debugging. + let + ctx = buddy.ctx + + hashOrNum = block: + let rc = ctx.exCtrlFile.syncCtrlHashOrBlockNumFromFile + if rc.isErr: + return + rc.value + + peer = buddy.peer + + var + rc = Result[BlockHeader,ComError].err(ComError(0)) + isHash = hashOrNum.isHash # so that the value can be logged + + # Parse value dump and fetch a header from the peer (if any) + try: + if isHash: + let hash = hashOrNum.hash + trace "External beacon info", peer, hash + if hash != ctx.pool.beaconHeader.hash: + rc = await buddy.getBlockHeader(hash) + else: + let num = hashOrNum.number + trace "External beacon info", peer, num + if ctx.pool.beaconHeader.blockNumber < num: + rc = await buddy.getBlockHeader(num) + except CatchableError as e: + let + name {.used.} = $e.name + msg {.used.} = e.msg + trace "Exception while parsing beacon info", peer, isHash, name, msg + + if rc.isOk: + if ctx.pool.beaconHeader.blockNumber < rc.value.blockNumber: + ctx.pool.beaconHeader = rc.value + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker.nim b/nimbus/sync/snap/worker.nim index 
6ffae07e7..b982da319 100644 --- a/nimbus/sync/snap/worker.nim +++ b/nimbus/sync/snap/worker.nim @@ -21,7 +21,7 @@ import ./worker/[pivot, ticker], ./worker/com/com_error, ./worker/db/[hexary_desc, snapdb_desc, snapdb_pivot], - "."/[range_desc, worker_desc] + "."/[range_desc, update_beacon_header, worker_desc] {.push raises: [].} @@ -126,6 +126,10 @@ proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool = checkpoint=("#" & $ctx.pool.pivotTable.topNumber() & "(0)") if not ctx.pool.ticker.isNil: ctx.pool.ticker.startRecovery() + + if ctx.exCtrlFile.isSome: + warn "Snap sync accepts pivot block number or hash", + syncCtrlFile=ctx.exCtrlFile.get true proc release*(ctx: SnapCtxRef) = @@ -179,6 +183,11 @@ proc runSingle*(buddy: SnapBuddyRef) {.async.} = ## * `buddy.ctrl.multiOk` is `false` ## * `buddy.ctrl.poolMode` is `false` ## + let ctx = buddy.ctx + + # External beacon header updater + await buddy.updateBeaconHeaderFromFile() + await buddy.pivotApprovePeer() buddy.ctrl.multiOk = true diff --git a/nimbus/sync/snap/worker/com/notused/get_block_header.nim b/nimbus/sync/snap/worker/com/get_block_header.nim similarity index 61% rename from nimbus/sync/snap/worker/com/notused/get_block_header.nim rename to nimbus/sync/snap/worker/com/get_block_header.nim index 9a2ebc4a2..ea83ee77f 100644 --- a/nimbus/sync/snap/worker/com/notused/get_block_header.nim +++ b/nimbus/sync/snap/worker/com/get_block_header.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. 
+{.push raises: [].} + import std/options, chronos, @@ -17,8 +19,6 @@ import ../../worker_desc, ./com_error -{.push raises: [Defect].} - logScope: topics = "snap-fetch" @@ -26,49 +26,49 @@ logScope: # Public functions # ------------------------------------------------------------------------------ -# proc getBlockHeader*( -# buddy: SnapBuddyRef; -# num: BlockNumber; -# ): Future[Result[BlockHeader,ComError]] -# {.async.} = -# ## Get single block header -# let -# peer = buddy.peer -# reqLen = 1u -# hdrReq = BlocksRequest( -# startBlock: HashOrNum( -# isHash: false, -# number: num), -# maxResults: reqLen, -# skip: 0, -# reverse: false) -# -# trace trEthSendSendingGetBlockHeaders, peer, header=("#" & $num), reqLen -# -# var hdrResp: Option[blockHeadersObj] -# try: -# hdrResp = await peer.getBlockHeaders(hdrReq) -# except CatchableError as e: -# trace trSnapRecvError & "waiting for GetByteCodes reply", peer, -# error=e.msg -# return err(ComNetworkProblem) -# -# var hdrRespLen = 0 -# if hdrResp.isSome: -# hdrRespLen = hdrResp.get.headers.len -# if hdrRespLen == 0: -# trace trEthRecvReceivedBlockHeaders, peer, reqLen, respose="n/a" -# return err(ComNoHeaderAvailable) -# -# if hdrRespLen == 1: -# let -# header = hdrResp.get.headers[0] -# blockNumber = header.blockNumber -# trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber -# return ok(header) -# -# trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen -# return err(ComTooManyHeaders) +proc getBlockHeader*( + buddy: SnapBuddyRef; + num: BlockNumber; + ): Future[Result[BlockHeader,ComError]] + {.async.} = + ## Get single block header + let + peer = buddy.peer + reqLen = 1u + hdrReq = BlocksRequest( + startBlock: HashOrNum( + isHash: false, + number: num), + maxResults: reqLen, + skip: 0, + reverse: false) + + trace trEthSendSendingGetBlockHeaders, peer, header=("#" & $num), reqLen + + var hdrResp: Option[blockHeadersObj] + try: + hdrResp = await peer.getBlockHeaders(hdrReq) + except CatchableError 
as e: + trace trSnapRecvError & "waiting for GetByteCodes reply", peer, + error=e.msg + return err(ComNetworkProblem) + + var hdrRespLen = 0 + if hdrResp.isSome: + hdrRespLen = hdrResp.get.headers.len + if hdrRespLen == 0: + trace trEthRecvReceivedBlockHeaders, peer, reqLen, respose="n/a" + return err(ComNoHeaderAvailable) + + if hdrRespLen == 1: + let + header = hdrResp.get.headers[0] + blockNumber = header.blockNumber + trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber + return ok(header) + + trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen + return err(ComTooManyHeaders) proc getBlockHeader*( diff --git a/nimbus/sync/snap/worker/db/hexary_import.nim b/nimbus/sync/snap/worker/db/hexary_import.nim index 12fef17fa..81cbe68cf 100644 --- a/nimbus/sync/snap/worker/db/hexary_import.nim +++ b/nimbus/sync/snap/worker/db/hexary_import.nim @@ -8,14 +8,14 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. +{.push raises: [].} + import std/[sets, tables], eth/[common, trie/nibbles], ../../range_desc, "."/[hexary_desc, hexary_error] -{.push raises: [].} - # ------------------------------------------------------------------------------ # Private debugging helpers # ------------------------------------------------------------------------------ @@ -49,6 +49,10 @@ proc hexaryImport*( top = 0 # count entries rNode: RNodeRef # repair tree node + if not rlp.isList: + # Otherwise `rlp.items` will raise a `Defect` + return HexaryNodeReport(error: Rlp2Or17ListEntries) + # Collect lists of either 2 or 17 blob entries. for w in rlp.items: case top @@ -145,6 +149,10 @@ proc hexaryImport*( top = 0 # count entries rNode: RNodeRef # repair tree node + if not rlp.isList: + # Otherwise `rlp.items` will raise a `Defect` + return HexaryNodeReport(error: Rlp2Or17ListEntries) + # Collect lists of either 2 or 17 blob entries. 
for w in rlp.items: case top diff --git a/nimbus/sync/snap/worker/db/hexary_range.nim b/nimbus/sync/snap/worker/db/hexary_range.nim index eb0e2171d..8d8d23bf6 100644 --- a/nimbus/sync/snap/worker/db/hexary_range.nim +++ b/nimbus/sync/snap/worker/db/hexary_range.nim @@ -12,7 +12,6 @@ import std/[sequtils, sets, tables], - chronicles, eth/[common, p2p, trie/nibbles], stew/[byteutils, interval_set], ../../../protocol, @@ -25,10 +24,11 @@ type data*: Blob ## Leaf node data RangeProof* = object - leafs*: seq[RangeLeaf] - leafsSize*: int - proof*: seq[SnapProof] - proofSize*: int + base*: NodeTag ## No node between `base` and `leafs[0]` + leafs*: seq[RangeLeaf] ## List of consecutive leaf nodes + leafsSize*: int ## RLP encoded size of `leafs` on wire + proof*: seq[SnapProof] ## Boundary proof + proofSize*: int ## RLP encoded size of `proof` on wire proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) {.gcsafe.} proc hexaryRangeRlpSize*(blobLen: int): int {.gcsafe.} @@ -51,13 +51,13 @@ proc rlpPairSize(aLen: int; bRlpLen: int): int = high(int) proc nonLeafPathNodes( - baseTag: NodeTag; # Left boundary + nodeTag: NodeTag; # Left boundary rootKey: NodeKey|RepairKey; # State root db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction ): HashSet[SnapProof] {.gcsafe, raises: [CatchableError]} = ## Helper for `updateProof()` - baseTag + nodeTag .hexaryPath(rootKey, db) .path .mapIt(it.node) @@ -65,6 +65,20 @@ proc nonLeafPathNodes( .mapIt(it.convertTo(Blob).to(SnapProof)) .toHashSet +proc allPathNodes( + nodeTag: NodeTag; # Left boundary + rootKey: NodeKey|RepairKey; # State root + db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction + ): HashSet[SnapProof] + {.gcsafe, raises: [CatchableError]} = + ## Helper for `updateProof()` + nodeTag + .hexaryPath(rootKey, db) + .path + .mapIt(it.node) + .mapIt(it.convertTo(Blob).to(SnapProof)) + .toHashSet + # ------------------------------------------------------------------------------ # Private functions # 
------------------------------------------------------------------------------ @@ -74,20 +88,30 @@ template collectLeafs( rootKey: NodeKey|RepairKey; # State root iv: NodeTagRange; # Proofed range of leaf paths nSizeLimit: int; # List of RLP encoded data must be smaller - nSizeUsed: var int; # Updated size counter for the raw list ): auto = ## Collect trie database leafs prototype. This directive is provided as ## `template` for avoiding varying exceprion annotations. - var rc: Result[seq[RangeLeaf],HexaryError] + var rc: Result[RangeProof,HexaryError] block body: + let + nodeMax = maxPt(iv) # `inject` is for debugging (if any) var nodeTag = minPt(iv) prevTag: NodeTag - rls: seq[RangeLeaf] + rls: RangeProof + + # Set up base node, the nearest node before `iv.minPt` + block: + let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyLeft(db) + if rx.isOk: + rls.base = getPartialPath(rx.value).convertTo(NodeKey).to(NodeTag) + elif rx.error != NearbyFailed: + rc = typeof(rc).err(rx.error) + break body # Fill leaf nodes from interval range unless size reached - while nodeTag <= maxPt(iv): + while nodeTag <= nodeMax: # The following logic might be sub-optimal. A strict version of the # `next()` function that stops with an error at dangling links could # be faster if the leaf nodes are not too far apart on the hexary trie. 
@@ -102,24 +126,30 @@ template collectLeafs( rightTag = rightKey.to(NodeTag) # Prevents from semi-endless looping - if rightTag <= prevTag and 0 < rls.len: + if rightTag <= prevTag and 0 < rls.leafs.len: # Oops, should have been tackeled by `hexaryNearbyRight()` rc = typeof(rc).err(FailedNextNode) break body # stop here let (pairLen,listLen) = - hexaryRangeRlpLeafListSize(xPath.leafData.len, nSizeUsed) + hexaryRangeRlpLeafListSize(xPath.leafData.len, rls.leafsSize) + if listLen < nSizeLimit: - nSizeUsed += pairLen + rls.leafsSize += pairLen else: break - rls.add RangeLeaf( + rls.leafs.add RangeLeaf( key: rightKey, data: xPath.leafData) prevTag = nodeTag nodeTag = rightTag + 1.u256 + # End loop + + # Count outer RLP wrapper + if 0 < rls.leafs.len: + rls.leafsSize = hexaryRangeRlpSize rls.leafsSize rc = typeof(rc).ok(rls) # End body @@ -130,24 +160,17 @@ template collectLeafs( template updateProof( db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction rootKey: NodeKey|RepairKey; # State root - baseTag: NodeTag; # Left boundary - leafList: seq[RangeLeaf]; # Set of collected leafs - nSizeUsed: int; # To be stored into the result + rls: RangeProof; # Set of collected leafs and a `base` ): auto = ## Complement leafs list by adding proof nodes. This directive is provided as ## `template` for avoiding varying exceprion annotations. 
- var proof = nonLeafPathNodes(baseTag, rootKey, db) - if 0 < leafList.len: - proof.incl nonLeafPathNodes(leafList[^1].key.to(NodeTag), rootKey, db) + var proof = allPathNodes(rls.base, rootKey, db) + if 0 < rls.leafs.len: + proof.incl nonLeafPathNodes(rls.leafs[^1].key.to(NodeTag), rootKey, db) - var rp = RangeProof( - leafs: leafList, - proof: toSeq(proof)) - - if 0 < nSizeUsed: - rp.leafsSize = hexaryRangeRlpSize nSizeUsed - if 0 < rp.proof.len: - rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0) + var rp = rls + rp.proof = toSeq(proof) + rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0) rp @@ -163,23 +186,21 @@ proc hexaryRangeLeafsProof*( ): Result[RangeProof,HexaryError] {.gcsafe, raises: [CatchableError]} = ## Collect trie database leafs prototype and add proof. - var accSize = 0 - let rc = db.collectLeafs(rootKey, iv, nSizeLimit, accSize) + let rc = db.collectLeafs(rootKey, iv, nSizeLimit) if rc.isErr: err(rc.error) else: - ok(db.updateProof(rootKey, iv.minPt, rc.value, accSize)) + ok(db.updateProof(rootKey, rc.value)) proc hexaryRangeLeafsProof*( db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction rootKey: NodeKey; # State root - baseTag: NodeTag; # Left boundary - leafList: seq[RangeLeaf]; # Set of already collected leafs + rp: RangeProof; # Set of collected leafs and a `base` ): RangeProof {.gcsafe, raises: [CatchableError]} = ## Complement leafs list by adding proof nodes to the argument list ## `leafList`. 
- db.updateProof(rootKey, baseTag, leafList, 0) + db.updateProof(rootKey, rp) # ------------------------------------------------------------------------------ # Public helpers diff --git a/nimbus/sync/snap/worker/db/snapdb_accounts.nim b/nimbus/sync/snap/worker/db/snapdb_accounts.nim index cb5a59784..460e3afee 100644 --- a/nimbus/sync/snap/worker/db/snapdb_accounts.nim +++ b/nimbus/sync/snap/worker/db/snapdb_accounts.nim @@ -173,7 +173,6 @@ proc importAccounts*( base: NodeTag; ## Before or at first account entry in `data` data: PackedAccountRange; ## Re-packed `snap/1 ` reply data persistent = false; ## Store data on disk - noBaseBoundCheck = false; ## Ignore left boundary proof check if `true` ): Result[SnapAccountsGaps,HexaryError] = ## Validate and import accounts (using proofs as received with the snap ## message `AccountRange`). This function accumulates data in a memory table @@ -206,16 +205,6 @@ proc importAccounts*( ## Leaving out `(7&y,Y)` the boundary proofs still succeed but the ## return value will be @[`(7&y,c)`]. ## - ## The left boundary proof might be omitted by passing `true` for the - ## `noBaseBoundCheck` argument. In this case, the boundary check must be - ## performed on the return code as - ## * if `data.accounts` is empty, the return value must be an empty list - ## * otherwise, all type `NodeSpecs` items `w` of the return code must - ## satisfy - ## :: - ## let leastAccountPath = data.accounts[0].accKey.to(NodeTag) - ## leastAccountPath <= w.partialPath.max(NodeKey).to(NodeTag) - ## ## Besides the inner gaps, the function also returns the dangling nodes left ## from the `proof` list. 
## @@ -227,7 +216,7 @@ proc importAccounts*( innerSubTrie: seq[NodeSpecs] # internal, collect dangling links try: if 0 < data.proof.len: - let rc = ps.mergeProofs(ps.peer, data.proof) + let rc = ps.hexaDb.mergeProofs(ps.root, data.proof, ps.peer) if rc.isErr: return err(rc.error) block: @@ -265,16 +254,17 @@ proc importAccounts*( let bottomTag = accounts[0].pathTag for w in innerSubTrie: if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)): - if not noBaseBoundCheck: - # Verify that `base` is to the left of the first account and there - # is nothing in between. - # - # Without `proof` data available there can only be a complete - # set/list of accounts so there are no dangling nodes in the first - # place. But there must be `proof` data for an empty list. - if w.partialPath.hexaryEnvelope.maxPt < bottomTag: - return err(LowerBoundProofError) - # Otherwise register left over entry + # Verify that `base` is to the left of the first account and there + # is nothing in between. If there is an envelope to the left of + # the first account, then it might also cover a point before the + # first account. + # + # Without `proof` data available there can only be a complete + # set/list of accounts so there are no dangling nodes in the first + # place. But there must be `proof` data for an empty list. 
+ if w.partialPath.hexaryEnvelope.maxPt < bottomTag: + return err(LowerBoundProofError) + # Otherwise register left over entry, a gap in the accounts list gaps.innerGaps.add w if persistent: @@ -287,10 +277,9 @@ proc importAccounts*( return err(LowerBoundProofError) else: - if not noBaseBoundCheck: - for w in proofStats.dangling: - if base <= w.partialPath.hexaryEnvelope.maxPt: - return err(LowerBoundProofError) + for w in proofStats.dangling: + if base <= w.partialPath.hexaryEnvelope.maxPt: + return err(LowerBoundProofError) gaps.dangling = proofStats.dangling except RlpError: @@ -317,12 +306,10 @@ proc importAccounts*( root: Hash256; ## State root base: NodeTag; ## Before or at first account entry in `data` data: PackedAccountRange; ## Re-packed `snap/1 ` reply data - noBaseBoundCheck = false; ## Ignore left bound proof check if `true` ): Result[SnapAccountsGaps,HexaryError] = ## Variant of `importAccounts()` for presistent storage, only. SnapDbAccountsRef.init( - pv, root, peer).importAccounts( - base, data, persistent=true, noBaseBoundCheck) + pv, root, peer).importAccounts(base, data, persistent=true) proc importRawAccountsNodes*( diff --git a/nimbus/sync/snap/worker/db/snapdb_desc.nim b/nimbus/sync/snap/worker/db/snapdb_desc.nim index 95ad3840c..cc024d196 100644 --- a/nimbus/sync/snap/worker/db/snapdb_desc.nim +++ b/nimbus/sync/snap/worker/db/snapdb_desc.nim @@ -210,23 +210,22 @@ proc dbBackendRocksDb*(ps: SnapDbBaseRef): bool = not ps.base.rocky.isNil proc mergeProofs*( - ps: SnapDbBaseRef; ## Session database - peer: Peer; ## For log messages + xDb: HexaryTreeDbRef; ## Session database + root: NodeKey; ## State root proof: seq[SnapProof]; ## Node records + peer = Peer(); ## For log messages freeStandingOk = false; ## Remove freestanding nodes ): Result[void,HexaryError] {.gcsafe, raises: [RlpError,KeyError].} = ## Import proof records (as received with snap message) into a hexary trie ## of the repair table. 
These hexary trie records can be extended to a full ## trie at a later stage and used for validating account data. - let - db = ps.hexaDb var nodes: HashSet[RepairKey] - refs = @[ps.root.to(RepairKey)].toHashSet + refs = @[root.to(RepairKey)].toHashSet for n,rlpRec in proof: - let report = db.hexaryImport(rlpRec.to(Blob), nodes, refs) + let report = xDb.hexaryImport(rlpRec.to(Blob), nodes, refs) if report.error != NothingSerious: let error = report.error trace "mergeProofs()", peer, item=n, proofs=proof.len, error @@ -242,24 +241,25 @@ proc mergeProofs*( else: # Delete unreferenced nodes for nodeKey in nodes: - db.tab.del(nodeKey) + xDb.tab.del(nodeKey) trace "mergeProofs() ignoring unrelated nodes", peer, nodes=nodes.len ok() proc verifyLowerBound*( - ps: SnapDbBaseRef; ## Database session descriptor - peer: Peer; ## For log messages + xDb: HexaryTreeDbRef; ## Session database + root: NodeKey; ## State root base: NodeTag; ## Before or at first account entry in `data` - first: NodeTag; ## First account key + first: NodeTag; ## First account/storage key + peer = Peer(); ## For log messages ): Result[void,HexaryError] {.gcsafe, raises: [CatchableError].} = ## Verify that `base` is to the left of the first leaf entry and there is ## nothing in between. var error: HexaryError - let rc = base.hexaryNearbyRight(ps.root, ps.hexaDb) + let rc = base.hexaryNearbyRight(root, xDb) if rc.isErr: error = rc.error elif first == rc.value: @@ -274,17 +274,18 @@ proc verifyLowerBound*( proc verifyNoMoreRight*( - ps: SnapDbBaseRef; ## Database session descriptor - peer: Peer; ## For log messages + xDb: HexaryTreeDbRef; ## Session database + root: NodeKey; ## State root base: NodeTag; ## Before or at first account entry in `data` + peer = Peer(); ## For log messages ): Result[void,HexaryError] {.gcsafe, raises: [CatchableError].} = ## Verify that there is are no more leaf entries to the right of and ## including `base`. 
let - root = ps.root.to(RepairKey) + root = root.to(RepairKey) base = base.to(NodeKey) - if base.hexaryPath(root, ps.hexaDb).hexaryNearbyRightMissing(ps.hexaDb): + if base.hexaryPath(root, xDb).hexaryNearbyRightMissing(xDb): return ok() let error = LowerBoundProofError @@ -315,10 +316,6 @@ proc assignPrettyKeys*(xDb: HexaryTreeDbRef; root: NodeKey) = of Extension: discard xDb.keyPp node.eLink of Leaf: discard -proc assignPrettyKeys*(ps: SnapDbBaseRef) = - ## Variant of `assignPrettyKeys()` - ps.hexaDb.assignPrettyKeys(ps.root) - proc dumpPath*(ps: SnapDbBaseRef; key: NodeTag): seq[string] = ## Pretty print helper compiling the path into the repair tree for the ## argument `key`. @@ -354,14 +351,6 @@ proc dumpHexaDB*(xDb: HexaryTreeDbRef; root: NodeKey; indent = 4): string = ## Beware: dumping a large database is not recommended xDb.pp(root, indent) -proc dumpHexaDB*(ps: SnapDbBaseRef; indent = 4): string = - ## Ditto - ps.hexaDb.pp(ps.root, indent) - -proc hexaryPpFn*(ps: SnapDbBaseRef): HexaryPpFn = - ## Key mapping function used in `HexaryTreeDB` - ps.hexaDb.keyPp - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim b/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim index 54476385d..73d8e869c 100644 --- a/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim +++ b/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. 
+{.push raises: [].} + import std/tables, chronicles, @@ -19,8 +21,6 @@ import hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc, snapdb_persistent] -{.push raises: [].} - logScope: topics = "snap-db" @@ -114,7 +114,6 @@ proc importStorageSlots( base: NodeTag; ## before or at first account entry in `data` data: AccountSlots; ## Account storage descriptor proof: seq[SnapProof]; ## Storage slots proof data - noBaseBoundCheck = false; ## Ignore left boundary proof check if `true` ): Result[seq[NodeSpecs],HexaryError] {.gcsafe, raises: [RlpError,KeyError].} = ## Process storage slots for a particular storage root. See `importAccounts()` @@ -127,7 +126,7 @@ proc importStorageSlots( proofStats: TrieNodeStat # `proof` data dangling links innerSubTrie: seq[NodeSpecs] # internal, collect dangling links if 0 < proof.len: - let rc = tmpDb.mergeProofs(ps.peer, proof) + let rc = tmpDb.hexaDb.mergeProofs(tmpDb.root, proof, ps.peer) if rc.isErr: return err(rc.error) block: @@ -161,15 +160,14 @@ proc importStorageSlots( let bottomTag = slots[0].pathTag for w in innerSubTrie: if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)): - if not noBaseBoundCheck: - # Verify that `base` is to the left of the first slot and there is - # nothing in between. - # - # Without `proof` data available there can only be a complete - # set/list of accounts so there are no dangling nodes in the first - # place. But there must be `proof` data for an empty list. - if w.partialPath.hexaryEnvelope.maxPt < bottomTag: - return err(LowerBoundProofError) + # Verify that `base` is to the left of the first slot and there is + # nothing in between. + # + # Without `proof` data available there can only be a complete + # set/list of accounts so there are no dangling nodes in the first + # place. But there must be `proof` data for an empty list. 
+ if w.partialPath.hexaryEnvelope.maxPt < bottomTag: + return err(LowerBoundProofError) # Otherwise register left over entry dangling.add w @@ -184,10 +182,9 @@ proc importStorageSlots( return err(LowerBoundProofError) else: - if not noBaseBoundCheck: - for w in proofStats.dangling: - if base <= w.partialPath.hexaryEnvelope.maxPt: - return err(LowerBoundProofError) + for w in proofStats.dangling: + if base <= w.partialPath.hexaryEnvelope.maxPt: + return err(LowerBoundProofError) dangling = proofStats.dangling ok(dangling) @@ -233,7 +230,6 @@ proc importStorageSlots*( ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor data: AccountStorageRange; ## Account storage reply from `snap/1` protocol persistent = false; ## store data on disk - noBaseBoundCheck = false; ## Ignore left boundary proof check if `true` ): seq[HexaryNodeReport] = ## Validate and import storage slots (using proofs as received with the snap ## message `StorageRanges`). This function accumulates data in a memory table @@ -274,7 +270,7 @@ proc importStorageSlots*( block: itemInx = some(sTop) let rc = ps.importStorageSlots( - data.base, data.storages[sTop], data.proof, noBaseBoundCheck) + data.base, data.storages[sTop], data.proof) if rc.isErr: result.add HexaryNodeReport(slot: itemInx, error: rc.error) trace "Storage slots last item fails", peer, itemInx=sTop, nItems, @@ -315,12 +311,11 @@ proc importStorageSlots*( pv: SnapDbRef; ## Base descriptor on `ChainDBRef` peer: Peer; ## For log messages, only data: AccountStorageRange; ## Account storage reply from `snap/1` protocol - noBaseBoundCheck = false; ## Ignore left boundary proof check if `true` ): seq[HexaryNodeReport] = ## Variant of `importStorages()` SnapDbStorageSlotsRef.init( pv, Hash256().to(NodeKey), Hash256(), peer).importStorageSlots( - data, persistent = true, noBaseBoundCheck) + data, persistent=true) proc importRawStorageSlotsNodes*( diff --git a/nimbus/sync/snap/worker/pivot.nim b/nimbus/sync/snap/worker/pivot.nim index 
7dea3b7ac..4f035de0e 100644 --- a/nimbus/sync/snap/worker/pivot.nim +++ b/nimbus/sync/snap/worker/pivot.nim @@ -128,7 +128,11 @@ proc tickerStats*( proc meanStdDev(sum, sqSum: float; length: int): (float,float) = if 0 < length: result[0] = sum / length.float - result[1] = sqrt(sqSum / length.float - result[0] * result[0]) + let + sqSumAv = sqSum / length.float + rSq = result[0] * result[0] + if rSq < sqSumAv: + result[1] = sqrt(sqSum / length.float - result[0] * result[0]) result = proc: SnapTickerStats = var diff --git a/nimbus/sync/snap/worker/pivot/range_fetch_accounts.nim b/nimbus/sync/snap/worker/pivot/range_fetch_accounts.nim index 9a5efb2e5..196152369 100644 --- a/nimbus/sync/snap/worker/pivot/range_fetch_accounts.nim +++ b/nimbus/sync/snap/worker/pivot/range_fetch_accounts.nim @@ -163,8 +163,7 @@ proc accountsRangefetchImpl( let gaps = block: # No left boundary check needed. If there is a gap, the partial path for # that gap is returned by the import function to be registered, below. - let rc = db.importAccounts( - peer, stateRoot, iv.minPt, dd.data, noBaseBoundCheck = true) + let rc = db.importAccounts(peer, stateRoot, iv.minPt, dd.data) if rc.isErr: # Bad data, just try another peer buddy.ctrl.zombie = true diff --git a/nimbus/sync/snap/worker/pivot/range_fetch_storage_slots.nim b/nimbus/sync/snap/worker/pivot/range_fetch_storage_slots.nim index 31d0f6023..60bbfeae2 100644 --- a/nimbus/sync/snap/worker/pivot/range_fetch_storage_slots.nim +++ b/nimbus/sync/snap/worker/pivot/range_fetch_storage_slots.nim @@ -61,6 +61,9 @@ ## In general, if an error occurs, the entry that caused the error is moved ## or re-stored onto the queue of partial requests `env.fetchStoragePart`. 
## + +{.push raises: [].} + import chronicles, chronos, @@ -73,8 +76,6 @@ import ../db/[hexary_error, snapdb_storage_slots], ./storage_queue_helper -{.push raises: [].} - logScope: topics = "snap-range" @@ -137,9 +138,7 @@ proc storeStoragesSingleBatch( if 0 < gotSlotLists: # Verify/process storages data and save it to disk - let report = ctx.pool.snapDb.importStorageSlots( - peer, stoRange.data, noBaseBoundCheck = true) - + let report = ctx.pool.snapDb.importStorageSlots(peer, stoRange.data) if 0 < report.len: if report[^1].slot.isNone: # Failed to store on database, not much that can be done here diff --git a/nimbus/sync/sync_sched.nim b/nimbus/sync/sync_sched.nim index b128ec985..2fe88e9eb 100644 --- a/nimbus/sync/sync_sched.nim +++ b/nimbus/sync/sync_sched.nim @@ -86,6 +86,8 @@ ## * "."/[sync_desc, sync_sched, protocol] ## +{.push raises: [].} + import std/hashes, chronos, @@ -93,8 +95,6 @@ import stew/keyed_queue, "."/[handlers, sync_desc] -{.push raises: [].} - static: # type `EthWireRef` is needed in `initSync()` type silenceUnusedhandlerComplaint = EthWireRef # dummy directive diff --git a/tests/test_sync_snap.nim b/tests/test_sync_snap.nim index d3a8727aa..e301145f6 100644 --- a/tests/test_sync_snap.nim +++ b/tests/test_sync_snap.nim @@ -230,22 +230,22 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample) = accLst.test_accountsImport(desc, db.persistent) # debugging, make sure that state root ~ "$0" - desc.assignPrettyKeys() + hexaDb.assignPrettyKeys(root.to(NodeKey)) # Beware: dumping a large database is not recommended - # true.say "***", "database dump\n ", desc.dumpHexaDB() + # true.say "***", "database dump\n ", hexaDb.dumpHexaDB(root) test &"Retrieve accounts & proofs for previous account ranges": if db.persistent: accLst.test_NodeRangeProof(getFn, dbg) else: - accLst.test_NodeRangeProof(hexaDB, dbg) + accLst.test_NodeRangeProof(hexaDb, dbg) test &"Verify left boundary checks": if db.persistent: 
accLst.test_NodeRangeLeftBoundary(getFn, dbg) else: - accLst.test_NodeRangeLeftBoundary(hexaDB, dbg) + accLst.test_NodeRangeLeftBoundary(hexaDb, dbg) block: # List of keys to be shared by sub-group diff --git a/tests/test_sync_snap/test_accounts.nim b/tests/test_sync_snap/test_accounts.nim index 124e9c633..2885803ba 100644 --- a/tests/test_sync_snap/test_accounts.nim +++ b/tests/test_sync_snap/test_accounts.nim @@ -105,8 +105,8 @@ proc test_accountsMergeProofs*( # different from `.isImportOk` check desc.importAccounts(baseTag, packed, true).isOk - # check desc.merge(lowerBound, accounts) == OkHexDb - desc.assignPrettyKeys() # for debugging, make sure that state root ~ "$0" + # for debugging, make sure that state root ~ "$0" + desc.hexaDb.assignPrettyKeys(desc.root) # Update list of accounts. There might be additional accounts in the set # of proof nodes, typically before the `lowerBound` of each block. As diff --git a/tests/test_sync_snap/test_node_range.nim b/tests/test_sync_snap/test_node_range.nim index f2371a37b..e149cb35d 100644 --- a/tests/test_sync_snap/test_node_range.nim +++ b/tests/test_sync_snap/test_node_range.nim @@ -20,8 +20,7 @@ import ../../nimbus/sync/snap/range_desc, ../../nimbus/sync/snap/worker/db/[ hexary_desc, hexary_envelope, hexary_error, hexary_interpolate, - hexary_import, hexary_nearby, hexary_paths, hexary_range, - snapdb_accounts, snapdb_desc], + hexary_nearby, hexary_paths, hexary_range, snapdb_accounts, snapdb_desc], ../replay/[pp, undump_accounts], ./test_helpers @@ -30,10 +29,10 @@ const cmaNlSpc = ",\n" & repeat(" ",13) # ------------------------------------------------------------------------------ -# Private helpers +# Private functions, pretty printing # ------------------------------------------------------------------------------ -proc ppNodeKeys(a: openArray[SnapProof], dbg = HexaryTreeDbRef(nil)): string = +proc ppNodeKeys(a: openArray[SnapProof]; dbg = HexaryTreeDbRef(nil)): string = result = "[" if dbg.isNil: result &= 
a.mapIt(it.to(Blob).digestTo(NodeKey).pp(collapse=true)).join(",") @@ -41,6 +40,18 @@ proc ppNodeKeys(a: openArray[SnapProof], dbg = HexaryTreeDbRef(nil)): string = result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(dbg)).join(",") result &= "]" +proc ppHexPath(p: RPath|XPath; dbg = HexaryTreeDbRef(nil)): string = + if dbg.isNil: + "*pretty printing disabled*" + else: + p.pp(dbg) + +proc pp(a: NodeTag; collapse = true): string = + a.to(NodeKey).pp(collapse) + +proc pp(iv: NodeTagRange; collapse = false): string = + "(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")" + # ------------------------------------------------------------------------------ # Private functionsto(Blob) # ------------------------------------------------------------------------------ @@ -191,6 +202,7 @@ proc printCompareLeftNearby( proc verifyRangeProof( rootKey: NodeKey; + baseTag: NodeTag; leafs: seq[RangeLeaf]; proof: seq[SnapProof]; dbg = HexaryTreeDbRef(nil); @@ -203,24 +215,34 @@ proc verifyRangeProof( if not dbg.isNil: xDb.keyPp = dbg.keyPp - # Import proof nodes - var unrefs, refs: HashSet[RepairKey] # values ignored - for rlpRec in proof: - let importError = xDb.hexaryImport(rlpRec.to(Blob), unrefs, refs).error - if importError != HexaryError(0): - check importError == HexaryError(0) - return err(importError) + result = ok() + block verify: + + # Import proof nodes + result = xDb.mergeProofs(rootKey, proof) + if result.isErr: + check result == Result[void,HexaryError].ok() + break verify + + # Build tree + var lItems = leafs.mapIt(RLeafSpecs( + pathTag: it.key.to(NodeTag), + payload: it.data)) + result = xDb.hexaryInterpolate(rootKey, lItems) + if result.isErr: + check result == Result[void,HexaryError].ok() + break verify + + # Left proof + result = xDb.verifyLowerBound(rootKey, baseTag, leafs[0].key.to(NodeTag)) + if result.isErr: + check result == Result[void,HexaryError].ok() + break verify - # Build tree - var lItems = leafs.mapIt(RLeafSpecs( - pathTag: 
it.key.to(NodeTag), - payload: it.data)) - let rc = xDb.hexaryInterpolate(rootKey, lItems) - if rc.isOk: return ok() if noisy: - true.say "\n***", "error=", rc.error, + true.say "\n***", "error=", result.error, #"\n", #"\n unrefs=[", unrefs.toSeq.mapIt(it.pp(dbg)).join(","), "]", #"\n refs=[", refs.toSeq.mapIt(it.pp(dbg)).join(","), "]", @@ -232,17 +254,6 @@ proc verifyRangeProof( "\n\n database dump", "\n ", xDb.dumpHexaDB(rootKey), "\n" - rc - -# ------------------------------------------------------------------------------ -# Private functions, pretty printing -# ------------------------------------------------------------------------------ - -proc pp(a: NodeTag; collapse = true): string = - a.to(NodeKey).pp(collapse) - -proc pp(iv: NodeTagRange; collapse = false): string = - "(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")" # ------------------------------------------------------------------------------ # Public test function @@ -379,9 +390,15 @@ proc test_NodeRangeProof*( # Assuming the `inLst` entries have been stored in the DB already for n,w in inLst: + doAssert 1 < w.data.accounts.len let - accounts = w.data.accounts[0 ..< min(w.data.accounts.len,maxLen)] - iv = NodeTagRange.new(w.base, accounts[^1].accKey.to(NodeTag)) + # Use the middle of the first two points as base + delta = (w.data.accounts[1].accKey.to(NodeTag) - + w.data.accounts[0].accKey.to(NodeTag)) div 2 + base = w.data.accounts[0].accKey.to(NodeTag) + delta + # Assemble accounts list starting at the second item + accounts = w.data.accounts[1 ..< min(w.data.accounts.len,maxLen)] + iv = NodeTagRange.new(base, accounts[^1].accKey.to(NodeTag)) rc = db.hexaryRangeLeafsProof(rootKey, iv) check rc.isOk if rc.isErr: @@ -407,7 +424,7 @@ proc test_NodeRangeProof*( if leafs.len != accounts.len or accounts[^1].accKey != leafs[^1].key: noisy.say "***", "n=", n, " something went wrong .." 
check (n,leafs.len) == (n,accounts.len) - rootKey.printCompareRightLeafs(w.base, accounts, leafs, db, dbg) + rootKey.printCompareRightLeafs(base, accounts, leafs, db, dbg) return proof = rc.value.proof @@ -415,7 +432,7 @@ proc test_NodeRangeProof*( check rc.value.proofSize == proof.proofEncode.len check rc.value.leafsSize == leafsRlpLen else: - # Make sure that the size calculation deliver the expected number + # Make sure that the size calculation delivers the expected number # of entries. let rx = db.hexaryRangeLeafsProof(rootKey, iv, leafsRlpLen + 1) check rx.isOk @@ -427,13 +444,13 @@ proc test_NodeRangeProof*( check rx.value.proofSize == rx.value.proof.proofEncode.len # Re-adjust proof - proof = db.hexaryRangeLeafsProof(rootKey, iv.minPt, leafs).proof + proof = db.hexaryRangeLeafsProof(rootKey, rx.value).proof # Import proof nodes and build trie block: - var rx = rootKey.verifyRangeProof(leafs, proof) + var rx = rootKey.verifyRangeProof(base, leafs, proof) if rx.isErr: - rx = rootKey.verifyRangeProof(leafs, proof, dbg) + rx = rootKey.verifyRangeProof(base, leafs, proof, dbg) let baseNbls = iv.minPt.to(NodeKey).to(NibblesSeq) lastNbls = iv.maxPt.to(NodeKey).to(NibblesSeq) @@ -445,11 +462,11 @@ proc test_NodeRangeProof*( " proof=", proof.ppNodeKeys(dbg), "\n\n ", " base=", iv.minPt, - "\n ", iv.minPt.hexaryPath(rootKey,db).pp(dbg), + "\n ", iv.minPt.hexaryPath(rootKey,db).ppHexpath(dbg), "\n\n ", " pfx=", pfxNbls, " nPfx=", nPfxNblsLen, - "\n ", pfxNbls.hexaryPath(rootKey,db).pp(dbg), + "\n ", pfxNbls.hexaryPath(rootKey,db).ppHexpath(dbg), "\n" check rx == typeof(rx).ok()