Aristo: Update unit test suite (#2002)
* Aristo: Update unit test suite

* Aristo/Kvt: Fix iterators

  why:
    Generic iterators were not properly updated after backend change

* Aristo: Add sub-trie deletion functionality

  why:
    For storage tries linked to an account payload vertex ID, the whole
    storage trie needs to be deleted with the account.

* Aristo: Reserve vertex ID numbers for static custom state roots

  why:
    Static custom state roots may be controlled by an application, e.g. for
    a receipt or a transaction root. The `Aristo` functions are agnostic of
    what the static state roots are when different from the internal tree
    vertex ID 1.

  details:
    The `merge()` function applied to a non-static state root (assumed to be
    a storage root) will check the payload of an accounts leaf and mark its
    Merkle keys to be re-checked.

* Aristo: Correct error code symbol

* Aristo: Update error code symbols

* Aristo: Code cosmetics/comments

* Aristo: Fix hashify schedule calculator

  why:
    Had a tendency to stop early, leaving an incomplete job
This commit is contained in:
  parent a5ac5d3078
  commit 3b306a9689
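
The reserved state-root scheme described in the commit message is easiest to see in isolation. Below is a minimal, self-contained Nim sketch (an editor's illustration, not code from this commit) of the rule: vertex IDs below `LEAST_FREE_VID` are set aside as static state roots, and the ID generator only ever hands out IDs from `LEAST_FREE_VID` upwards.

type VertexID = distinct uint64

const LEAST_FREE_VID = 100    # mirrors the new constant in aristo_constants

proc `==`(a, b: VertexID): bool {.borrow.}

proc vidFetch(vGen: var seq[VertexID]): VertexID =
  ## Hand out the next dynamic vertex ID, never a reserved one.
  if vGen.len == 0:
    # Fresh state: claim LEAST_FREE_VID, remember LEAST_FREE_VID+1 as next.
    vGen = @[VertexID(LEAST_FREE_VID + 1)]
    return VertexID(LEAST_FREE_VID)
  result = vGen[^1]                        # last entry: lowest never-used ID
  vGen[^1] = VertexID(uint64(result) + 1)

var gen: seq[VertexID]
doAssert gen.vidFetch == VertexID(100)     # first dynamic ID
doAssert gen.vidFetch == VertexID(101)     # reserved band 1..99 is never touched
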
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -11,7 +11,7 @@
{.push raises: [].}

import
  std/[algorithm, sequtils, sets, tables],
  std/[algorithm, sequtils, sets, tables, typetraits],
  eth/[common, trie/nibbles],
  stew/interval_set,
  ../../aristo,
@@ -19,21 +19,22 @@ import
  ".."/[aristo_desc, aristo_get, aristo_layers, aristo_vid]

const
  Vid2 = @[VertexID(2)].toHashSet
  Vid2 = @[VertexID(LEAST_FREE_VID)].toHashSet

# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------

proc invTo(s: IntervalSetRef[VertexID,uint64]; T: type HashSet[VertexID]): T =
  ## Convert the complement of the argument list `s` to a set of vertex IDs
  ## as it would appear with a vertex generator state list.
proc to(s: IntervalSetRef[VertexID,uint64]; T: type HashSet[VertexID]): T =
  ## Convert the argument list `s` to a set of vertex IDs as it would appear
  ## with a vertex generator state list.
  if s.total < high(uint64):
    for w in s.increasing:
      if w.maxPt == high(VertexID):
        result.incl w.minPt # last interval
      else:
        for pt in w.minPt .. w.maxPt:
          if LEAST_FREE_VID <= pt.distinctBase:
            result.incl pt

proc toNodeBE(
@@ -90,7 +91,8 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
  ## Make sure that each vertex has a Merkle hash and vice versa. Also check
  ## the vertex ID generator state.
  let vids = IntervalSetRef[VertexID,uint64].init()
  discard vids.merge Interval[VertexID,uint64].new(VertexID(1),high(VertexID))
  discard vids.merge Interval[VertexID,uint64].new(
    VertexID(LEAST_FREE_VID),high(VertexID))

  for (vid,vtx) in T.walkVtxBE db:
    if not vtx.isValid:
@@ -139,7 +141,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    else:
      return err((VertexID(0),rc.error))
  let
    vGenExpected = vids.invTo(HashSet[VertexID])
    vGenExpected = vids.to(HashSet[VertexID])
    delta = vGenExpected -+- vGen # symmetric difference
  if 0 < delta.len:
    # Exclude fringe case when there is a single root vertex only
@@ -202,14 +204,19 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
  # Check vGen
  let
    vGen = db.vGen.vidReorg.toHashSet
    vGenExpected = vids.invTo(HashSet[VertexID])
    vGenExpected = vids.to(HashSet[VertexID])
    delta = vGenExpected -+- vGen # symmetric difference
  if 0 < delta.len:
    # Exclude fringe case when there is a single root vertex only
    if vGenExpected != Vid2 or 0 < vGen.len:
    if vGen == Vid2 and vGenExpected.len == 0:
      # Fringe case when the database is empty
      discard
    elif vGen.len == 0 and vGenExpected == Vid2:
      # Fringe case when there is a single root vertex only
      discard
    else:
      let delta = delta.toSeq
      # As happens with Merkle signature calculator: `root=VertexID(2)`
      if delta.len != 1 or delta[0] != VertexID(1) or VertexID(1) in vGen:
      if delta.len != 1 or
          delta[0] != VertexID(1) or VertexID(1) in vGen:
        return err((delta.sorted[^1],CheckBeCacheGarbledVGen))

  ok()
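
The checker above rebuilds the expected ID-generator state from an interval set and compares it against the stored `vGen`. Reduced to plain sets, the cross-check amounts to the following toy model (an editor's sketch under simplifying assumptions; the real code keeps an open-ended interval rather than the bounded range used here):

import std/sets

const LEAST_FREE_VID = 100

proc expectedVGen(used: HashSet[uint64]; maxVid: uint64): HashSet[uint64] =
  ## Free-ID list implied by the backend: every dynamic ID not seen on a
  ## vertex must be on the generator's recycle list.
  for vid in LEAST_FREE_VID.uint64 .. maxVid:
    if vid notin used:
      result.incl vid

let used = [100u64, 101, 103].toHashSet
# 102 was recycled, 104 is the lowest never-used ID
doAssert expectedVGen(used, 104) == [102u64, 104].toHashSet
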
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -37,6 +37,9 @@ const
  VOID_HASH_LABEL* = HashLabel(key: VOID_HASH_KEY)
    ## Void equivalent for Merkle hash value

  VOID_PATH_ID* = PathID()
    ## Void equivalent for Merkle hash value

  EmptyQidPairSeq* = seq[(QueueID,QueueID)].default
    ## Useful shortcut

@@ -46,4 +49,17 @@ const
    ( 64, 127), ## ..
    ( 64, 255)]

  SUB_TREE_DISPOSAL_MAX* = 200_000
    ## Some limit for disposing sub-trees in one go using `delete()`.

  LEAST_FREE_VID* = 100
    ## Vids smaller are used as known state roots and cannot be recycled. Only
    ## the `VertexID(1)` state root is used by the `Aristo` methods. The other
    ## numbers smaller than `LEAST_FREE_VID` may be used by application
    ## functions with fixed assignments of the type of a state root (e.g. for
    ## a receipt or a transaction root.)

static:
  doAssert 1 < LEAST_FREE_VID # must stay away from `VertexID(1)`

# End
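
As a hypothetical illustration of the doc comment above, an application could pin its own trie roots anywhere in the reserved band; only `VertexID(1)` is claimed by `Aristo` itself (names below are invented for the example):

const
  LEAST_FREE_VID = 100
  MainTrieRoot   = 1     # used by Aristo itself
  ReceiptsRoot   = 2     # hypothetical fixed assignment
  TxRoot         = 3     # hypothetical fixed assignment

static:
  doAssert MainTrieRoot < LEAST_FREE_VID
  doAssert ReceiptsRoot < LEAST_FREE_VID and TxRoot < LEAST_FREE_VID
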
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -21,11 +21,15 @@ import
  eth/[common, trie/nibbles],
  results,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
    aristo_vid]
    aristo_utils, aristo_vid]

logScope:
  topics = "aristo-delete"

type
  SaveToVaeVidFn =
    proc(err: AristoError): (VertexID,AristoError) {.gcsafe, raises: [].}

# ------------------------------------------------------------------------------
# Private heplers
# ------------------------------------------------------------------------------
@@ -34,6 +38,12 @@ func toVae(err: AristoError): (VertexID,AristoError) =
  ## Map single error to error pair with dummy vertex
  (VertexID(0),err)

func toVae(vid: VertexID): SaveToVaeVidFn =
  ## Map single error to error pair with argument vertex
  result =
    proc(err: AristoError): (VertexID,AristoError) =
      return (vid,err)

func toVae(err: (Hike,AristoError)): (VertexID,AristoError) =
  if 0 < err[0].legs.len:
    (err[0].legs[^1].wp.vid, err[1])
@@ -268,7 +278,7 @@ proc collapseLeaf(
  # No need to update the cache unless `lf` is present there. The leaf path
  # as well as the value associated with the leaf path has not been changed.
  let lfTie = LeafTie(root: hike.root, path: rc.value)
  if db.top.final.lTab.hasKey lfTie:
  if db.lTab.hasKey lfTie:
    db.top.final.lTab[lfTie] = lf.vid

  # Clean up stale leaf vertex which has moved to root position
@@ -288,6 +298,39 @@ proc collapseLeaf(

# -------------------------

proc delSubTree(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # Root vertex
      ): Result[void,(VertexID,AristoError)] =
  ## Implementation of *delete* sub-trie.
  if not root.isValid:
    return err((root,DelSubTreeVoidRoot))
  var
    dispose = @[root]
    rootVtx = db.getVtxRc(root).valueOr:
      if error == GetVtxNotFound:
        return ok()
      return err((root,error))
    follow = @[rootVtx]

  # Collect list of nodes to delete
  while 0 < follow.len:
    var redo: seq[VertexRef]
    for vtx in follow:
      for vid in vtx.subVids:
        let vtx = ? db.getVtxRc(vid).mapErr toVae(vid)
        redo.add vtx
        dispose.add vid
      if SUB_TREE_DISPOSAL_MAX < dispose.len:
        return err((VertexID(0),DelSubTreeTooBig))
    redo.swap follow

  # Mark nodes deleted
  for vid in dispose:
    db.disposeOfVtx vid
  ok()


proc deleteImpl(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Fully expanded path
@@ -366,17 +409,42 @@ proc deleteImpl(
    # No need to keep it any longer in cache
    db.top.final.lTab.del lty

  # Delete dependent leaf node storage tree if there is any
  let data = lf.vtx.lData
  if data.pType == AccountData:
    let vid = data.account.storageID
    if vid.isValid:
      return db.delSubTree vid

  ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc delete*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # Root vertex
      ): Result[void,(VertexID,AristoError)] =
  ## Delete sub-trie below `root`. The maximum supported sub-tree size is
  ## `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by walk-deleting
  ## leaf nodes using `left()` or `right()` traversal functions.
  ##
  ## Caveat:
  ##   There is no way to quickly verify that the `root` argument is isolated.
  ##   Deleting random sub-trees might lead to an inconsistent database.
  ##
  db.delSubTree root

proc delete*(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Fully expanded chain of vertices
      ): Result[void,(VertexID,AristoError)] =
  ## Delete argument `hike` chain of vertices from the database
  ## Delete argument `hike` chain of vertices from the database.
  ##
  ## Note:
  ##   If the leaf node has an account payload referring to a storage sub-trie,
  ##   this one will be deleted as well.
  ##
  # Need path in order to remove it from `lTab[]`
  let lty = LeafTie(
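
The disposal loop in `delSubTree` above is a plain breadth-first walk with a size cap. Here is the same shape as a self-contained sketch (a table of child IDs stands in for the vertex table, and the cap is scaled down for the demo):

import std/tables

const DisposalMax = 8          # scaled-down stand-in for SUB_TREE_DISPOSAL_MAX

proc collectSubTree(edges: Table[int, seq[int]]; root: int): seq[int] =
  ## All vertex IDs reachable from `root`, root first; empty means too big.
  var
    dispose = @[root]
    follow = @[root]
  while 0 < follow.len:
    var redo: seq[int]
    for vid in follow:
      for sub in edges.getOrDefault(vid):
        redo.add sub
        dispose.add sub
        if DisposalMax < dispose.len:
          return @[]           # corresponds to the DelSubTreeTooBig error
    follow = redo
  dispose

let edges = {1: @[2, 3], 2: @[4]}.toTable
doAssert edges.collectSubTree(1) == @[1, 2, 3, 4]
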
@@ -116,6 +116,9 @@ func isValid*(nd: NodeRef): bool =
func isValid*(pld: PayloadRef): bool =
  pld != PayloadRef(nil)

func isValid*(pid: PathID): bool =
  pid != VOID_PATH_ID

func isValid*(filter: FilterRef): bool =
  filter != FilterRef(nil)
@@ -66,13 +66,14 @@ type
    CacheMissingNodekeys

    # Path function `hikeUp()`
    HikeRootMissing
    HikeEmptyPath
    HikeBranchMissingEdge
    HikeBranchTailEmpty
    HikeBranchBlindEdge
    HikeEmptyPath
    HikeExtTailEmpty
    HikeExtTailMismatch
    HikeLeafUnexpected
    HikeNoLegs
    HikeRootMissing

    # Path/nibble/key conversions in `aisto_path.nim`
    PathExpected64Nibbles
@@ -94,6 +95,10 @@ type
    MergeNonBranchProofModeLock
    MergeRootBranchLinkBusy
    MergeRootMissing
    MergeAccPathMissing
    MergeAccUnaccessible
    MergeAccPathWithoutLeaf
    MergeAccWrongStorageRoot
    MergeAssemblyFailed # Ooops, internal error

    MergeHashKeyInvalid
@@ -188,6 +193,8 @@ type
    DelBranchWithoutRefs
    DelExtLocked
    DelVidStaleVtx
    DelSubTreeTooBig
    DelSubTreeVoidRoot

    # Functions from `aristo_filter.nim`
    FilBackendMissing
@@ -213,6 +220,7 @@ type
    FilTrgSrcMismatch
    FilTrgTopSrcMismatch
    FilSiblingsCommitUnfinshed
    FilSrcTrgInconsistent

    # Get functions from `aristo_get.nim`
    GetLeafMissing
@@ -227,6 +235,7 @@ type

    # Fetch functions from `aristo_fetch.nim`
    FetchPathNotFound
    LeafKeyInvalid

    # RocksDB backend
    RdbBeCantCreateDataDir
@@ -252,21 +261,23 @@ type
    TxStackUnderflow
    TxGarbledSpan

    # Functions from `aristo_desc`
    # Functions from `aristo_desc.nim`
    MustBeOnCentre
    NotAllowedOnCentre

    # Miscelaneous handy helpers
    # Functions from `aristo_utils.nim`
    AccRlpDecodingError
    AccStorageKeyMissing
    AccVtxUnsupported
    AccNodeUnsupported
    PayloadTypeUnsupported
    LeafKeyInvalid
    AccountRootUnacceptable
    AccountRlpDecodingError
    AccountStorageKeyMissing
    AccountVtxUnsupported
    AccountNodeUnsupported
    MptRootUnacceptable

    # Miscelaneous handy helpers
    AccRootUnacceptable
    MptContextMissing
    VidContextLocked
    MptRootUnacceptable
    NotImplemented
    VidContextLocked
    VidRootMissing

# End
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -67,7 +67,7 @@ type
    ##
    ## The path ID should be kept normalised, i.e.
    ## * 0 <= `length` <= 64
    ## * the unused trailing nibbles in `pfx` ar set to `0`
    ## * the unused trailing nibbles in `pfx` are set to `0`
    ##
    pfx*: UInt256
    length*: uint8
@@ -105,22 +105,6 @@ type
chronicles.formatIt(VertexID): $it
chronicles.formatIt(QueueID): $it

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func to(lid: HashKey; T: type PathID): T =
  ## Helper to bowrrow certain properties from `PathID`
  if lid.isHash:
    PathID(pfx: UInt256.fromBytesBE lid.key.data, length: 64)
  elif 0 < lid.blob.len:
    doAssert lid.blob.len < 32
    var a32: array[32,byte]
    (addr a32[0]).copyMem(unsafeAddr lid.blob[0], lid.blob.len)
    PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.blob.len.uint8)
  else:
    PathID()

# ------------------------------------------------------------------------------
# Public helpers: `VertexID` scalar data model
# ------------------------------------------------------------------------------
@@ -223,6 +207,18 @@ func `==`*(a, b: PathID): bool =
func cmp*(a, b: PathID): int =
  if a < b: -1 elif b < a: 1 else: 0

func to*(lid: HashKey; T: type PathID): T =
  ## Helper to bowrrow certain properties from `PathID`
  if lid.isHash:
    PathID(pfx: UInt256.fromBytesBE lid.key.data, length: 64)
  elif 0 < lid.blob.len:
    doAssert lid.blob.len < 32
    var a32: array[32,byte]
    (addr a32[0]).copyMem(unsafeAddr lid.blob[0], lid.blob.len)
    PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.blob.len.uint8)
  else:
    PathID()

# ------------------------------------------------------------------------------
# Public helpers: `HashKey` ordered scalar data model
# ------------------------------------------------------------------------------
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023-2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -21,10 +21,11 @@ import
const
  AcceptableHikeStops = {
    HikeBranchTailEmpty,
    HikeBranchBlindEdge,
    HikeBranchMissingEdge,
    HikeExtTailEmpty,
    HikeExtTailMismatch,
    HikeLeafUnexpected}
    HikeLeafUnexpected,
    HikeNoLegs}

# ------------------------------------------------------------------------------
# Private functions
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -13,7 +13,7 @@
##

import
  std/[sequtils, tables],
  std/[sequtils, sets, tables],
  eth/common,
  results,
  "."/[aristo_desc, aristo_get, aristo_vid],
@@ -71,6 +71,7 @@ proc merge*(
  ## Merge the argument `filter` into the read-only filter layer. Note that
  ## this function has no control of the filter source. Having merged the
  ## argument `filter`, all the `top` and `stack` layers should be cleared.
  ##
  let ubeRoot = block:
    let rc = db.getKeyUBE VertexID(1)
    if rc.isOk:
@@ -81,6 +82,14 @@ proc merge*(
      return err((VertexID(1),rc.error))

  db.roFilter = ? db.merge(filter, db.roFilter, ubeRoot)
  if db.roFilter.src == db.roFilter.trg:
    # Under normal conditions, the root keys cannot be the same unless the
    # database is empty. This changes if there is a fixed root vertex as
    # used with the `snap` sync protocol boundaty proof. In that case, there
    # can be no history chain and the filter is just another cache.
    if VertexID(1) notin db.top.final.pPrf:
      db.roFilter = FilterRef(nil)

  ok()

@@ -59,6 +59,14 @@ proc getLayerStateRoots*(
  if spr.fg.isValid:
    return ok(spr)

  if not delta.kMap.hasKey(VertexID(1)) and
     not delta.sTab.hasKey(VertexID(1)):
    # This layer is unusable, need both: vertex and key
    return err(FilPrettyPointlessLayer)
  elif not delta.sTab.getOrVoid(VertexID(1)).isValid:
    # Root key and vertex has been deleted
    return ok(spr)

  if chunkedMpt:
    let lbl = HashLabel(root: VertexID(1), key: sprBeKey)
    if VertexID(1) in delta.pAmk.getOrVoid lbl:
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -95,6 +95,11 @@ proc merge*(
      else:
        return err((vid,rc.error))

  # Check consistency
  if (newFilter.src == newFilter.trg) !=
     (newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
    return err((VertexID(0),FilSrcTrgInconsistent))

  ok newFilter
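
The consistency rule enforced above, stated on its own: a filter whose source and target state roots coincide must be empty, and an empty filter must not move the state root. An editor's sketch with simplified types:

type Filter = object
  src, trg: string     # stand-ins for the before/after state roots
  nChanges: int        # stand-in for the sTab/kMap sizes

func consistent(f: Filter): bool =
  (f.src == f.trg) == (f.nChanges == 0)

doAssert Filter(src: "a", trg: "a", nChanges: 0).consistent
doAssert Filter(src: "a", trg: "b", nChanges: 3).consistent
doAssert not Filter(src: "a", trg: "b", nChanges: 0).consistent
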
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -181,7 +181,7 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
      return err(GetKeyUpdateNeeded)
    else:
      # The vertex is to be deleted. So is the value label.
      return err(GetVtxNotFound)
      return err(GetKeyNotFound)

  db.getKeyBE vid
@@ -243,6 +243,8 @@ proc updateSchedule(
    if db.layersGetKeyOrVoid(root).isValid:
      wff.root.excl root
      wff.completed.incl root
    else:
      wff.root.incl root
    return

  # Unresolved root target to reach via width-first search
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023-2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -90,7 +90,7 @@ proc hikeUp*(
        if error != GetVtxNotFound:
          return err((hike,error))
        if hike.legs.len == 0:
          return err((hike,HikeEmptyPath))
          return err((hike,HikeNoLegs))
        break

    case leg.wp.vtx.vType:
@@ -113,7 +113,7 @@ proc hikeUp*(
        nextVid = leg.wp.vtx.bVid[nibble]

      if not nextVid.isValid:
        return err((hike,HikeBranchBlindEdge))
        return err((hike,HikeBranchMissingEdge))

      leg.nibble = nibble
      hike.legs.add leg
@@ -110,10 +110,13 @@ proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
  ## cache that way.
  ##
  if db.top.delta.kMap.hasKey vid:
    # This is ok regardless of the `dirty` flag. If this vertex has become
    # dirty, there is an empty `kMap[]` entry on this layer.
    return ok(db.top.delta.kMap.getOrVoid vid)

  for w in db.stack.reversed:
    if w.delta.kMap.hasKey vid:
      # Same reasoning as above regarding the `dirty` flag.
      return ok(w.delta.kMap.getOrVoid vid)

  err()
@@ -177,7 +180,7 @@ proc layersPutLabel*(db: AristoDbRef; vid: VertexID; lbl: HashLabel) =
  # Get previous label
  let blb = db.top.delta.kMap.getOrVoid vid

  # Update label on `label->vid` mappiing table
  # Update label on `label->vid` mapping table
  db.top.delta.kMap[vid] = lbl
  db.top.final.dirty = true # Modified top cache layers
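
The lookup discipline the comments above describe, top layer first, then the stack from newest to oldest, where a hit on any layer wins even if it marks a deletion, in a self-contained sketch:

import std/[algorithm, options, tables]

proc layersGet(top: Table[int, string];
               stack: seq[Table[int, string]];
               vid: int): Option[string] =
  if top.hasKey vid:
    return some(top[vid])          # may be a void "deleted" marker
  for layer in stack.reversed:     # newest stacked layer first
    if layer.hasKey vid:
      return some(layer[vid])
  none(string)

let top = {1: "A"}.toTable
let stack = @[{2: "stale"}.toTable, {2: "fresh"}.toTable]
doAssert layersGet(top, stack, 2) == some("fresh")
doAssert layersGet(top, stack, 9).isNone
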
@@ -25,7 +25,7 @@
{.push raises: [].}

import
  std/[algorithm, sequtils, strutils, sets, tables],
  std/[algorithm, sequtils, strutils, sets, tables, typetraits],
  chronicles,
  eth/[common, trie/nibbles],
  results,
@@ -468,6 +468,47 @@ proc updatePayload(
  else:
    err(MergeLeafPathCachedAlready)


proc registerStorageRootAccount(
    db: AristoDbRef;                   # Database, top layer
    stoRoot: VertexID;                 # Storage root ID
    accPath: PathID;                   # Needed for accounts payload
      ): Result[void,AristoError] =
  ## Verify that the `stoRoot` argument is properly referred to by the
  ## account data (if any) implied to by the `accPath` argument.
  ##
  # Verify storage root and account path
  if not stoRoot.isValid:
    return err(MergeRootMissing)
  if not accPath.isValid:
    return err(MergeAccPathMissing)

  # Check whether the account is marked for re-hash, already
  let lty = LeafTie(root: VertexID(1), path: accPath)
  if db.lTab.hasKey lty:
    return ok()

  # Get account leaf with account data
  let hike = lty.hikeUp(db).valueOr:
    return err(MergeAccUnaccessible)
  let wp = hike.legs[^1].wp
  if wp.vtx.vType != Leaf:
    return err(MergeAccPathWithoutLeaf)
  if wp.vtx.lData.pType != AccountData:
    return ok() # nothing to do

  # Need to flag for re-hash
  let stoID = wp.vtx.lData.account.storageID
  if stoID.isValid and stoID != stoRoot:
    return err(MergeAccWrongStorageRoot)

  # Clear Merkle keys and store leaf record
  for w in hike.legs.mapIt(it.wp.vid):
    db.nullifyKey w
  db.top.final.lTab[lty] = wp.vid

  ok()

# ------------------------------------------------------------------------------
# Private functions: add Merkle proof node
# ------------------------------------------------------------------------------
@@ -597,6 +638,7 @@ proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leafTie: LeafTie;                  # Leaf item to add to the database
    payload: PayloadRef;               # Payload value
    accPath: PathID;                   # Needed for accounts payload
      ): Result[Hike,AristoError] =
  ## Merge the argument `leafTie` key-value-pair into the top level vertex
  ## table of the database `db`. The field `path` of the `leafTie` argument is
@@ -604,6 +646,13 @@ proc merge*(
  ## is stored with the leaf vertex in the database unless the leaf vertex
  ## exists already.
  ##
  ## For `payload.root` vertex IDs with number at least `LEAST_FREE_VID`, the
  ## sub-tree generated by `payload.root` is considered a storage root linked
  ## to an account leaf referred to by a valid `accPath` (i.e. different from
  ## `VOID_PATH_ID`.) In that case, an account must exists. If there is payload
  ## of type `accountData`, its `storageID` must be unset or equal to the
  ## `payload.root` vertex ID.
  ##
  # Check whether the leaf is on the database and payloads match
  block:
    let vid = db.lTab.getOrVoid leafTie
@@ -612,7 +661,9 @@ proc merge*(
    if vtx.isValid and vtx.lData == payload:
      return err(MergeLeafPathCachedAlready)

  if not leafTie.root.isValid:
  if LEAST_FREE_VID <= leafTie.root.distinctBase:
    ? db.registerStorageRootAccount(leafTie.root, accPath)
  elif not leafTie.root.isValid:
    return err(MergeRootMissing)

  let hike = leafTie.hikeUp(db).to(Hike)
@@ -662,47 +713,65 @@ proc merge*(
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Even nibbled byte path
    payload: PayloadRef;               # Payload value
    accPath: PathID;                   # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
  ## object.
  let lty = LeafTie(root: root, path: ? path.pathToTag)
  db.merge(lty, payload).to(typeof result)
  db.merge(lty, payload, accPath).to(typeof result)

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    path: openArray[byte];             # Even nibbled byte path
    payload: PayloadRef;               # Payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(VertexID(1),path)` arguments instead of a
  ## `LeafTie` object.
  let lty = LeafTie(root: VertexID(1), path: ? path.pathToTag)
  db.merge(lty, payload, VOID_PATH_ID).to(typeof result)


proc merge*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
    accPath: PathID;                   # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
  ## The argument `data` is stored as-is as a a `RawData` payload value.
  db.merge(root, path, PayloadRef(pType: RawData, rawBlob: @data))
  ## The argument `data` is stored as-is as a `RawData` payload value.
  let pyl = PayloadRef(pType: RawData, rawBlob: @data)
  db.merge(root, path, pyl, accPath)

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(VertexID(1),path)` arguments instead of a
  ## `LeafTie`. The argument `data` is stored as-is as a `RawData` payload
  ## value.
  let pyl = PayloadRef(pType: RawData, rawBlob: @data)
  db.merge(VertexID(1), path, pyl, VOID_PATH_ID)


proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leaf: LeafTiePayload;              # Leaf item to add to the database
    accPath: PathID;                   # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()`. This function will not indicate if the leaf
  ## was cached, already.
  db.merge(leaf.leafTie, leaf.payload, accPath).to(typeof result)


proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leaf: LeafTiePayload;              # Leaf item to add to the database
      ): Result[bool,AristoError] =
  ## Variant of `merge()`. This function will not indicate if the leaf
  ## was cached, already.
  db.merge(leaf.leafTie, leaf.payload).to(typeof result)

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leafs: openArray[LeafTiePayload];  # Leaf items to add to the database
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Variant of `merge()` for leaf lists.
  var (merged, dups) = (0, 0)
  for n,w in leafs:
    let rc = db.merge(w.leafTie, w.payload)
    if rc.isOk:
      merged.inc
    elif rc.error in {MergeLeafPathCachedAlready,
                       MergeLeafPathOnBackendAlready}:
      dups.inc
    else:
      return (n,dups,rc.error)

  (merged, dups, AristoError(0))
  ## Variant of `merge()`, shortcut for `db.merge(leaf, VOID_PATH_ID)`. Note
  ## that this function fails unless `leaf.root == VertexID(1)`.
  db.merge(leaf.leafTie, leaf.payload, VOID_PATH_ID).to(typeof result)

# ---------------------
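
How the reworked `merge()` family treats its root argument, condensed into one decision function (an editor's sketch; the error names follow the new enum, everything else is illustrative):

const LeastFreeVid = 100u64

type MergeOutcome = enum
  MergeRootMissing, MergeAccPathMissing, StorageMerge, PlainMerge

func mergeKind(root: uint64; haveAccPath: bool): MergeOutcome =
  if LeastFreeVid <= root:
    # Storage root: must be tied to an account via a valid account path
    if haveAccPath: StorageMerge else: MergeAccPathMissing
  elif root == 0:
    MergeRootMissing
  else:
    PlainMerge                  # static root, e.g. VertexID(1)

doAssert mergeKind(1, false) == PlainMerge
doAssert mergeKind(200, false) == MergeAccPathMissing
doAssert mergeKind(200, true) == StorageMerge
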
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -64,7 +64,7 @@ func pathAsHEP*(tag: PathID; isLeaf = false): Blob =
func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
  ## Convert the argument `partPath` to a `PathID` type value.
  if partPath.len == 0:
    return ok PathID()
    return ok VOID_PATH_ID
  if partPath.len <= 64:
    return ok PathID(
      pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(),
@@ -74,7 +74,7 @@ func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
func pathToTag*(partPath: openArray[byte]): Result[PathID,AristoError] =
  ## Variant of `pathToTag()`
  if partPath.len == 0:
    return ok PathID()
    return ok VOID_PATH_ID
  if partPath.len <= 32:
    return ok PathID(
      pfx: UInt256.fromBytesBE @partPath & 0u8.repeat(32-partPath.len),
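
The right-padding both `pathToTag()` variants apply, demonstrated on a 4-byte analogue of `UInt256` so the sketch stays dependency-free:

import std/sequtils

proc toTag(partPath: openArray[byte]): tuple[pfx: uint32, nibbles: int] =
  ## Right-pad to the full width, read big-endian, keep the nibble count.
  doAssert partPath.len <= 4
  let padded = @partPath & 0u8.repeat(4 - partPath.len)
  for b in padded:
    result.pfx = result.pfx shl 8 or b.uint32
  result.nibbles = 2 * partPath.len

doAssert toTag([0xabu8]) == (pfx: 0xab000000u32, nibbles: 2)
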
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -17,7 +17,7 @@ import
  eth/common,
  results,
  "."/[aristo_constants, aristo_desc, aristo_get, aristo_hashify, aristo_init,
    aristo_merge, aristo_vid]
    aristo_merge]

# ------------------------------------------------------------------------------
# Public functions, signature generator
@@ -27,7 +27,7 @@ proc merkleSignBegin*(): MerkleSignRef =
  ## Start signature calculator for a list of key-value items.
  let
    db = AristoDbRef.init VoidBackendRef
    vid = db.vidFetch # => 2
    vid = VertexID(2)
  MerkleSignRef(
    root: vid,
    db: db)
@@ -41,7 +41,7 @@ proc merkleSignAdd*(
  ## is irrelevant.
  if sdb.error == AristoError(0):
    sdb.count.inc
    discard sdb.db.merge(sdb.root, key, val).valueOr:
    discard sdb.db.merge(sdb.root, key, val, VOID_PATH_ID).valueOr:
      sdb.`error` = error
      sdb.errKey = @key
      return
@@ -344,7 +344,17 @@ proc stow*(
      return err(error[1])
    db.top = LayerRef(
      delta: LayerDeltaRef(),
      final: LayerFinalRef(vGen: db.roFilter.vGen))
      final: LayerFinalRef())
    if db.roFilter.isValid:
      db.top.final.vGen = db.roFilter.vGen
    else:
      let rc = db.getIdgUBE()
      if rc.isOk:
        db.top.final.vGen = rc.value
      else:
        # It is OK if there was no `Idg`. Otherwise something serious happened
        # and there is no way to recover easily.
        doAssert rc.error == GetIdgNotFound

  if persistent:
    ? db.resolveBackendFilter()
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -34,7 +34,7 @@ proc toAccount*(
    try:
      return ok(rlp.decode(payload.rlpBlob, Account))
    except RlpError:
      return err(AccountRlpDecodingError)
      return err(AccRlpDecodingError)
  of AccountData:
    var acc = Account(
      nonce: payload.account.nonce,
@@ -56,7 +56,7 @@ proc toAccount*(
  ## Variant of `toAccount()` for a `Leaf` vertex.
  if vtx.isValid and vtx.vType == Leaf:
    return vtx.lData.toAccount db
  err AccountVtxUnsupported
  err AccVtxUnsupported

proc toAccount*(
    node: NodeRef;
@@ -69,7 +69,7 @@ proc toAccount*(
    try:
      return ok(rlp.decode(node.lData.rlpBlob, Account))
    except RlpError:
      return err(AccountRlpDecodingError)
      return err(AccRlpDecodingError)
  of AccountData:
    var acc = Account(
      nonce: node.lData.account.nonce,
@@ -78,13 +78,13 @@ proc toAccount*(
      storageRoot: EMPTY_ROOT_HASH)
    if node.lData.account.storageID.isValid:
      if not node.key[0].isValid:
        return err(AccountStorageKeyMissing)
        return err(AccStorageKeyMissing)
      acc.storageRoot = node.key[0].to(Hash256)
    return ok(acc)
  else:
    return err(PayloadTypeUnsupported)

  err AccountNodeUnsupported
  err AccNodeUnsupported

# ---------------------

@@ -163,10 +163,13 @@ proc toNode*(


proc subVids*(vtx: VertexRef): seq[VertexID] =
  ## Returns the list of all sub-vertex IDs for the argument `vtx`
  ## Returns the list of all sub-vertex IDs for the argument `vtx`.
  case vtx.vType:
  of Leaf:
    discard
    if vtx.lData.pType == AccountData:
      let vid = vtx.lData.account.storageID
      if vid.isValid:
        result.add vid
  of Branch:
    for vid in vtx.bVid:
      if vid.isValid:
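
What `subVids` contributes per vertex kind, and why deleting an account leaf can pull a whole storage trie into the disposal walk, as a stand-alone sketch with simplified types:

type
  VtxKind = enum Leaf, Branch
  Vtx = object
    case kind: VtxKind
    of Leaf:
      storageID: int          # 0 means: no storage trie attached
    of Branch:
      kids: array[16, int]    # 0 means: empty slot

func subVids(v: Vtx): seq[int] =
  case v.kind
  of Leaf:
    if v.storageID != 0: result.add v.storageID
  of Branch:
    for kid in v.kids:
      if kid != 0: result.add kid

doAssert Vtx(kind: Leaf, storageID: 7).subVids == @[7]
doAssert Vtx(kind: Leaf).subVids.len == 0
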
@@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -14,7 +14,7 @@
{.push raises: [].}

import
  std/[algorithm, sequtils],
  std/[algorithm, sequtils, typetraits],
  "."/[aristo_desc, aristo_layers]

# ------------------------------------------------------------------------------
@@ -32,8 +32,8 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
  ## when creating leaf vertices.
  if db.vGen.len == 0:
    # Note that `VertexID(1)` is the root of the main trie
    db.top.final.vGen = @[VertexID(3)]
    result = VertexID(2)
    db.top.final.vGen = @[VertexID(LEAST_FREE_VID+1)]
    result = VertexID(LEAST_FREE_VID)
  elif db.vGen.len == 1 or pristine:
    result = db.vGen[^1]
    db.top.final.vGen[^1] = result + 1
@@ -41,6 +41,7 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
    result = db.vGen[^2]
    db.top.final.vGen[^2] = db.top.final.vGen[^1]
    db.top.final.vGen.setLen(db.vGen.len-1)
  doAssert LEAST_FREE_VID <= result.distinctBase


proc vidPeek*(db: AristoDbRef): VertexID =
@@ -48,7 +49,7 @@ proc vidPeek*(db: AristoDbRef): VertexID =
  ## would be returned by the `new()` function.
  case db.vGen.len:
  of 0:
    VertexID(2)
    VertexID(LEAST_FREE_VID)
  of 1:
    db.vGen[^1]
  else:
@@ -58,7 +59,7 @@ proc vidPeek*(db: AristoDbRef): VertexID =
proc vidDispose*(db: AristoDbRef; vid: VertexID) =
  ## Recycle the argument `vtxID` which is useful after deleting entries from
  ## the vertex table to prevent the `VertexID` type key values small.
  if VertexID(1) < vid:
  if LEAST_FREE_VID <= vid.distinctBase:
    if db.vGen.len == 0:
      db.top.final.vGen = @[vid]
    else:
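
The `vGen` convention these procs rely on, spelled out in an editor's sketch: all entries except the last are recycled IDs, and the last entry means "this ID and everything above is unused".

const LeastFreeVid = 100u64

func vidPeekDemo(vGen: seq[uint64]): uint64 =
  ## Which ID a subsequent fetch would return.
  case vGen.len
  of 0: LeastFreeVid        # fresh database
  of 1: vGen[^1]            # only the top-of-range marker is left
  else: vGen[^2]            # prefer a recycled ID

doAssert vidPeekDemo(@[]) == 100u64
doAssert vidPeekDemo(@[102u64, 105]) == 102u64   # 102 recycled, 105+ unused
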
@@ -1,6 +1,6 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -1,6 +1,6 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -19,7 +19,7 @@
##
import
  ../aristo_init/[rocks_db, persistent],
  ".."/[aristo_desc, aristo_init],
  ../aristo_desc,
  "."/[walk_private, memory_only]

export
@@ -31,8 +31,8 @@ export
# Public iterators (all in one)
# ------------------------------------------------------------------------------

iterator walkVtxBe*(
    T: type RdbBackendRef;
iterator walkVtxBe*[T: RdbBackendRef](
    _: type T;
    db: AristoDbRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Iterate over filtered RocksDB backend vertices. This function depends on
@@ -40,23 +40,23 @@ iterator walkVtxBe*(
  for (vid,vtx) in walkVtxBeImpl[T](db):
    yield (vid,vtx)

iterator walkKeyBe*(
    T: type RdbBackendRef;
iterator walkKeyBe*[T: RdbBackendRef](
    _: type T;
    db: AristoDbRef;
      ): tuple[vid: VertexID, key: HashKey] =
  ## Similar to `walkVtxBe()` but for keys.
  for (vid,key) in walkKeyBeImpl[T](db):
    yield (vid,key)

iterator walkFilBe*(
    be: RdbBackendRef;
iterator walkFilBe*[T: RdbBackendRef](
    be: T;
      ): tuple[qid: QueueID, filter: FilterRef] =
  ## Iterate over backend filters.
  for (qid,filter) in be.walkFilBeImpl:
    yield (qid,filter)

iterator walkFifoBe*(
    be: RdbBackendRef;
iterator walkFifoBe*[T: RdbBackendRef](
    be: T;
      ): tuple[qid: QueueID, fid: FilterRef] =
  ## Walk filter slots in fifo order.
  for (qid,filter) in be.walkFifoBeImpl:
@@ -64,8 +64,8 @@ iterator walkFifoBe*(

# -----------

iterator walkPairs*(
    T: type RdbBackendRef;
iterator walkPairs*[T: RdbBackendRef](
    _: type T;
    db: AristoDbRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Walk over all `(VertexID,VertexRef)` in the database. Note that entries
@@ -73,8 +73,8 @@ iterator walkPairs*(
  for (vid,vtx) in walkPairsImpl[T](db):
    yield (vid,vtx)

iterator replicate*(
    T: type RdbBackendRef;
iterator replicate*[T: RdbBackendRef](
    _: type T;
    db: AristoDbRef;
      ): tuple[vid: VertexID, key: HashKey, vtx: VertexRef, node: NodeRef] =
  ## Variant of `walkPairsImpl()` for legacy applications.
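
The signature pattern the iterators were moved to, a generic parameter bound by the backend type with the typedesc passed as `_`, shown in miniature (an editor's sketch, not one of the iterators above):

iterator walkDemo[T: SomeInteger](_: type T; upTo: int): T =
  for i in 0 .. upTo:
    yield T(i)

var got: seq[int8]
for w in int8.walkDemo(2):   # the type argument selects the instantiation
  got.add w
doAssert got == @[0i8, 1, 2]
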
@@ -1,6 +1,6 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -10,7 +10,7 @@
# distributed except according to those terms.

import
  std/[sequtils, sets, tables],
  std/[algorithm, sequtils, sets, tables],
  results,
  ".."/[aristo_desc, aristo_get, aristo_init, aristo_layers, aristo_utils]

@@ -124,7 +124,7 @@ iterator walkPairsImpl*[T](
    if vtx.isValid:
      yield (vid,vtx)

  for (_,vid,vtx) in walkVtxBeImpl[T](db):
  for (vid,vtx) in walkVtxBeImpl[T](db):
    if vid notin seen:
      yield (vid,vtx)
@@ -307,7 +307,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
    if not rootOk:
      cMpt.root = mpt.vidFetch(pristine=true)

    let rc = mpt.merge(cMpt.root, k, v)
    let rc = mpt.merge(cMpt.root, k, v, VOID_PATH_ID)
    if rc.isErr:
      # Re-cycle unused ID (prevents from leaking IDs)
      if not rootOk:
@@ -442,7 +442,7 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      mpt = cAcc.mpt
      key = address.keccakHash.data
      val = acc.toPayloadRef()
      rc = mpt.merge(cAcc.root, key, val)
      rc = mpt.merge(cAcc.root, key, val, VOID_PATH_ID)
    if rc.isErr:
      return err(rc.error.toError(db, info))
    ok()
@@ -791,7 +791,7 @@ proc newAccHandler*(
  elif root.isNil:
    rVid = AristoCoreDbVid(haveCtx: false, base: base, aVid: VertexID(1))
  elif rID != VertexID(1):
    let error = (rID,AccountRootUnacceptable)
    let error = (rID,AccRootUnacceptable)
    return err(error.toError(db, info, RootUnacceptable))

  let (mode, mpt) = case saveMode:
@@ -1,5 +1,5 @@
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -30,7 +30,7 @@ iterator walkPairsImpl*[T](
  when T isnot VoidBackendRef:
    mixin walk

    for (_,key,data) in db.backend.T.walk:
    for (key,data) in db.backend.T.walk:
      if key notin seen and data.isValid:
        yield (key,data)
@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -106,6 +106,8 @@ proc accountsRunner(
    baseDir = getTmpDir() / sample.name & "-accounts"
    dbDir = if persistent: baseDir / "tmp" else: ""
    isPersistent = if persistent: "persistent DB" else: "mem DB only"
    doRdbOk = (cmpBackends and 0 < dbDir.len)
    cmpBeInfo = if doRdbOk: "persistent" else: "memory"

  defer:
    try: baseDir.removeDir except CatchableError: discard
@@ -115,14 +117,15 @@ proc accountsRunner(
    test &"Merge {accLst.len} proof & account lists to database":
      check noisy.testTxMergeProofAndKvpList(accLst, dbDir, resetDb)

    test &"Compare {accLst.len} account lists on different database backends":
      if cmpBackends and 0 < dbDir.len:
    test &"Compare {accLst.len} account lists on {cmpBeInfo}" &
        " db backend vs. cache":
        check noisy.testBackendConsistency(accLst, dbDir, resetDb)
      else:
        skip()

    test &"Delete accounts database, successively {accLst.len} entries":
      check noisy.testTxMergeAndDelete(accLst, dbDir)
    test &"Delete accounts database successively, {accLst.len} lists":
      check noisy.testTxMergeAndDeleteOneByOne(accLst, dbDir)

    test &"Delete accounts database sub-trees, {accLst.len} lists":
      check noisy.testTxMergeAndDeleteSubTree(accLst, dbDir)

    test &"Distributed backend access {accLst.len} entries":
      check noisy.testDistributedAccess(accLst, dbDir)
@@ -146,6 +149,8 @@ proc storagesRunner(
    baseDir = getTmpDir() / sample.name & "-storage"
    dbDir = if persistent: baseDir / "tmp" else: ""
    isPersistent = if persistent: "persistent DB" else: "mem DB only"
    doRdbOk = (cmpBackends and 0 < dbDir.len)
    cmpBeInfo = if doRdbOk: "persistent" else: "memory"

  defer:
    try: baseDir.removeDir except CatchableError: discard
@@ -156,14 +161,15 @@ proc storagesRunner(
      check noisy.testTxMergeProofAndKvpList(
        stoLst, dbDir, resetDb, fileInfo, oops)

    test &"Compare {stoLst.len} slot lists on different database backends":
      if cmpBackends and 0 < dbDir.len:
    test &"Compare {stoLst.len} slot lists on {cmpBeInfo}" &
        " db backend vs. cache":
        check noisy.testBackendConsistency(stoLst, dbDir, resetDb)
      else:
        skip()

    test &"Delete storage database, successively {stoLst.len} entries":
      check noisy.testTxMergeAndDelete(stoLst, dbDir)
    test &"Delete storage database successively, {stoLst.len} lists":
      check noisy.testTxMergeAndDeleteOneByOne(stoLst, dbDir)

    test &"Delete storage database sub-trees, {stoLst.len} lists":
      check noisy.testTxMergeAndDeleteSubTree(stoLst, dbDir)

    test &"Distributed backend access {stoLst.len} entries":
      check noisy.testDistributedAccess(stoLst, dbDir)
@@ -212,7 +218,7 @@ when isMainModule:
      noisy.storagesRunner(sam, resetDb=true, oops=knownFailures)

  when true: # and false:
    let persistent = false
    let persistent = false # or true
    noisy.showElapsed("@snap_test_list"):
      for n,sam in snapTestList:
        noisy.accountsRunner(sam, persistent=persistent)
@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -81,7 +81,7 @@ proc mergeData(
    let proved = db.merge(proof, rc.value)
    xCheck proved.error in {AristoError(0),MergeHashKeyCachedAlready}

  let merged = db.merge leafs
  let merged = db.mergeList leafs
  xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}

  block:
@@ -94,13 +94,15 @@ proc mergeData(

  true


proc verify(
    ly: LayerRef;                      # Database layer
    be: MemBackendRef|RdbBackendRef;   # Backend
    be: BackendRef;                    # Backend
    noisy: bool;
      ): bool =
  ## ..

  proc verifyImpl[T](noisy: bool; ly: LayerRef; be: T): bool =
    ## ..
    let
      beSTab = be.walkVtx.toSeq.mapIt((it[0],it[1])).toTable
      beKMap = be.walkKey.toSeq.mapIt((it[0],it[1])).toTable
@@ -124,6 +126,14 @@ proc verify(

    true

  case be.kind:
  of BackendMemory:
    noisy.verifyImpl(ly, be.MemBackendRef)
  of BackendRocksDB:
    noisy.verifyImpl(ly, be.RdbBackendRef)
  else:
    raiseAssert "Oops, unsupported backend " & $be.kind

# -----------

proc collectFilter(
@@ -147,14 +157,16 @@ proc collectFilter(

  true

proc verifyFiltersImpl[T](
    be: T;
proc verifyFilters(
    db: AristoDbRef;
    tab: Table[QueueID,Hash];
    noisy: bool;
      ): bool =

  proc verifyImpl[T](noisy: bool; tab: Table[QueueID,Hash]; be: T): bool =
    ## Compare stored filters against registered ones
    var n = 0
    for (fid,filter) in be.walkFilBe:
    for (fid,filter) in walkFilBe(be):
      let
        filterHash = filter.hash
        registered = tab.getOrDefault(fid, BlindHash)
@@ -172,21 +184,15 @@ proc verifyFiltersImpl[T](
    xCheck n == tab.len
    true

proc verifyFilters(
    db: AristoDbRef;
    tab: Table[QueueID,Hash];
    noisy: bool;
      ): bool =
  ## Wrapper
  let be = db.backend
  case be.kind:
  of BackendMemory:
    return be.MemBackendRef.verifyFiltersImpl(tab, noisy)
    noisy.verifyImpl(tab, be.MemBackendRef)
  of BackendRocksDB:
    return be.RdbBackendRef.verifyFiltersImpl(tab, noisy)
    noisy.verifyImpl(tab, be.RdbBackendRef)
  else:
    discard
    check db.backend.kind == BackendMemory or db.backend.kind == BackendRocksDB
    raiseAssert "Oops, unsupported backend " & $be.kind

# ------------------------------------------------------------------------------
# Public test function
@@ -195,9 +201,8 @@ proc verifyFilters(
proc testBackendConsistency*(
    noisy: bool;
    list: openArray[ProofTrieData];    # Test data
    rdbPath: string;                   # Rocks DB storage directory
    rdbPath = "";                      # Rocks DB storage directory
    resetDb = false;
    doRdbOk = true;
      ): bool =
  ## Import accounts
  var
@@ -211,8 +216,6 @@ proc testBackendConsistency*(
  defer:
    rdb.finish(flush=true)

  xCheck rdbPath != ""

  for n,w in list:
    if w.root != rootKey or resetDB:
      rootKey = w.root
@@ -220,15 +223,17 @@ proc testBackendConsistency*(
      ndb = AristoDbRef.init()
      mdb = AristoDbRef.init MemBackendRef

      if doRdbOk:
        if not rdb.backend.isNil: # ignore bootstrap
          let verifyFiltersOk = rdb.verifyFilters(filTab, noisy)
          xCheck verifyFiltersOk
          filTab.clear
        rdb.finish(flush=true)
        if 0 < rdbPath.len:
          let rc = AristoDbRef.init(RdbBackendRef, rdbPath)
          xCheckRc rc.error == 0
          rdb = rc.value
        else:
          rdb = AristoDbRef.init MemBackendRef # fake `rdb` database

      # Disable automated filter management, still allow filter table access
      # for low level read/write testing.
@@ -237,54 +242,47 @@ proc testBackendConsistency*(

    xCheck ndb.backend.isNil
    xCheck not mdb.backend.isNil
    xCheck doRdbOk or not rdb.backend.isNil

    when true and false:
      noisy.say "***", "beCon(1) <", n, "/", list.len-1, ">", " groups=", count
      noisy.say "***", "beCon(1) <", n, "/", list.len-1, ">",
        " groups=", count,
        "\n ndb\n ", ndb.pp(backendOk = true),
        "\n -------------",
        "\n mdb\n ", mdb.pp(backendOk = true),
        "\n -------------",
        "\n rdb\n ", rdb.pp(backendOk = true),
        "\n -------------"

    block:
      let
        rootVid = VertexID(1)
        leafs = w.kvpLst.mapRootVid VertexID(1) # for merging it into main trie
        leafs = w.kvpLst.mapRootVid rootVid # for merging it into main trie

      block:
        let ndbOk = ndb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)
        let ndbOk = ndb.mergeData(rootKey, rootVid, w.proof, leafs, noisy=false)
        xCheck ndbOk
      block:
        let mdbOk = mdb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)

        let mdbOk = mdb.mergeData(rootKey, rootVid, w.proof, leafs, noisy=false)
        xCheck mdbOk
      if doRdbOk: # optional
        let rdbOk = rdb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)

        let rdbOk = rdb.mergeData(rootKey, rootVid, w.proof, leafs, noisy=false)
        xCheck rdbOk

    when true and false:
      noisy.say "***", "beCon(2) <", n, "/", list.len-1, ">",
        " groups=", count,
        "\n cache dump\n ", ndb.pp,
        "\n backend dump\n ", ndb.backend.pp(ndb),
        "\n ndb\n ", ndb.pp(backendOk = true),
        "\n -------------",
        "\n mdb cache\n ", mdb.pp,
        "\n mdb backend\n ", mdb.backend.pp(ndb),
        "\n -------------",
        "\n rdb cache\n ", rdb.pp,
        "\n rdb backend\n ", rdb.backend.pp(ndb),
        "\n mdb\n ", mdb.pp(backendOk = true),
        #"\n -------------",
        #"\n rdb\n ", rdb.pp(backendOk = true),
        "\n -------------"

    when true and false:
      noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">", " groups=", count

    var
      mdbPreSaveCache, mdbPreSaveBackend: string
      rdbPreSaveCache, rdbPreSaveBackend: string
    when true: # and false:
      #mdbPreSaveCache = mdb.pp
      #mdbPreSaveBackend = mdb.backend.pp(mdb)
      rdbPreSaveCache = rdb.pp
      rdbPreSaveBackend = rdb.backend.pp(rdb)

      mdbPreSave = ""
      rdbPreSave = ""
    when true and false:
      mdbPreSave = mdb.pp(backendOk = true)
      rdbPreSave = rdb.pp(backendOk = true)

    # Provide filter, store filter on permanent BE, and register filter digest
    block:
@@ -298,44 +296,35 @@ proc testBackendConsistency*(
      #noisy.say "***", "db-dump\n ", mdb.pp
      let rc = mdb.stow(persistent=true, chunkedMpt=true)
      xCheckRc rc.error == 0

    if doRdbOk:
      block:
        let rc = rdb.stow(persistent=true, chunkedMpt=true)
        xCheckRc rc.error == 0

    block:
      let mdbVerifyOk = ndb.top.verify(mdb.backend.MemBackendRef, noisy)
      let mdbVerifyOk = ndb.top.verify(mdb.backend, noisy)
      xCheck mdbVerifyOk:
        when true and false:
          noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
            " groups=", count,
            "\n ndb cache\n ", ndb.pp,
            "\n ndb backend=", ndb.backend.isNil.not,
            "\n ndb\n ", ndb.pp(backendOk = true),
            #"\n -------------",
            #"\n mdb pre-save cache\n ", mdbPreSaveCache,
            #"\n mdb pre-save backend\n ", mdbPreSaveBackend,
            #"\n mdb pre-stow\n ", mdbPreSave,
            "\n -------------",
            "\n mdb cache\n ", mdb.pp,
            "\n mdb backend\n ", mdb.backend.pp(ndb),
            "\n mdb\n ", mdb.pp(backendOk = true),
            "\n -------------"

    if doRdbOk:
      let rdbVerifyOk = ndb.top.verify(rdb.backend.RdbBackendRef, noisy)
      let rdbVerifyOk = ndb.top.verify(rdb.backend, noisy)
      xCheck rdbVerifyOk:
        when true and false:
          noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
          noisy.say "***", "beCon(5) <", n, "/", list.len-1, ">",
            " groups=", count,
            "\n ndb cache\n ", ndb.pp,
            "\n ndb backend=", ndb.backend.isNil.not,
            "\n ndb\n ", ndb.pp(backendOk = true),
            "\n -------------",
            "\n rdb pre-save cache\n ", rdbPreSaveCache,
            "\n rdb pre-save backend\n ", rdbPreSaveBackend,
            "\n rdb pre-stow\n ", rdbPreSave,
            "\n -------------",
            "\n rdb cache\n ", rdb.pp,
            "\n rdb backend\n ", rdb.backend.pp(ndb),
            "\n rdb\n ", rdb.pp(backendOk = true),
            #"\n -------------",
            #"\n mdb cache\n ", mdb.pp,
            #"\n mdb backend\n ", mdb.backend.pp(ndb),
            #"\n mdb\n ", mdb.pp(backendOk = true),
            "\n -------------"

    when true and false:
@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -169,7 +169,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =

  # Fill backend
  block:
    let report = db.merge w[0]
    let report = db.mergeList w[0]
    if report.error != 0:
      db.finish(flush=true)
      check report.error == 0
@ -183,7 +183,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =

  # Clause (9) from `aristo/README.md` example
  for n in 0 ..< dx.len:
    let report = dx[n].merge w[n+1]
    let report = dx[n].mergeList w[n+1]
    if report.error != 0:
      db.finish(flush=true)
      check (n, report.error) == (n,0)
@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -207,6 +207,27 @@ func mapRootVid*(
      leafTie: LeafTie(root: toVid, path: it.leafTie.path),
      payload: it.payload))

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc mergeList*(
    db: AristoDbRef;                  # Database, top layer
    leafs: openArray[LeafTiePayload]; # Leaf items to add to the database
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Variant of `merge()` for leaf lists.
  var (merged, dups) = (0, 0)
  for n,w in leafs:
    let rc = db.merge(w.leafTie, w.payload, VOID_PATH_ID)
    if rc.isOk:
      merged.inc
    elif rc.error in {MergeLeafPathCachedAlready,MergeLeafPathOnBackendAlready}:
      dups.inc
    else:
      return (n,dups,rc.error)

  (merged, dups, AristoError(0))

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
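
A minimal usage sketch for the new `mergeList()` helper (the `db` handle and
`leafs` list are hypothetical; the call shape and result tuple are the ones
introduced above). Note that on failure the first tuple field carries the
index of the offending leaf rather than a merge count:

  # Merge a prepared leaf list and inspect the statistics tuple.
  let (merged, dups, error) = db.mergeList leafs
  if error != AristoError(0):
    echo "merge stopped at leaf ", merged, " after ", dups, " duplicates"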

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -309,11 +309,11 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =
  db.top.final.vGen.setLen(0)
  for n in 0 .. 5:
    let w = db.vidFetch()
    xCheck w == VertexID(2) + n # VertexID(1) is default root ID
    xCheck w == VertexID(LEAST_FREE_VID) + n # VertexID(1) is default root ID
  xCheck db.vGen.len == 1

  # Recycling and re-org tests
  func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it))
  func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(LEAST_FREE_VID+it))

  xCheck @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[3, 4, 5, 7] .toVQ
  xCheck @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[3] .toVQ
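
For orientation, the two checks encode the `vidReorg()` invariant: a contiguous
run of recycled IDs that reaches the top of the allocated range collapses into
its smallest member, which then stands for that ID and everything above it. A
hedged restatement, with all values offset by LEAST_FREE_VID via `toVQ` as
above:

  # {7,8,9} is contiguous at the top and collapses to 7; 3, 4, 5 stay as-is.
  doAssert @[8, 7, 3, 4, 5, 9].toVQ.vidReorg == @[3, 4, 5, 7].toVQ
  # With 6 present, {3..9} is one contiguous run and collapses to just 3.
  doAssert @[8, 7, 6, 3, 4, 5, 9].toVQ.vidReorg == @[3].toVQ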

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -298,13 +298,14 @@ proc mergeRlpData*(
      ): Result[void,AristoError] =
  block body:
    discard db.merge(
      LeafTie(
      LeafTiePayload(
        leafTie: LeafTie(
          root: VertexID(1),
          path: path.normal),
      PayloadRef(
        payload: PayloadRef(
          pType: RlpData,
          rlpBlob: @rlpData)).valueOr:
      if error == MergeLeafPathCachedAlready:
          rlpBlob: @rlpData))).valueOr:
      if error in {MergeLeafPathCachedAlready,MergeLeafPathOnBackendAlready}:
        break body
      return err(error)
  ok()
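
The fix above bundles the two former arguments into a single `LeafTiePayload`
object and widens the benign-duplicate check to also accept leafs already on
the backend. A minimal sketch of the corrected call shape (`path` and
`rlpData` are placeholders for the proc's parameters):

  # One wrapper object instead of separate LeafTie/PayloadRef arguments.
  discard db.merge(LeafTiePayload(
    leafTie: LeafTie(root: VertexID(1), path: path.normal),
    payload: PayloadRef(pType: RlpData, rlpBlob: @rlpData)))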

@ -313,7 +314,7 @@ proc mergeRlpData*(
# Public test function
# ------------------------------------------------------------------------------

proc testTxMergeAndDelete*(
proc testTxMergeAndDeleteOneByOne*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                  # Rocks DB storage directory
@ -384,7 +385,7 @@ proc testTxMergeAndDelete*(
          " n=", n, "/", list.len,
          "\n leaf=", leaf.pp(db),
          "\n db\n ", db.pp(backendOk=true),
          "\n"
          ""

      # Delete leaf
      block:
@ -416,6 +417,85 @@ proc testTxMergeAndDelete*(
  true


proc testTxMergeAndDeleteSubTree*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                  # Rocks DB storage directory
      ): bool =
  var
    prng = PrngDesc.init 42
    db = AristoDbRef()
    fwdRevVfyToggle = true
  defer:
    db.finish(flush=true)

  for n,w in list:

    # Start with brand new persistent database.
    db = block:
      if 0 < rdbPath.len:
        let rc = AristoDbRef.init(RdbBackendRef, rdbPath, qidLayout=TxQidLyo)
        xCheckRc rc.error == 0
        rc.value
      else:
        AristoDbRef.init(MemBackendRef, qidLayout=TxQidLyo)

    # Start transaction (double frame for testing)
    xCheck db.txTop.isErr
    var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
    xCheck tx.isTop()
    xCheck tx.level == 2

    # Reset database so that the next round has a clean setup
    defer: db.innerCleanUp

    # Merge leaf data into main trie (w/vertex ID 1)
    let kvpLeafs = block:
      var lst = w.kvpLst.mapRootVid VertexID(1)
      # The list might be reduced for isolation of particular properties,
      # e.g. lst.setLen(min(5,lst.len))
      lst
    for i,leaf in kvpLeafs:
      let rc = db.merge leaf
      xCheckRc rc.error == 0

    # List of all leaf entries that should be on the database
    var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet

    # Provide a (reproducible) pseudo-random copy of the leafs list
    let leafVidPairs = db.randomisedLeafs prng
    xCheck leafVidPairs.len == leafsLeft.len

    # === delete sub-tree ===
    block:
      let saveBeOk = tx.saveToBackend(
        chunkedMpt=false, relax=false, noisy=noisy, 1 + list.len * n)
      xCheck saveBeOk:
        noisy.say "***", "del(1)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""
    # Delete sub-tree
    block:
      let rc = db.delete VertexID(1)
      xCheckRc rc.error == (0,0):
        noisy.say "***", "del(2)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""
    block:
      let saveBeOk = tx.saveToBackend(
        chunkedMpt=false, relax=false, noisy=noisy, 2 + list.len * n)
      xCheck saveBeOk:
        noisy.say "***", "del(3)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""
    when true and false:
      noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len

  true
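
The heart of the new test is the sub-trie delete; a minimal sketch of that
step in isolation (assumes an AristoDbRef with leafs already merged under the
main trie, vertex ID 1, as in the loop above):

  # Dropping the root vertex removes the whole trie below it in one call,
  # mirroring how a storage trie dies together with its account.
  let rc = db.delete VertexID(1)
  doAssert rc.isOk    # the test checks the error tuple against (0,0)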


proc testTxMergeProofAndKvpList*(
    noisy: bool;
    list: openArray[ProofTrieData];
@ -480,7 +560,7 @@ proc testTxMergeProofAndKvpList*(
    xCheck proved.merged < db.nLayersLebal()

    let
      merged = db.merge leafs
      merged = db.mergeList leafs

    xCheck db.lTab.len == lTabLen + merged.merged
    xCheck merged.merged + merged.dups == leafs.len
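
The two checks pin down the `mergeList()` statistics contract; restated as a
hedged sketch, every input leaf must be accounted for as either newly merged
or a known duplicate:

  # Invariant after a mergeList() run that returns error == 0.
  let stats = db.mergeList leafs
  doAssert stats.merged + stats.dups == leafs.len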