Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Nimbus
|
|
|
|
# Copyright (c) 2022-2023 Status Research & Development GmbH
|
2023-01-23 16:09:12 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
|
|
|
## Snap sync components tester and TDD environment
|
|
|
|
|
|
|
|
import
|
2023-08-04 11:10:09 +00:00
|
|
|
std/sequtils,
|
|
|
|
eth/[common, p2p],
|
2023-01-23 16:09:12 +00:00
|
|
|
unittest2,
|
2023-08-04 11:10:09 +00:00
|
|
|
../../nimbus/db/core_db,
|
2023-01-23 16:09:12 +00:00
|
|
|
../../nimbus/sync/snap/range_desc,
|
|
|
|
../../nimbus/sync/snap/worker/db/[
|
2023-09-26 09:21:13 +00:00
|
|
|
hexary_desc, hexary_inspect, hexary_paths, snapdb_accounts, snapdb_desc],
|
2023-01-23 16:09:12 +00:00
|
|
|
../replay/[pp, undump_accounts],
|
|
|
|
./test_helpers
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public test function
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc test_inspectSingleAccountsMemDb*(
    inList: seq[seq[UndumpAccounts]];
    memBase: SnapDbRef;
    singleStats: var seq[(int,TrieNodeStat)];
      ) =
  ## Fingerprinting single accounts lists on an in-memory DB. For every
  ## accounts list one `(tableSize,stats)` entry is appended to `singleStats`.
  for accList in inList:
    # Each accounts list gets its own, separate storage descriptor
    let
      stateRoot = accList[0].root
      rootKey = stateRoot.to(NodeKey)
      desc = SnapDbAccountsRef.init(memBase, stateRoot, Peer())
    for pack in accList:
      check desc.importAccounts(pack.base, pack.data, persistent=false).isImportOk

    # All-in-one inspection pass over the trie
    let stats = desc.hexaDb.hexaryInspectTrie(rootKey)
    check not stats.stopped
    let
      dangling = stats.dangling.mapIt(it.partialPath)
      nodeKeys = dangling.hexaryPathNodeKeys(
        rootKey, desc.hexaDb, missingOk=true)
    check dangling.len == nodeKeys.len
    singleStats.add (desc.hexaDb.tab.len,stats)

    # Verify piecemeal approach for `hexaryInspectTrie()` ...
    var
      resume = TrieNodeStatCtxRef()
      piecemeal: HashSet[Blob]
    while not resume.isNil:
      # Resume inspection, suspending again after a bounded amount of work
      let subStat = desc.hexaDb.hexaryInspectTrie(
        rootKey, resumeCtx=resume, suspendAfter=128)
      check not subStat.stopped
      resume = subStat.resumeCtx
      piecemeal.incl subStat.dangling.mapIt(it.partialPath).toHashSet
    # Must match earlier all-in-one result
    check dangling.len == piecemeal.len
    check dangling.toHashSet == piecemeal
|
|
|
|
|
|
|
|
proc test_inspectSingleAccountsPersistent*(
    inList: seq[seq[UndumpAccounts]];
    dbSlotCb: proc(n: int): SnapDbRef;
    singleStats: seq[(int,TrieNodeStat)];
      ) =
  ## Fingerprinting single accounts lists for a persistent DB. The results
  ## are cross-checked against the in-memory fingerprints in `singleStats`.
  for slot,accList in inList:
    let
      stateRoot = accList[0].root
      rootKey = stateRoot.to(NodeKey)
      dbBase = slot.dbSlotCb
    if dbBase.isNil:
      break
    # Separate storage on persistent DB (leaving first db slot empty)
    let desc = SnapDbAccountsRef.init(dbBase, stateRoot, Peer())

    for pack in accList:
      check desc.importAccounts(pack.base, pack.data, persistent=true).isImportOk
    # All-in-one inspection pass over the trie
    let stats = desc.getAccountFn.hexaryInspectTrie(rootKey)
    check not stats.stopped
    let
      dangling = stats.dangling.mapIt(it.partialPath)
      nodeKeys = dangling.hexaryPathNodeKeys(
        rootKey, desc.hexaDb, missingOk=true)
    check dangling.len == nodeKeys.len

    # Must be the same as the in-memory fingerprint
    let memDangling = singleStats[slot][1].dangling.mapIt(it.partialPath)
    check memDangling.toHashSet == dangling.toHashSet

    # Verify piecemeal approach for `hexaryInspectTrie()` ...
    var
      resume = TrieNodeStatCtxRef()
      piecemeal: HashSet[Blob]
    while not resume.isNil:
      # Resume inspection, suspending again after a bounded amount of work
      let subStat = desc.getAccountFn.hexaryInspectTrie(
        rootKey, resumeCtx=resume, suspendAfter=128)
      check not subStat.stopped
      resume = subStat.resumeCtx
      piecemeal.incl subStat.dangling.mapIt(it.partialPath).toHashSet
    # Must match earlier all-in-one result
    check dangling.len == piecemeal.len
    check dangling.toHashSet == piecemeal
|
|
|
|
|
|
|
|
proc test_inspectAccountsInMemDb*(
    inList: seq[seq[UndumpAccounts]];
    memBase: SnapDbRef;
    accuStats: var seq[(int,TrieNodeStat)];
      ) =
  ## Fingerprinting accumulated accounts on an in-memory DB. For every
  ## accounts list one `(tableSize,stats)` entry is appended to `accuStats`.
  let memDesc = SnapDbAccountsRef.init(memBase, Hash256(), Peer())

  for accList in inList:
    # Accumulated storage: re-use the shared descriptor with the next root
    let
      stateRoot = accList[0].root
      rootKey = stateRoot.to(NodeKey)
      desc = memDesc.dup(stateRoot,Peer())
    for pack in accList:
      check desc.importAccounts(pack.base, pack.data, persistent=false).isImportOk
    let stats = desc.hexaDb.hexaryInspectTrie(rootKey)
    check not stats.stopped
    let
      dangling = stats.dangling.mapIt(it.partialPath)
      nodeKeys = dangling.hexaryPathNodeKeys(
        rootKey, desc.hexaDb, missingOk=true)
    check dangling.len == nodeKeys.len
    accuStats.add (desc.hexaDb.tab.len, stats)
|
|
|
|
|
|
|
|
proc test_inspectAccountsPersistent*(
    inList: seq[seq[UndumpAccounts]];
    cdb: CoreDbRef;
    accuStats: seq[(int,TrieNodeStat)];
      ) =
  ## Fingerprinting accumulated accounts for a persistent db. The inspection
  ## result for each accounts list must match the in-memory reference entry
  ## in `accuStats`.
  let
    perBase = SnapDbRef.init(cdb)
    perDesc = SnapDbAccountsRef.init(perBase, Hash256(), Peer())

  for n,accList in inList:
    # Accumulated storage on persistent DB (using first db slot)
    let
      root = accList[0].root
      rootKey = root.to(NodeKey)
      # Note: the original also built `rootSet = [rootKey].toHashSet` here
      # which was never used -- removed.
      desc = perDesc.dup(root,Peer())
    for w in accList:
      check desc.importAccounts(w.base, w.data, persistent=true).isImportOk
    # All-in-one inspection pass over the trie
    let stats = desc.getAccountFn.hexaryInspectTrie(rootKey)
    check not stats.stopped
    let
      dangling = stats.dangling.mapIt(it.partialPath)
      keys = dangling.hexaryPathNodeKeys(
        rootKey, desc.hexaDb, missingOk=true)
    check dangling.len == keys.len
    # Must agree with the in-memory fingerprint for the same accounts list
    check accuStats[n][1] == stats
|
|
|
|
|
|
|
|
proc test_inspectCascadedMemDb*(
    inList: seq[seq[UndumpAccounts]];
      ) =
  ## Cascaded fingerprinting accounts on an in-memory DB: inspecting with an
  ## accumulated `stopAtLevel` path list must give the same dangling set as a
  ## plain full inspection.
  let
    cscBase = SnapDbRef.init(newCoreDbRef LegacyDbMemory)
    cscDesc = SnapDbAccountsRef.init(cscBase, Hash256(), Peer())
  var
    cscStep: Table[NodeKey,(int,seq[Blob])]

  for accList in inList:
    # Accumulated storage
    let
      stateRoot = accList[0].root
      rootKey = stateRoot.to(NodeKey)
      desc = cscDesc.dup(stateRoot,Peer())
    for pack in accList:
      check desc.importAccounts(pack.base, pack.data, persistent=false).isImportOk
    # Count how often each root key was visited
    if cscStep.hasKeyOrPut(rootKey, (1, seq[Blob].default)):
      cscStep[rootKey][0].inc
    let
      stat0 = desc.hexaDb.hexaryInspectTrie(rootKey)
      stats = desc.hexaDb.hexaryInspectTrie(rootKey, cscStep[rootKey][1])
    check not stat0.stopped
    check not stats.stopped
    let
      accumulated = stat0.dangling.mapIt(it.partialPath).toHashSet
      cascaded = stats.dangling.mapIt(it.partialPath).toHashSet
    check accumulated == cascaded
  # Make sure that there are no trivial cases
  let trivialCases = toSeq(cscStep.values).filterIt(it[0] <= 1).len
  check trivialCases == 0
|
|
|
|
|
|
|
|
proc test_inspectCascadedPersistent*(
    inList: seq[seq[UndumpAccounts]];
    cdb: CoreDbRef;
      ) =
  ## Cascaded fingerprinting accounts on a persistent DB: inspecting with an
  ## accumulated path list must give the same dangling set as a plain full
  ## inspection.
  let
    cscBase = SnapDbRef.init(cdb)
    cscDesc = SnapDbAccountsRef.init(cscBase, Hash256(), Peer())
  var
    cscStep: Table[NodeKey,(int,seq[Blob])]

  for accList in inList:
    # Accumulated storage
    let
      stateRoot = accList[0].root
      rootKey = stateRoot.to(NodeKey)
      desc = cscDesc.dup(stateRoot, Peer())
    for pack in accList:
      check desc.importAccounts(pack.base, pack.data, persistent=true).isImportOk
    # Count how often each root key was visited
    if cscStep.hasKeyOrPut(rootKey, (1, seq[Blob].default)):
      cscStep[rootKey][0].inc
    let
      stat0 = desc.getAccountFn.hexaryInspectTrie(rootKey)
      stats = desc.getAccountFn.hexaryInspectTrie(rootKey, cscStep[rootKey][1])
    check not stat0.stopped
    check not stats.stopped
    let
      accumulated = stat0.dangling.mapIt(it.partialPath).toHashSet
      cascaded = stats.dangling.mapIt(it.partialPath).toHashSet
    check accumulated == cascaded
  # Make sure that there are no trivial cases
  let trivialCases = toSeq(cscStep.values).filterIt(it[0] <= 1).len
  check trivialCases == 0
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|