Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why;
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why;
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Nimbus
|
2024-02-01 21:27:48 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-07-05 13:50:11 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
2023-08-17 13:42:01 +00:00
|
|
|
## Aristo (aka Patricia) DB records transaction based merge test
|
2023-07-05 13:50:11 +00:00
|
|
|
|
|
|
|
import
|
|
|
|
std/[algorithm, bitops, sequtils, sets, tables],
|
|
|
|
eth/common,
|
2023-08-17 13:42:01 +00:00
|
|
|
results,
|
2023-07-05 13:50:11 +00:00
|
|
|
unittest2,
|
2023-09-13 02:32:38 +00:00
|
|
|
stew/endians2,
|
2024-06-05 15:08:29 +00:00
|
|
|
../../nimbus/db/opts,
|
2023-08-07 17:45:23 +00:00
|
|
|
../../nimbus/db/aristo/[
|
2024-02-29 21:10:24 +00:00
|
|
|
aristo_check,
|
|
|
|
aristo_debug,
|
|
|
|
aristo_delete,
|
|
|
|
aristo_desc,
|
|
|
|
aristo_get,
|
|
|
|
aristo_hike,
|
|
|
|
aristo_init/persistent,
|
|
|
|
aristo_layers,
|
|
|
|
aristo_merge,
|
|
|
|
aristo_nearby,
|
|
|
|
aristo_tx],
|
2023-10-11 19:09:11 +00:00
|
|
|
../replay/xcheck,
|
2023-07-05 13:50:11 +00:00
|
|
|
./test_helpers
|
|
|
|
|
|
|
|
type
  PrngDesc = object
    ## Tiny deterministic pseudo-random number generator state, used so
    ## the shuffling in these tests is reproducible across runs.
    prng: uint32                       ## random state

  KnownHasherFailure* = seq[(string,(int,AristoError))]
    ## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
    ## List of expected/known hasher failures, keyed by test-case ID.
|
|
|
|
|
2023-07-12 23:03:14 +00:00
|
|
|
const
  MaxFilterBulk = 150_000
    ## Policy setting for `pack()`: above this combined layer/filter size,
    ## `schedStow()` below persists to the backend rather than stowing.
|
2023-08-10 20:01:28 +00:00
|
|
|
|
2023-07-05 13:50:11 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc posixPrngRand(state: var uint32): byte =
  ## Pseudo random byte generator following the POSIX.1-2001 example
  ## implementation of `rand()`, see manual page rand(3). Advances `state`
  ## in place and returns one pseudo-random byte per call.
  state = state * 1103515245 + 12345
  # Take 15 bits from the upper half of the state (the "mod 2^31" idea of
  # the POSIX example) ...
  let sample = (state shr 16) and 0x7fff
  # ... and hand back the second byte of that sample.
  byte(sample shr 8)
|
|
|
|
|
|
|
|
proc rand[W: SomeInteger|VertexID](ap: var PrngDesc; T: type W): T =
  ## Draw a pseudo-random value of type `T` from the `ap` PRNG stream by
  ## filling `sizeof T` bytes from `posixPrngRand()` and decoding them
  ## big-endian (so the value is independent of host endianness.)
  var a: array[sizeof T,byte]
  for n in 0 ..< sizeof T:
    a[n] = ap.prng.posixPrngRand().byte
  # Fix: the original used three stand-alone `when` statements where only
  # the last one carried an `else` branch. For 1- or 2-byte instantiations
  # this both re-declared `w` and additionally ran the `uint64` decoder.
  # Chaining the conditions with `elif` selects exactly one branch.
  when sizeof(T) == 1:
    let w = uint8.fromBytesBE(a).T
  elif sizeof(T) == 2:
    let w = uint16.fromBytesBE(a).T
  elif sizeof(T) == 4:
    let w = uint32.fromBytesBE(a).T
  else:
    let w = uint64.fromBytesBE(a).T
  when T is SomeUnsignedInt:
    # That way, `fromBytesBE()` can be applied to `uint`
    result = w
  else:
    # That way the result is independent of endianness
    (addr result).copyMem(unsafeAddr w, sizeof w)
|
|
|
|
|
|
|
|
proc init(T: type PrngDesc; seed: int): PrngDesc =
  ## Initialise a fresh pseudo-random number stream from `seed`, keeping
  ## only the lower 31 bits (non-negative state.)
  T(prng: uint32(seed and 0x7fffffff))
|
|
|
|
|
|
|
|
proc rand(td: var PrngDesc; top: int): int =
  ## Pseudo-random integer in the range `0 ..< top`, using rejection
  ## sampling against a power-of-two mask so the result stays uniform.
  if 0 < top:
    # Smallest all-ones bit mask covering `top`
    let mask = (1 shl (8 * sizeof(int) - top.countLeadingZeroBits)) - 1
    # Rejection sampling: with mask < 2*top the expected number of tries
    # is < 2, so 100 attempts failing is practically impossible.
    for _ in 0 ..< 100:
      let w = mask and td.rand(typeof(result))
      if w < top:
        return w
  raiseAssert "Not here (!)"
|
|
|
|
|
|
|
|
# -----------------------
|
|
|
|
|
|
|
|
proc randomisedLeafs(
    db: AristoDbRef;
    ltys: HashSet[LeafTie];
    td: var PrngDesc;
      ): Result[seq[(LeafTie,VertexID)],(VertexID,AristoError)] =
  ## Resolve each `LeafTie` from `ltys` to its leaf vertex ID (via
  ## `hikeUp()`) and return the pairs in a reproducible pseudo-random
  ## order driven by the `td` PRNG state.
  var lvp: seq[(LeafTie,VertexID)]
  for lty in ltys:
    let hike = lty.hikeUp(db).valueOr:
      return err((error[0],error[1]))
    lvp.add (lty,hike.legs[^1].wp.vid)

  # Sort first so the subsequent shuffle does not depend on the hash-set
  # iteration order (which may vary between runs/compilers.)
  var lvp2 = lvp.sorted(
    cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0],b[0]))
  if 2 < lvp2.len:
    # Fisher-Yates style shuffle, reproducible through the `td` state
    for n in 0 ..< lvp2.len-1:
      let r = n + td.rand(lvp2.len - n)
      lvp2[n].swap lvp2[r]
  ok lvp2
|
2023-07-05 13:50:11 +00:00
|
|
|
|
2024-03-15 14:20:00 +00:00
|
|
|
proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} =
  ## Defer action: collapse any open transaction stack without committing,
  ## close the database, and reset the `db` argument to nil so a caller's
  ## own `defer` can detect the descriptor is gone.
  if not db.isNil:
    let rx = db.txTop()
    if rx.isOk:
      # Roll everything back (commit=false) before shutting down
      let rc = rx.value.collapse(commit=false)
      xCheckRc rc.error == 0
    db.finish(eradicate=true)
    db = AristoDbRef(nil)
|
2023-07-05 13:50:11 +00:00
|
|
|
|
2023-12-19 12:39:23 +00:00
|
|
|
proc schedStow(
    db: AristoDbRef;                   # Database
    chunkedMpt = false;                # Partial data (e.g. from `snap`)
      ): Result[void,AristoError] =
  ## Scheduled storage: persist to the backend when the combined size of
  ## the cached layers or the balancer filter exceeds `MaxFilterBulk`,
  ## otherwise just stow (keep in the filter.)
  let
    layersMeter = db.nLayersVtx() + db.nLayersKey()
    filterMeter = if db.balancer.isNil: 0
                  else: db.balancer.sTab.len + db.balancer.kMap.len
    persistent = MaxFilterBulk < max(layersMeter, filterMeter)
  if persistent:
    db.persist(chunkedMpt=chunkedMpt)
  else:
    db.stow(chunkedMpt=chunkedMpt)
|
2023-12-19 12:39:23 +00:00
|
|
|
|
2023-07-05 13:50:11 +00:00
|
|
|
proc saveToBackend(
    tx: var AristoTxRef;
    chunkedMpt: bool;
    relax: bool;
    noisy: bool;
    debugID: int;
      ): bool =
  ## Commit the two-level transaction stack held in `tx`, save the result
  ## to the backend via `schedStow()`, verify consistency at each step,
  ## and re-open a fresh two-level transaction stack in `tx`.
  ## NOTE(review): the `xCheck*` templates (from ../replay/xcheck) are
  ## assumed to return `false` from this proc on failure — verify there.
  var db = tx.to(AristoDbRef)

  # Verify context: nesting level must be 2 (i.e. two transactions)
  xCheck tx.level == 2

  block:
    let rc = db.checkTop()
    xCheckRc rc.error == (0,0)

  # Commit and hashify the current layer
  block:
    let rc = tx.commit()
    xCheckRc rc.error == 0

  # Make sure MPT hashes are OK
  xCheck db.dirty.len == 0

  block:
    let rc = db.txTop()
    xCheckRc rc.error == 0
    tx = rc.value

  # Verify context: nesting level must be 1 (i.e. one transaction)
  xCheck tx.level == 1

  block:
    let rc = db.checkBE(relax=true)
    xCheckRc rc.error == (0,0)

  # Commit and save to backend
  block:
    let rc = tx.commit()
    xCheckRc rc.error == 0

  # Make sure MPT hashes are OK
  xCheck db.dirty.len == 0

  block:
    let rc = db.txTop()
    xCheckErr rc.value.level < 0 # force error

  block:
    let rc = db.schedStow(chunkedMpt=chunkedMpt)
    xCheckRc rc.error == 0

  block:
    # Strict backend check only when `relax` is false
    let rc = db.checkBE(relax=relax)
    xCheckRc rc.error == (0,0):
      noisy.say "***", "saveToBackend (8)", " debugID=", debugID

  # Update layers to original level
  tx = db.txBegin().value.to(AristoDbRef).txBegin().value

  true
|
|
|
|
|
|
|
|
proc saveToBackendWithOops(
    tx: var AristoTxRef;
    chunkedMpt: bool;
    noisy: bool;
    debugID: int;
    oops: (int,AristoError);
      ): bool =
  ## Variant of `saveToBackend()` without the `checkTop()`/`checkBE()`
  ## consistency verifications, for runs with known/expected failures.
  ## NOTE(review): the `oops` argument is not referenced in this body —
  ## presumably kept for signature compatibility with callers; confirm.
  var db = tx.to(AristoDbRef)

  # Verify context: nesting level must be 2 (i.e. two transactions)
  xCheck tx.level == 2

  # Commit and hashify the current layer
  block:
    let rc = tx.commit()
    xCheckRc rc.error == 0

  # Make sure MPT hashes are OK
  xCheck db.dirty.len == 0

  block:
    let rc = db.txTop()
    xCheckRc rc.error == 0
    tx = rc.value

  # Verify context: nesting level must be 1 (i.e. one transaction)
  xCheck tx.level == 1

  # Commit and save to backend
  block:
    let rc = tx.commit()
    xCheckRc rc.error == 0

  # Make sure MPT hashes are OK
  xCheck db.dirty.len == 0

  block:
    let rc = db.txTop()
    xCheckErr rc.value.level < 0 # force error

  block:
    let rc = db.schedStow(chunkedMpt=chunkedMpt)
    xCheckRc rc.error == 0

  # Update layers to original level
  tx = db.txBegin().value.to(AristoDbRef).txBegin().value

  true
|
|
|
|
|
|
|
|
|
|
|
|
proc fwdWalkVerify(
    db: AristoDbRef;
    root: VertexID;
    leftOver: HashSet[LeafTie];
    noisy: bool;
    debugID: int;
      ): bool =
  ## Walk the `root` sub-trie in ascending key order and verify that the
  ## visited leafs are exactly the `leftOver` set, and that the walk
  ## terminates with `NearbyBeyondRange` past the last key.
  let
    nLeafs = leftOver.len
  var
    leftOver = leftOver                # local copy, keys removed as seen
    last = LeafTie()
    n = 0
  for (key,_) in db.rightPairs low(LeafTie,root):
    xCheck key in leftOver:
      noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID
    leftOver.excl key
    last = key
    n.inc

  # Verify stop condition
  if last.root == VertexID(0):
    # Nothing was visited at all
    last = low(LeafTie,root)
  elif last != high(LeafTie,root):
    last = last.next
  let rc = last.right db
  xCheck rc.isErr
  xCheck rc.error[1] == NearbyBeyondRange
  xCheck n == nLeafs

  true
|
2023-07-05 13:50:11 +00:00
|
|
|
|
|
|
|
proc revWalkVerify(
    db: AristoDbRef;
    root: VertexID;
    leftOver: HashSet[LeafTie];
    noisy: bool;
    debugID: int;
      ): bool =
  ## Mirror image of `fwdWalkVerify()`: walk the `root` sub-trie in
  ## descending key order and verify the visited leafs equal `leftOver`
  ## and that the walk terminates with `NearbyBeyondRange`.
  let
    nLeafs = leftOver.len
  var
    leftOver = leftOver                # local copy, keys removed as seen
    last = LeafTie()
    n = 0
  for (key,_) in db.leftPairs high(LeafTie,root):
    xCheck key in leftOver:
      noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID
    leftOver.excl key
    last = key
    n.inc

  # Verify stop condition
  if last.root == VertexID(0):
    # Nothing was visited at all
    last = high(LeafTie,root)
  elif last != low(LeafTie,root):
    last = last.prev
  let rc = last.left db
  xCheck rc.isErr
  xCheck rc.error[1] == NearbyBeyondRange
  xCheck n == nLeafs

  true
|
2023-07-05 13:50:11 +00:00
|
|
|
|
Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why;
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why;
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the gurrent state changes
if needed.
* Update API tracking log mode: `info` => `debug
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
proc mergeRlpData*(
    db: AristoDbRef;                   # Database, top layer
    path: PathID;                      # Path into database
    rlpData: openArray[byte];          # RLP encoded payload data
      ): Result[void,AristoError] =
  ## Merge an RLP-encoded payload at `path` into the main trie (root
  ## vertex ID 1.) "Already present" merge outcomes are treated as
  ## success; any other error is passed through.
  block body:
    discard db.mergeLeaf(
      LeafTiePayload(
        leafTie: LeafTie(
          root: VertexID(1),
          path: path.normal),
        payload: PayloadRef(
          pType: RlpData,
          rlpBlob: @rlpData))).valueOr:
      # A duplicate entry is fine for this helper's purpose
      if error in {MergeLeafPathCachedAlready,MergeLeafPathOnBackendAlready}:
        break body
      return err(error)
  ok()
|
|
|
|
|
2023-07-05 13:50:11 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public test function
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2024-02-01 21:27:48 +00:00
|
|
|
proc testTxMergeAndDeleteOneByOne*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                   # Rocks DB storage directory
      ): bool =
  ## For each sample in `list`: merge all leafs into a fresh database,
  ## then delete them one by one in pseudo-random order, periodically
  ## saving to the backend and (for small tails) verifying the remaining
  ## set by alternating forward/reverse walks.
  var
    prng = PrngDesc.init 42
    db = AristoDbRef(nil)
    fwdRevVfyToggle = true             # alternate fwd/rev walk verification
  defer:
    if not db.isNil:
      db.finish(eradicate=true)

  for n,w in list:
    # Start with brand new persistent database.
    db = block:
      if 0 < rdbPath.len:
        let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
        xCheckRc rc.error == 0
        rc.value
      else:
        AristoDbRef.init(MemBackendRef)

    # Start transaction (double frame for testing)
    xCheck db.txTop.isErr
    var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
    xCheck tx.isTop()
    xCheck tx.level == 2

    # Reset database so that the next round has a clean setup
    defer: db.innerCleanUp

    # Merge leaf data into main trie (w/vertex ID 1)
    let kvpLeafs = block:
      var lst = w.kvpLst.mapRootVid VertexID(1)
      # The list might be reduced for isolation of particular properties,
      # e.g. lst.setLen(min(5,lst.len))
      lst
    for i,leaf in kvpLeafs:
      let rc = db.mergeLeaf leaf
      xCheckRc rc.error == 0

    # List of all leaf entries that should be on the database
    var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet

    # Provide a (reproducible) pseudo-random copy of the leafs list
    let leafVidPairs = block:
      let rc = db.randomisedLeafs(leafsLeft, prng)
      xCheckRc rc.error == (0,0)
      rc.value

    # Trigger subsequent saving tasks in loop below
    let (saveMod, saveRest, relax) = block:
      if leafVidPairs.len < 17: (7, 3, false)
      elif leafVidPairs.len < 31: (11, 7, false)
      else: (leafVidPairs.len div 5, 11, true)

    # === Loop over leafs ===
    for u,lvp in leafVidPairs:
      let
        runID = n + list.len * u
        tailWalkVerify = 7 # + 999
        doSaveBeOk = ((u mod saveMod) == saveRest)
        (leaf, lid) = lvp

      if doSaveBeOk:
        let saveBeOk = tx.saveToBackend(
          chunkedMpt=false, relax=relax, noisy=noisy, runID)
        xCheck saveBeOk:
          noisy.say "***", "del(2)",
            " u=", u,
            " n=", n, "/", list.len,
            "\n leaf=", leaf.pp(db),
            "\n db\n ", db.pp(backendOk=true),
            ""

      # Delete leaf
      block:
        let rc = db.delete(leaf, VOID_PATH_ID)
        xCheckRc rc.error == (0,0)

      # Update list of remaining leafs
      leafsLeft.excl leaf

      # Deleted leaf vertex must no longer resolve
      let deletedVtx = tx.db.getVtx lid
      xCheck deletedVtx.isValid == false

      # Walking the database is too slow for large tables. So the hope is that
      # potential errors will not go away and rather pop up later, as well.
      if leafsLeft.len <= tailWalkVerify:
        if u < leafVidPairs.len-1:
          if fwdRevVfyToggle:
            fwdRevVfyToggle = false
            if not db.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID):
              return
          else:
            fwdRevVfyToggle = true
            if not db.revWalkVerify(leaf.root, leafsLeft, noisy, runID):
              return

    when true and false:
      noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len

  true
|
|
|
|
|
|
|
|
|
2024-02-01 21:27:48 +00:00
|
|
|
proc testTxMergeAndDeleteSubTree*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                   # Rocks DB storage directory
      ): bool =
  ## For each sample in `list`: merge all leafs into a secondary trie
  ## (root vertex ID 2), save to backend, then delete the whole sub-tree
  ## with `delTree()` and save again.
  const
    # Need to reconfigure for the test, root ID 1 cannot be deleted as a trie
    testRootVid = VertexID(2)
  var
    prng = PrngDesc.init 42
    db = AristoDbRef(nil)
  defer:
    if not db.isNil:
      db.finish(eradicate=true)

  for n,w in list:
    # Start with brand new persistent database.
    db = block:
      if 0 < rdbPath.len:
        let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
        xCheckRc rc.error == 0
        rc.value
      else:
        AristoDbRef.init(MemBackendRef)

    if testRootVid != VertexID(1):
      # Add a dummy entry so the journal logic can be triggered
      discard db.merge(VertexID(1), @[n.byte], @[42.byte], VOID_PATH_ID)

    # Start transaction (double frame for testing)
    xCheck db.txTop.isErr
    var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
    xCheck tx.isTop()
    xCheck tx.level == 2

    # Reset database so that the next round has a clean setup
    defer: db.innerCleanUp

    # Merge leaf data into main trie (w/vertex ID 2)
    let kvpLeafs = block:
      var lst = w.kvpLst.mapRootVid testRootVid
      # The list might be reduced for isolation of particular properties,
      # e.g. lst.setLen(min(5,lst.len))
      lst
    for i,leaf in kvpLeafs:
      let rc = db.mergeLeaf leaf
      xCheckRc rc.error == 0

    # List of all leaf entries that should be on the database
    var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet

    # Provide a (reproducible) pseudo-random copy of the leafs list
    let leafVidPairs = block:
      let rc = db.randomisedLeafs(leafsLeft, prng)
      xCheckRc rc.error == (0,0)
      rc.value
    discard leafVidPairs

    # === delete sub-tree ===
    block:
      let saveBeOk = tx.saveToBackend(
        chunkedMpt=false, relax=false, noisy=noisy, 1 + list.len * n)
      xCheck saveBeOk:
        noisy.say "***", "del(1)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""
    # Delete sub-tree
    block:
      let rc = db.delTree(testRootVid, VOID_PATH_ID)
      xCheckRc rc.error == (0,0):
        noisy.say "***", "del(2)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""

    if testRootVid != VertexID(1):
      # Update dummy entry so the journal logic can be triggered
      discard db.merge(VertexID(1), @[n.byte], @[43.byte], VOID_PATH_ID)

    block:
      let saveBeOk = tx.saveToBackend(
        chunkedMpt=false, relax=false, noisy=noisy, 2 + list.len * n)
      xCheck saveBeOk:
        noisy.say "***", "del(3)",
          " n=", n, "/", list.len,
          "\n db\n ", db.pp(backendOk=true),
          ""
    when true and false:
      noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len

  true
|
|
|
|
|
|
|
|
|
2023-07-05 13:50:11 +00:00
|
|
|
proc testTxMergeProofAndKvpList*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                   # Rocks DB storage directory
    resetDb = false;
    idPfx = "";
    oops: KnownHasherFailure = @[];
      ): bool =
  ## For each sample: optionally restart the database, merge proof nodes
  ## (when the sample carries a proof) plus the key-value leafs, then
  ## commit/save via `saveToBackendWithOops()` where the `oops` table
  ## lists known failures keyed by the generated test ID.
  let
    oopsTab = oops.toTable
  var
    db = AristoDbRef(nil)
    tx = AristoTxRef(nil)
    rootKey: Hash256
    count = 0                          # samples since the last DB restart
  defer:
    if not db.isNil:
      db.finish(eradicate=true)

  for n,w in list:

    # Start new database upon request
    if resetDb or w.root != rootKey or w.proof.len == 0:
      db.innerCleanUp
      db = block:
        # New DB with disabled filter slots management
        if 0 < rdbPath.len:
          let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
          xCheckRc rc.error == 0
          rc.value
        else:
          AristoDbRef.init(MemBackendRef)

      # Start transaction (double frame for testing)
      tx = db.txBegin().value.to(AristoDbRef).txBegin().value
      xCheck tx.isTop()

      # Update root
      rootKey = w.root
      count = 0
    count.inc

    let
      testId = idPfx & "#" & $w.id & "." & $n
      runID = n
      sTabLen = db.nLayersVtx()
      leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie

    if 0 < w.proof.len:
      # Register the proof root key, then merge the proof nodes under it
      let root = block:
        let rc = db.mergeProof(rootKey, VertexID(1))
        xCheckRc rc.error == 0
        rc.value

      let nMerged = block:
        let rc = db.mergeProof(w.proof, root)
        xCheckRc rc.error == 0
        rc.value

      xCheck w.proof.len == nMerged
      xCheck db.nLayersVtx() <= nMerged + sTabLen

    let merged = db.mergeList leafs
    # Duplicate leaf paths are tolerated here
    xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}
    xCheck merged.merged + merged.dups == leafs.len

    block:
      let oops = oopsTab.getOrDefault(testId,(0,AristoError(0)))
      if not tx.saveToBackendWithOops(
          chunkedMpt=true, noisy=noisy, debugID=runID, oops):
        return

    when true and false:
      noisy.say "***", "testTxMergeProofAndKvpList (1)",
        " <", n, "/", list.len-1, ">",
        " runID=", runID,
        " groups=", count, " merged=", merged

  true
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|