Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Nimbus
|
2024-02-01 21:27:48 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-05-11 14:25:29 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
|
|
|
import
|
2023-08-22 18:44:54 +00:00
|
|
|
std/[hashes, os, sequtils],
|
2023-05-11 14:25:29 +00:00
|
|
|
eth/common,
|
|
|
|
rocksdb,
|
2023-06-09 11:17:37 +00:00
|
|
|
../../nimbus/db/aristo/[
|
2024-05-03 17:38:17 +00:00
|
|
|
aristo_debug, aristo_desc, aristo_delete, aristo_journal/journal_scheduler,
|
2024-02-29 21:10:24 +00:00
|
|
|
aristo_hashify, aristo_hike, aristo_merge],
|
2023-05-11 14:25:29 +00:00
|
|
|
../../nimbus/db/kvstore_rocksdb,
|
2023-06-02 10:04:29 +00:00
|
|
|
../../nimbus/sync/protocol/snap/snap_types,
|
|
|
|
../test_sync_snap/test_types,
|
|
|
|
../replay/[pp, undump_accounts, undump_storages]
|
|
|
|
|
2023-06-12 18:16:03 +00:00
|
|
|
from ../../nimbus/sync/snap/range_desc
|
2023-11-08 12:18:32 +00:00
|
|
|
import NodeKey, ByteArray32
|
2023-06-12 18:16:03 +00:00
|
|
|
|
2023-06-02 10:04:29 +00:00
|
|
|
type
  ProofTrieData* = object
    ## Test sample: one sub-trie worth of data as undumped from the
    ## snap-sync test captures, ready to be merged into an `Aristo` DB.
    root*: Hash256                ## State root the sample belongs to
    id*: int                      ## Sample selector (1-based where set)
    proof*: seq[SnapProof]        ## Proof nodes collected by snap sync
    kvpLst*: seq[LeafTiePayload]  ## Leaf key/payload pairs to merge
|
2023-06-02 10:04:29 +00:00
|
|
|
|
2023-09-05 13:57:20 +00:00
|
|
|
const
  samples = [
    # Journal scheduler capacity triples used as test samples
    [ (4,0,10), (3,3,10), (3,4,10), (3,5,10)],
    [(2,0,high int),(1,1,high int),(1,1,high int),(1,1,high int)],
    ]

  # Pair each sample with a run length: 150% of the minimum number of
  # slots covered by its scheduler capacity.
  LyoSamples* = samples.mapIt((it, (3 * it.capacity.minCovered) div 2))
|
2023-09-05 13:57:20 +00:00
|
|
|
|
2023-06-02 10:04:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func toPfx(indent: int): string =
  ## Newline followed by `indent` spaces, used as a line prefix when
  ## pretty printing multi-line output.
  result = "\n"
  for _ in 0 ..< indent:
    result.add ' '
|
2023-05-11 14:25:29 +00:00
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func to(a: NodeKey; T: type UInt256): T =
  ## Interpret the 32 byte node key as a big endian 256 bit integer.
  T.fromBytesBE ByteArray32(a)
|
|
|
|
|
|
|
|
func to(a: NodeKey; T: type PathID): T =
  ## Convert a snap-sync node key into an `Aristo` path ID (via `UInt256`).
  a.to(UInt256).to(T)
|
2023-06-12 18:16:03 +00:00
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
2023-06-09 11:17:37 +00:00
|
|
|
# Public pretty printing
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func pp*(
    w: ProofTrieData;
    rootID: VertexID;
    db: AristoDbRef;
    indent = 4;
      ): string =
  ## Pretty print a single test sample: root key, sample id, number of
  ## proof nodes, and the list of leaf key/payload-type pairs, one per
  ## line at the given `indent`.
  ##
  ## NOTE(review): `rootID` is unused in this body — presumably kept for
  ## signature parity with the `openArray` variant; confirm before
  ## removing.
  let
    pfx = indent.toPfx
    rootLink = w.root.to(HashKey)
  result = "(" & rootLink.pp(db)
  result &= "," & $w.id & ",[" & $w.proof.len & "],"
  result &= pfx & " ["
  for n,kvp in w.kvpLst:
    if 0 < n:
      result &= "," & pfx & " "
    result &= "(" & kvp.leafTie.pp(db) & "," & $kvp.payload.pType & ")"
  result &= "])"
|
|
|
|
|
|
|
|
proc pp*(w: ProofTrieData; indent = 4): string =
  ## Pretty print `w` without a backing database: a fresh, empty
  ## `AristoDbRef` is used so keys are rendered without DB context.
  let scratchDb = AristoDbRef()
  pp(w, VertexID(1), scratchDb, indent)
|
2023-06-09 11:17:37 +00:00
|
|
|
|
2023-06-12 13:48:47 +00:00
|
|
|
proc pp*(
    w: openArray[ProofTrieData];
    rootID: VertexID;
    db: AristoDbRef;
    indent = 4): string =
  ## Pretty print a list of test samples, one sample per line.
  let sep = "," & toPfx(indent) & " "
  result = "["
  for n, ptd in w:
    if 0 < n:
      result &= sep
    result &= ptd.pp(rootID, db, indent + 1)
  result &= "]"
|
2023-06-09 11:17:37 +00:00
|
|
|
|
|
|
|
proc pp*(w: openArray[ProofTrieData]; indent = 4): string =
  ## Pretty print a list of test samples without a backing database,
  ## one sample per line.
  let sep = "," & toPfx(indent) & " "
  result = "["
  for n, ptd in w:
    if 0 < n:
      result &= sep
    result &= ptd.pp(indent + 1)
  result &= "]"
|
|
|
|
|
2023-07-04 18:24:03 +00:00
|
|
|
proc pp*(ltp: LeafTiePayload; db: AristoDbRef): string =
  ## Render a leaf key/payload pair as "(key,payload)".
  result = "("
  result.add ltp.leafTie.pp(db)
  result.add ","
  result.add ltp.payload.pp(db)
  result.add ")"
|
|
|
|
|
2023-06-09 11:17:37 +00:00
|
|
|
# ----------
|
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  ## Conditional debug printing: silent unless `noisy` is set.
  ## With no `args`, the prefix itself is announced; otherwise the
  ## joined arguments follow the prefix, separated by a space unless
  ## the prefix is empty or already ends in a blank.
  if not noisy:
    return
  if args.len == 0:
    echo "*** ", pfx
    return
  let tail = args.toSeq.join
  if 0 < pfx.len and pfx[^1] != ' ':
    echo pfx, " ", tail
  else:
    echo pfx, tail
|
|
|
|
|
2023-06-09 11:17:37 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
2023-06-02 10:04:29 +00:00
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
# Shortcut equality operators so that test vectors can be written with
# plain integer literals instead of explicit type conversions.

func `==`*[T: AristoError|VertexID](a: T, b: int): bool =
  ## Compare an error code or vertex ID against an integer literal.
  a == T(b)

func `==`*(a: (VertexID|QueueID,AristoError), b: (int,int)): bool =
  ## Compare an (ID, error) pair against a plain integer pair.
  (a[0].int,a[1].int) == b

func `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
  ## Mixed form: integer literal vertex ID, explicit error code.
  (a[0].int,a[1]) == b

func `==`*(a: (int,AristoError), b: (int,int)): bool =
  ## Mixed form: plain first entry, integer literal error code.
  (a[0],a[1].int) == b

func `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
  ## Triple variant with leading plain integer.
  (a[0], a[1].int, a[2].int) == b

func `==`*(a: (QueueID,Hash), b: (int,Hash)): bool =
  ## Compare a (queue ID, hash) pair against an (integer, hash) pair.
  (a[0].int,a[1]) == b
|
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func to*(a: Hash256; T: type UInt256): T =
  ## Interpret the 32 byte hash as a big endian 256 bit integer.
  T.fromBytesBE a.data

func to*(a: Hash256; T: type PathID): T =
  ## Convert a hash into an `Aristo` path ID (via `UInt256`).
  a.to(UInt256).to(T)

func to*(a: HashKey; T: type UInt256): T =
  ## Interpret the (possibly shorter than 32 byte) hash key as a big
  ## endian integer, left padding with zero bytes.
  T.fromBytesBE 0u8.repeat(32 - a.len) & @a

func to*(fid: FilterID; T: type Hash256): T =
  ## Embed the filter ID into a 32 byte hash, big endian.
  result.data = fid.uint64.u256.toBytesBE
|
|
|
|
|
2023-06-02 10:04:29 +00:00
|
|
|
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
  ## Convert test data into usable in-memory format.
  ##
  ## Undumps accounts from the sample's capture file, keeping records
  ## with index in `[firstItem, lastItem]`. The state root of the first
  ## selected record is pinned; the scan stops early when a record with
  ## a different root is met.
  let file = sample.file.findFilePath.value
  var root: Hash256
  for w in file.undumpNextAccount:
    let n = w.seenAccounts - 1
    if n < sample.firstItem:
      continue
    if sample.lastItem < n:
      break
    if sample.firstItem == n:
      root = w.root               # pin root of the first selected record
    elif w.root != root:
      break                       # stop at a state root change
    result.add w
|
|
|
|
|
|
|
|
proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
  ## Convert test data into usable in-memory format.
  ##
  ## Storage slot variant of the accounts undumper above: the record
  ## window `[firstItem, lastItem]` is still selected by the *accounts*
  ## counter, and the scan stops when the state root changes.
  let file = sample.file.findFilePath.value
  var root: Hash256
  for w in file.undumpNextStorages:
    let n = w.seenAccounts - 1 # storages selector based on accounts
    if n < sample.firstItem:
      continue
    if sample.lastItem < n:
      break
    if sample.firstItem == n:
      root = w.root               # pin root of the first selected record
    elif w.root != root:
      break                       # stop at a state root change
    result.add w
|
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
  ## Repack undumped accounts as `ProofTrieData` samples. A fresh
  ## `VertexID` is allocated whenever the state root changes between
  ## consecutive records; entries without accounts are skipped.
  var (rootKey, rootVid) = (Hash256(), VertexID(0))
  for w in ua:
    let thisRoot = w.root
    if rootKey != thisRoot:
      (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
    if 0 < w.data.accounts.len:
      result.add ProofTrieData(
        root: rootKey,
        proof: w.data.proof,
        kvpLst: w.data.accounts.mapIt(LeafTiePayload(
          leafTie: LeafTie(
            root: rootVid,
            path: it.accKey.to(PathID)),
          payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))
|
2023-06-09 11:17:37 +00:00
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
  ## Repack undumped storage slots as `ProofTrieData` samples. A fresh
  ## `VertexID` is allocated whenever the storage root changes; the
  ## proof of each undump record is attached to its last sample.
  var (rootKey, rootVid) = (Hash256(), VertexID(0))
  for n,s in us:
    for w in s.data.storages:
      let thisRoot = w.account.storageRoot
      if rootKey != thisRoot:
        (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
      if 0 < w.data.len:
        result.add ProofTrieData(
          root: thisRoot,
          id: n + 1,              # 1-based undump record selector
          kvpLst: w.data.mapIt(LeafTiePayload(
            leafTie: LeafTie(
              root: rootVid,
              path: it.slotHash.to(PathID)),
            payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
    if 0 < result.len:
      result[^1].proof = s.data.proof
|
2023-06-02 10:04:29 +00:00
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
func mapRootVid*(
    a: openArray[LeafTiePayload];
    toVid: VertexID;
      ): seq[LeafTiePayload] =
  ## Return a copy of `a` with every leaf tie re-rooted at `toVid`;
  ## paths and payloads are left unchanged.
  result = newSeqOfCap[LeafTiePayload](a.len)
  for ltp in a:
    result.add LeafTiePayload(
      leafTie: LeafTie(root: toVid, path: ltp.leafTie.path),
      payload: ltp.payload)
|
2023-06-02 10:04:29 +00:00
|
|
|
|
2024-02-01 21:27:48 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public functions
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2024-02-29 21:10:24 +00:00
|
|
|
proc hashify*(
    db: AristoDbRef;
    noisy: bool;
      ): Result[void,(VertexID,AristoError)] =
  ## Debugging wrapper around `aristo_hashify.hashify()`.
  ##
  ## When the `aristo_hashify` module is compiled with its `noisy`
  ## tracer symbol declared, the call is routed through `exec()` so the
  ## tracer can be switched on via the `noisy` argument; otherwise the
  ## plain library function is called directly.
  when declared(aristo_hashify.noisy):
    aristo_hashify.exec(noisy, aristo_hashify.hashify(db))
  else:
    aristo_hashify.hashify(db)
|
|
|
|
|
|
|
|
|
|
|
|
proc delete*(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
    accPath: PathID;
    noisy: bool;
      ): Result[bool,(VertexID,AristoError)] =
  ## Debugging wrapper around `aristo_delete.delete()`, path variant.
  ##
  ## Routes through `exec()` when the `aristo_delete` module exposes a
  ## `noisy` tracer symbol, so tracing can be switched on via `noisy`;
  ## otherwise the library function is called directly.
  when declared(aristo_delete.noisy):
    aristo_delete.exec(noisy, aristo_delete.delete(db, root, path, accPath))
  else:
    aristo_delete.delete(db, root, path, accPath)
|
|
|
|
|
|
|
|
proc delete*(
    db: AristoDbRef;
    lty: LeafTie;
    accPath: PathID;
    noisy: bool;
      ): Result[bool,(VertexID,AristoError)] =
  ## Debugging wrapper around `aristo_delete.delete()`, `LeafTie`
  ## variant.
  ##
  ## Routes through `exec()` when the `aristo_delete` module exposes a
  ## `noisy` tracer symbol, so tracing can be switched on via `noisy`;
  ## otherwise the library function is called directly.
  when declared(aristo_delete.noisy):
    aristo_delete.exec(noisy, aristo_delete.delete(db, lty, accPath))
  else:
    aristo_delete.delete(db, lty, accPath)
|
|
|
|
|
|
|
|
proc delTree*(
    db: AristoDbRef;
    root: VertexID;
    accPath: PathID;
    noisy: bool;
      ): Result[void,(VertexID,AristoError)] =
  ## Debugging wrapper around `aristo_delete.delTree()`.
  ##
  ## Routes through `exec()` when the `aristo_delete` module exposes a
  ## `noisy` tracer symbol, so tracing can be switched on via `noisy`;
  ## otherwise the library function is called directly.
  when declared(aristo_delete.noisy):
    aristo_delete.exec(noisy, aristo_delete.delTree(db, root, accPath))
  else:
    aristo_delete.delTree(db, root, accPath)
|
|
|
|
|
|
|
|
|
2024-03-20 07:35:38 +00:00
|
|
|
proc merge*(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
    data: openArray[byte];
    accPath: PathID;
    noisy: bool;
      ): Result[bool, AristoError] =
  ## Debugging wrapper around `aristo_merge.merge()`.
  ##
  ## Routes through `exec()` when the `aristo_merge` module exposes a
  ## `noisy` tracer symbol, so tracing can be switched on via `noisy`;
  ## otherwise the library function is called directly.
  when declared(aristo_merge.noisy):
    aristo_merge.exec(noisy, aristo_merge.merge(db, root, path, data, accPath))
  else:
    aristo_merge.merge(db, root, path, data, accPath)
|
|
|
|
|
|
|
|
proc mergePayload*(
    db: AristoDbRef;
    lty: LeafTie;
    pyl: PayloadRef;
    accPath: PathID;
    noisy: bool;
      ): Result[Hike,AristoError] =
  ## Debugging wrapper around `aristo_merge.mergePayload()`.
  ##
  ## Routes through `exec()` when the `aristo_merge` module exposes a
  ## `noisy` tracer symbol, so tracing can be switched on via `noisy`;
  ## otherwise the library function is called directly.
  when declared(aristo_merge.noisy):
    aristo_merge.exec(noisy, aristo_merge.mergePayload(db, lty, pyl, accPath))
  else:
    aristo_merge.mergePayload(db, lty, pyl, accPath)
|
|
|
|
|
|
|
|
|
2024-02-01 21:27:48 +00:00
|
|
|
proc mergeList*(
    db: AristoDbRef;                   # Database, top layer
    leafs: openArray[LeafTiePayload];  # Leaf items to add to the database
    noisy = false;
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Variant of `merge()` for leaf lists.
  ##
  ## Merges each leaf payload in turn (with a void account path),
  ## counting successful merges and already-present duplicates. Any
  ## other error aborts the loop.
  var (merged, dups) = (0, 0)
  for n,w in leafs:
    noisy.say "*** mergeList",
      " n=", n, "/", leafs.len
    let rc = db.mergePayload(w.leafTie, w.payload, VOID_PATH_ID, noisy=noisy)
    noisy.say "*** mergeList",
      " n=", n, "/", leafs.len,
      " rc=", (if rc.isOk: "ok" else: $rc.error),
      "\n -------------\n"
    if rc.isOk:
      merged.inc
    elif rc.error in {MergeLeafPathCachedAlready,MergeLeafPathOnBackendAlready}:
      dups.inc
    else:
      # NOTE(review): on a hard error the loop index `n` is returned in
      # the `merged` slot rather than the number actually merged —
      # confirm callers rely on this before changing it.
      return (n,dups,rc.error)

  (merged, dups, AristoError(0))
|
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|