Aristo db transaction based interface (#1628)

* Provide transaction based interface for standard operations

* Provide unit tests for new Aristo interface using transactions

details:
  These new tests combine and replace several single-purpose tests.
  The now unused test sources will be kept for a while to be eventually
  removed.
Jordan Hrycaj 2023-07-05 14:50:11 +01:00 committed by GitHub
parent ff6673beac
commit ccf639fc3c
11 changed files with 962 additions and 25 deletions

nimbus/db/aristo.nim

@@ -0,0 +1,34 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Standard interface
## ===============================
##
{.push raises: [].}
import
eth/common,
aristo/aristo_desc/[aristo_types_identifiers, aristo_types_structural],
aristo/[aristo_constants, aristo_desc, aristo_init, aristo_transaction]
export
aristo_constants,
aristo_transaction,
aristo_types_identifiers,
aristo_types_structural,
AristoBackendType,
AristoDbRef,
AristoError,
init,
isValid,
finish
# End
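A minimal usage sketch of the umbrella module above (assuming it is imported relative to the repository root, that the backend selector `BackendRocksDB` and the `/var/tmp/rdb` scratch directory from the in-source example are usable, and that the caller prepares the `LeafTiePayload` item; the proc name `demoTxRound` is made up for illustration):

  import nimbus/db/aristo

  proc demoTxRound(leaf: LeafTiePayload): AristoError =
    ## Open a database, run one transaction frame, and tear down again.
    let rc = AristoDbRef.init(BackendRocksDB, "/var/tmp/rdb").to(AristoTxRef)
    if rc.isErr:
      return rc.error
    let
      tdb = rc.value                  # *base level* handle
      tx = tdb.begin.value            # open a transaction frame
    discard tx.put leaf               # stage a leaf entry
    discard tx.commit()               # hashify and store on the backend
    discard tdb.done(flush = true)    # close, wiping the scratch database
    AristoError(0)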


@@ -82,6 +82,9 @@ func isValid*(vtx: VertexRef): bool =
func isValid*(nd: NodeRef): bool =
nd != NodeRef(nil)
func isValid*(pld: PayloadRef): bool =
pld != PayloadRef(nil)
func isValid*(key: HashKey): bool =
key != VOID_HASH_KEY
@@ -92,6 +95,7 @@ func isValid*(vid: VertexID): bool =
vid != VertexID(0)
# ------------------------------------------------------------------------------
# Public functions, miscellaneous
# ------------------------------------------------------------------------------


@@ -188,4 +188,12 @@ type
RdbBeFinishSstWriter
RdbBeIngestSstWriter
# Transaction wrappers
TxDbStackNonEmpty
TxValidHandleExpected
TxBaseHandleExpected
TxTopHandleExpected
TxCacheKeyFetchFail
TxBeKeyFetchFail
# End


@@ -21,7 +21,7 @@ import
./aristo_desc/aristo_types_backend
export
AristoBackendType, AristoStorageType, TypedBackendRef
AristoBackendType, TypedBackendRef
# ------------------------------------------------------------------------------
# Public database constructors, destructor


@@ -0,0 +1,420 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Transaction interface
## ==================================
##
{.push raises: [].}
import
std/[sets, tables],
chronicles,
eth/common,
stew/results,
"."/[aristo_delete, aristo_desc, aristo_get, aristo_hashify,
aristo_hike, aristo_init, aristo_layer, aristo_merge, aristo_nearby]
logScope:
topics = "aristo-tx"
type
AristoTxRef* = ref object
## This descriptor replaces the `AristoDbRef` one for transaction based
## database operations and management.
parent: AristoTxRef ## Parent transaction (if any)
db: AristoDbRef ## Database access
# ------------------------------------------------------------------------------
# Public functions: Constructor/destructor
# ------------------------------------------------------------------------------
proc to*(
db: AristoDbRef; # `init()` result
T: type AristoTxRef; # Type discriminator
): T =
## Embed the database descriptor `db` into the transaction based one. After
## this operation, the argument descriptor should not be used anymore.
##
## The function will return a new transaction descriptor unless the stack of
## the argument `db` is already filled (e.g. using `push()` on the `db`.)
if db.stack.len == 0:
return AristoTxRef(db: db)
proc to*(
rc: Result[AristoDbRef,AristoError]; # `init()` result
T: type AristoTxRef; # Type discriminator
): Result[T, AristoError] =
## Variant of `to()` which passes on any constructor errors.
##
## Example:
## ::
## let rc = AristoDbRef.init(BackendRocksDB,"/var/tmp/rdb").to(AristoTxRef)
## ...
## let tdb = rc.value
## ...
##
if rc.isErr:
return err(rc.error)
let tdb = rc.value.to(AristoTxRef)
if tdb.isNil:
return err(TxDbStackNonEmpty)
ok tdb
proc done*(
tdb: AristoTxRef; # Database, transaction wrapper
flush = false; # Delete persistent data (if supported)
): Result[void,AristoError]
{.discardable.} =
## Database and transaction handle destructor. The `flush` argument is passed
## on to the database backend destructor. When used in the `BackendRocksDB`
## database, a `true` value for `flush` will wipe the entire database from
## the hard disc.
##
## Note that the function argument `tdb` must not have any pending open
## transaction layers, i.e. `tdb.isBase()` must return `true`.
if not tdb.parent.isNil or tdb.db.isNil:
return err(TxBaseHandleExpected)
tdb.db.finish flush
ok()
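# Hypothetical lifecycle sketch (illustrative only, the directory path is
# taken from the example above): set up a transaction wrapper and dispose of
# it again, including the data files on disc. Note that `done()` fails with
# `TxBaseHandleExpected` while a transaction frame is still open.
proc exampleOpenClose(): Result[void,AristoError] =
  let rc = AristoDbRef.init(BackendRocksDB, "/var/tmp/rdb").to(AristoTxRef)
  if rc.isErr:
    return err(rc.error)
  rc.value.done(flush = true)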
# ------------------------------------------------------------------------------
# Public functions: Classifiers
# ------------------------------------------------------------------------------
proc isBase*(tdb: AristoTxRef): bool =
## The function returns `true` if the argument handle `tdb` is the one that
## was returned from the `to()` constructor. A handle where this function
## returns `true` is called a *base level* handle.
##
## A *base level* handle may be a valid argument for the `begin()` function
## but not for either `commit()` or `rollback()`.
tdb.parent.isNil and not tdb.db.isNil
proc isTop*(tdb: AristoTxRef): bool =
## If the function returns `true` for the argument handle `tdb`, then this
## handle can be used on any of the following functions.
not tdb.parent.isNil and not tdb.db.isNil
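# Hypothetical helper built on the classifiers above: exactly one of the two
# predicates holds for a usable handle, and neither holds for a handle that
# has been invalidated, e.g. by `begin()` (see below.)
proc exampleHandleKind(tdb: AristoTxRef): string =
  if tdb.isBase: "base level"
  elif tdb.isTop: "top level"
  else: "expired"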
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------
proc begin*(
tdb: AristoTxRef; # Database, transaction wrapper
): Result[AristoTxRef,(VertexID,AristoError)] =
## Starts a new transaction. If successful, the function will return a new
## handle (or descriptor) which replaces the argument handle `tdb`. This
## argument handle `tdb` is rendered invalid for as long as the new
## transaction handle is valid. While valid, this new handle is called a
## *top level* handle.
##
## If the argument `tdb` is a *base level* or a *top level* handle, this
## function succeeds. Otherwise it will return the error
## `TxValidHandleExpected`.
##
## Example:
## ::
## proc doSomething(tdb: AristoTxRef) =
## let tx = tdb.begin.value # will crash on failure
## defer: tx.rollback()
## ...
## tx.commit()
##
if tdb.db.isNil:
return err((VertexID(0),TxValidHandleExpected))
tdb.db.push()
let pTx = AristoTxRef(parent: tdb, db: tdb.db)
tdb.db = AristoDbRef(nil)
ok pTx
proc rollback*(
tdb: AristoTxRef; # Database, transaction wrapper
): Result[AristoTxRef,(VertexID,AristoError)]
{.discardable.} =
## Given a *top level* handle, this function discards all database operations
## performed through this handle and returns the previous one which becomes
## either the *top level* or the *base level* handle, again.
##
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
block:
let rc = tdb.db.pop(merge = false)
if rc.isErr:
return err(rc.error)
let pTx = tdb.parent
pTx.db = tdb.db
tdb.parent = AristoTxRef(nil)
tdb.db = AristoDbRef(nil)
ok pTx
proc commit*(
tdb: AristoTxRef; # Database, transaction wrapper
hashify = false; # Always calc Merkle hashes if `true`
): Result[AristoTxRef,(VertexID,AristoError)]
{.discardable.} =
## Given a *top level* handle, this function accepts all database operations
## performed through this handle and merges it to the previous layer. It
## returns this previous layer which becomes either the *top level* or the
## *base level* handle, again.
##
## If the function return value is a *base level* handle, all the accumulated
## previous database operations will have been hashified and successfully
## stored on the persistent database.
##
## If the argument `hashify` is set `true`, the function will always hashify
## (i.e. calculate Merkle hashes) regardless of whether it is stored on the
## backend.
##
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
block:
let rc = tdb.db.pop(merge = true)
if rc.isErr:
return err(rc.error)
let pTx = tdb.parent
pTx.db = tdb.db
# Hashify and save (if any)
if hashify or pTx.parent.isNil:
let rc = tdb.db.hashify()
if rc.isErr:
return err(rc.error)
if pTx.parent.isNil:
let rc = tdb.db.save()
if rc.isErr:
return err(rc.error)
tdb.db = AristoDbRef(nil)
tdb.parent = AristoTxRef(nil)
ok pTx
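# Hypothetical sketch: two nested frames. Committing the inner frame merges
# it into the outer one, committing the outer frame returns the *base level*
# handle after hashifying and saving to the backend.
proc exampleNestedCommit(tdb: AristoTxRef): Result[void,(VertexID,AristoError)] =
  var tx = ? tdb.begin                # outer frame
  tx = ? tx.begin                     # inner frame
  # ... apply `put()`/`del()` operations on `tx` (defined further below) ...
  tx = ? tx.commit()                  # merge inner frame into outer frame
  discard ? tx.commit()               # back to base level, hashified & saved
  ok()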
proc collapse*(
tdb: AristoTxRef; # Database, transaction wrapper
commit: bool; # Commit is `true`, otherwise roll back
): Result[AristoTxRef,(VertexID,AristoError)] =
## Variation of `commit()` or `rollback()` performing the equivalent of
## ::
## while tx.isTop:
## let rc =
## if commit: tx.commit()
## else: tx.rollback()
## ...
## tx = rc.value
##
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
# Get base layer
var pTx = tdb.parent
while not pTx.parent.isNil:
pTx = pTx.parent
pTx.db = tdb.db
# Hashify and save, or complete rollback
if commit:
block:
let rc = tdb.db.hashify()
if rc.isErr:
return err(rc.error)
block:
let rc = tdb.db.save()
if rc.isErr:
return err(rc.error)
else:
let rc = tdb.db.retool(flushStack = true)
if rc.isErr:
return err((VertexID(0),rc.error))
tdb.db = AristoDbRef(nil)
tdb.parent = AristoTxRef(nil)
ok pTx
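# Hypothetical sketch: unwind whatever is still open, e.g. from a `defer:`
# clause, so that the *base level* handle can be reused or closed via `done()`.
proc exampleUnwindAll(tx: AristoTxRef) =
  if tx.isTop:
    discard tx.collapse(commit = false)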
# ------------------------------------------------------------------------------
# Public functions: DB manipulations
# ------------------------------------------------------------------------------
proc put*(
tdb: AristoTxRef; # Database, transaction wrapper
leaf: LeafTiePayload; # Leaf item to add to the database
): Result[bool,AristoError] =
## Add leaf entry to transaction layer.
if tdb.db.isNil or tdb.parent.isNil:
return err(TxTopHandleExpected)
let report = tdb.db.merge @[leaf]
if report.error != AristoError(0):
return err(report.error)
ok(0 < report.merged)
proc del*(
tdb: AristoTxRef; # Database, transaction wrapper
leaf: LeafTie; # `Patricia Trie` path root-to-leaf
): Result[void,(VertexID,AristoError)] =
## Delete leaf entry from transaction layer.
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
tdb.db.delete leaf
proc get*(
tdb: AristoTxRef; # Database, transaction wrapper
leaf: LeafTie; # `Patricia Trie` path root-to-leaf
): Result[PayloadRef,(VertexID,AristoError)] =
## Get leaf entry from database filtered through the transaction layer.
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
let hike = leaf.hikeUp tdb.db
if hike.error != AristoError(0):
let vid = if hike.legs.len == 0: VertexID(0) else: hike.legs[^1].wp.vid
return err((vid,hike.error))
ok hike.legs[^1].wp.vtx.lData
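# Hypothetical sketch: stage a leaf item, read it back through the
# transaction layer, and delete it again (the `leaf` argument is assumed to
# be prepared by the caller.)
proc exampleLeafRoundTrip(
    tx: AristoTxRef;                  # *top level* handle
    leaf: LeafTiePayload;             # leaf item to play with
      ): Result[PayloadRef,(VertexID,AristoError)] =
  block:
    let rc = tx.put leaf
    if rc.isErr:
      return err((VertexID(0),rc.error))
  let pld = ? tx.get leaf.leafTie     # visible on the transaction layer
  ? tx.del leaf.leafTie               # remove it again
  ok pld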
proc key*(
tdb: AristoTxRef; # Database, transaction wrapper
vid: VertexID;
): Result[HashKey,(VertexID,AristoError)] =
## Get the Merkle hash key for the argument vertex ID `vid`. This function
## hashifies (i.e. calculates Merkle hash keys) unless a key is already
## available for the requested vertex ID.
##
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
if tdb.db.top.kMap.hasKey vid:
block:
let key = tdb.db.top.kMap.getOrVoid(vid).key
if key.isValid:
return ok(key)
let rc = tdb.db.hashify()
if rc.isErr:
return err(rc.error)
block:
let key = tdb.db.top.kMap.getOrVoid(vid).key
if key.isValid:
return ok(key)
return err((vid,TxCacheKeyFetchFail))
block:
let rc = tdb.db.getKeyBackend vid
if rc.isOk:
return ok(rc.value)
return err((vid,TxBeKeyFetchFail))
proc rootKey*(
tdb: AristoTxRef; # Database, transaction wrapper
): Result[HashKey,(VertexID,AristoError)] =
## Get the Merkle hash key for the main state root (with vertex ID `1`.)
tdb.key VertexID(1)
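# Hypothetical sketch: hashify pending changes on demand and compare the
# resulting state root of the main trie against an expected Merkle key.
proc exampleVerifyRoot(tx: AristoTxRef; expected: HashKey): bool =
  let rc = tx.rootKey                 # same as `tx.key VertexID(1)`
  rc.isOk and rc.value == expected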
proc changeLog*(
tdb: AristoTxRef; # Database, transaction wrapper
clear = false; # Delete history
): seq[AristoChangeLogRef] =
## Get the save history, i.e. the changed states before the database was
## updated on disc. If the argument `clear` is set `true`, the history log
## on the descriptor is cleared.
##
## The argument `tdb` must be a *top level* descriptor, i.e. `tdb.isTop()`
## returns `true`. Otherwise the function `changeLog()` always returns an
## empty list.
##
if tdb.db.isNil or tdb.parent.isNil:
return
result = tdb.db.history
if clear:
tdb.db.history.setLen(0)
# ------------------------------------------------------------------------------
# Public functions: DB traversal
# ------------------------------------------------------------------------------
proc right*(
lty: LeafTie; # Some `Patricia Trie` path
tdb: AristoTxRef; # Database, transaction wrapper
): Result[LeafTie,(VertexID,AristoError)] =
## Finds the next leaf to the right (if any.) For details see
## `aristo_nearby.right()`.
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
lty.right tdb.db
proc left*(
lty: LeafTie; # Some `Patricia Trie` path
tdb: AristoTxRef; # Database, transaction wrapper
): Result[LeafTie,(VertexID,AristoError)] =
## Finds the next leaf to the left (if any.) For details see
## `aristo_nearby.left()`.
if tdb.db.isNil or tdb.parent.isNil:
return err((VertexID(0),TxTopHandleExpected))
lty.left tdb.db
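# Hypothetical sketch: visit all leaf entries of the argument `root` trie in
# ascending path order, using `right()` the same way as the forward walk in
# the unit tests.
iterator exampleWalkLeafs(tx: AristoTxRef; root = VertexID(1)): LeafTie =
  var lty = LeafTie(root: root)
  while true:
    let rc = lty.right tx
    if rc.isErr:
      break                           # e.g. `NearbyBeyondRange` => done
    yield rc.value
    if rc.value.path < high(HashID):
      lty.path = HashID(rc.value.path.u256 + 1)
    else:
      break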
# ------------------------------------------------------------------------------
# Public helpers, miscellaneous
# ------------------------------------------------------------------------------
proc level*(
tdb: AristoTxRef; # Database, transaction wrapper
): (int,int) =
## This function returns the nesting level of the transaction and the length
## of the internal stack. Both values must be equal (otherwise there would
## be an internal error.)
##
## The argument `tdb` must be a *top level* or *base level* descriptor, i.e.
## `tdb.isTop() or tdb.isBase()` evaluates `true`. Otherwise `(-1,-1)` is
## returned.
##
if tdb.db.isNil:
return (-1,-1)
if tdb.parent.isNil:
return (0, tdb.db.stack.len)
# Count base layer
var
count = 1
pTx = tdb.parent
while not pTx.parent.isNil:
count.inc
pTx = pTx.parent
(count, tdb.db.stack.len)
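# Hypothetical sketch: the nesting level follows `begin()` and `commit()` in
# lock-step with the internal stack length (using `.value` will crash on
# failure, as in the `begin()` example above.)
proc exampleLevels(tdb: AristoTxRef) =
  doAssert tdb.level == (0,0)         # *base level* handle
  let tx = tdb.begin.value
  doAssert tx.level == (1,1)          # one open transaction frame
  doAssert tx.commit().value.level == (0,0)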
proc db*(
tdb: AristoTxRef; # Database, transaction wrapper
): AristoDbRef =
## Getter, provides access to the Aristo database cache and backend.
##
## The getter returns a valid object reference if the argument `tdb` is a
## *top level* or *base level* descriptor, i.e. `tdb.isTop() or tdb.isBase()`
## evaluates `true`.
##
tdb.db
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -23,9 +23,7 @@ import
../nimbus/sync/snap/worker/db/[rocky_bulk_load, snapdb_accounts, snapdb_desc],
./replay/[pp, undump_accounts, undump_storages],
./test_sync_snap/[snap_test_xx, test_accounts, test_types],
./test_aristo/[
test_backend, test_delete, test_helpers, test_merge, test_nearby,
test_transcode]
./test_aristo/[test_backend, test_helpers, test_transcode, test_tx]
const
baseDir = [".", "..", ".."/"..", $DirSep]
@@ -206,23 +204,17 @@ proc accountsRunner(
suite &"Aristo: accounts data dump from {fileInfo}{listMode}":
test &"Merge {accLst.len} account lists to database":
check noisy.test_mergeKvpList(accLst, dbDir, resetDb)
test &"Merge {accLst.len} proof & account lists to database":
check noisy.test_mergeProofAndKvpList(accLst, dbDir, resetDb)
check noisy.testTxMergeProofAndKvpList(accLst, dbDir, resetDb)
test &"Compare {accLst.len} account lists on database backends":
if cmpBackends:
check noisy.test_backendConsistency(accLst, dbDir, resetDb)
check noisy.testBackendConsistency(accLst, dbDir, resetDb)
else:
skip()
test &"Traverse accounts database w/{accLst.len} account lists":
check noisy.test_nearbyKvpList(accLst, resetDb)
test &"Delete accounts database, successively {accLst.len} entries":
check noisy.test_delete(accLst, dbDir)
check noisy.testTxMergeAndDelete(accLst, dbDir)
proc storagesRunner(
@@ -244,24 +236,18 @@ proc storagesRunner(
suite &"Aristo: storages data dump from {fileInfo}{listMode}":
test &"Merge {stoLst.len} storage slot lists to database":
check noisy.test_mergeKvpList(stoLst, dbDir, resetDb)
test &"Merge {stoLst.len} proof & slots lists to database":
check noisy.test_mergeProofAndKvpList(
check noisy.testTxMergeProofAndKvpList(
stoLst, dbDir, resetDb, fileInfo, oops)
test &"Compare {stoLst.len} slot lists on database backends":
if cmpBackends:
check noisy.test_backendConsistency(stoLst, dbDir, resetDb)
check noisy.testBackendConsistency(stoLst, dbDir, resetDb)
else:
skip()
test &"Traverse storage slots database w/{stoLst.len} account lists":
check noisy.test_nearbyKvpList(stoLst, resetDb)
test &"Delete storage database, successively {stoLst.len} entries":
check noisy.test_delete(stoLst, dbDir)
check noisy.testTxMergeAndDelete(stoLst, dbDir)
# ------------------------------------------------------------------------------
# Main function(s)


@@ -229,7 +229,7 @@ proc fwdWalkVerify(
# Public test function
# ------------------------------------------------------------------------------
proc test_delete*(
proc testDelete*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory


@@ -97,8 +97,15 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
proc `==`*[T: AristoError|VertexID](a: T, b: int): bool =
a == T(b)
proc `==`*[S,T](a: (S,T), b: (int,int)): bool =
a == (S(b[0]), T(b[1]))
proc `==`*(a: (VertexID,AristoError), b: (int,int)): bool =
(a[0].int,a[1].int) == b
proc `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
(a[0].int,a[1]) == b
proc `==`*(a: (int,AristoError), b: (int,int)): bool =
(a[0],a[1].int) == b
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
## Convert test data into usable in-memory format


@@ -0,0 +1,478 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB records transaction based merge test
import
std/[algorithm, bitops, sequtils, sets, tables],
eth/common,
stew/results,
unittest2,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[aristo_check, aristo_desc, aristo_get, aristo_merge],
./test_helpers
type
PrngDesc = object
prng: uint32 ## random state
KnownHasherFailure* = seq[(string,(int,AristoError))]
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
state = state * 1103515245 + 12345;
let val = (state shr 16) and 32767 # mod 2^15
(val shr 8).byte # Extract second byte
proc rand[W: SomeInteger|VertexID](ap: var PrngDesc; T: type W): T =
var a: array[sizeof T,byte]
for n in 0 ..< sizeof T:
a[n] = ap.prng.posixPrngRand().byte
when sizeof(T) == 1:
let w = uint8.fromBytesBE(a).T
elif sizeof(T) == 2:
let w = uint16.fromBytesBE(a).T
elif sizeof(T) == 4:
let w = uint32.fromBytesBE(a).T
else:
let w = uint64.fromBytesBE(a).T
when T is SomeUnsignedInt:
# That way, `fromBytesBE()` can be applied to `uint`
result = w
else:
# That way the result is independent of endianness
(addr result).copyMem(unsafeAddr w, sizeof w)
proc init(T: type PrngDesc; seed: int): PrngDesc =
result.prng = (seed and 0x7fffffff).uint32
proc rand(td: var PrngDesc; top: int): int =
if 0 < top:
let mask = (1 shl (8 * sizeof(int) - top.countLeadingZeroBits)) - 1
for _ in 0 ..< 100:
let w = mask and td.rand(typeof(result))
if w < top:
return w
raiseAssert "Not here (!)"
# -----------------------
proc randomisedLeafs(
tx: AristoTxRef;
td: var PrngDesc;
): seq[(LeafTie,VertexID)] =
result = tx.db.top.lTab.pairs.toSeq.filterIt(it[1].isValid).sorted(
cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0], b[0]))
if 2 < result.len:
for n in 0 ..< result.len-1:
let r = n + td.rand(result.len - n)
result[n].swap result[r]
proc innerCleanUp(
tdb: AristoTxRef; # Level zero tx
tx: AristoTxRef; # Active transaction (if any)
) =
## Defer action
if not tx.isNil:
let rc = tx.collapse(commit=false)
if rc.isErr:
check rc.error == (0,0)
else:
check rc.value == tdb
if not tdb.isNil:
let rc = tdb.done(flush=true)
if rc.isErr:
check rc.error == 0
proc saveToBackend(
tx: var AristoTxRef;
relax: bool;
noisy: bool;
debugID: int;
): bool =
# Verify context (nesting level must be 2)
block:
let levels = tx.level
if levels != (2,2):
check levels == (2,2)
return
block:
let rc = tx.db.checkCache(relax=true)
if rc.isErr:
check rc.error == (0,0)
return
# Implicitly force hashify by committing the current layer
block:
let rc = tx.commit(hashify=true)
if rc.isErr:
check rc.error == (0,0)
return
tx = rc.value
let levels = tx.level
if levels != (1,1):
check levels == (1,1)
return
block:
let rc = tx.db.checkBE(relax=true)
if rc.isErr:
check rc.error == (0,0)
return
# Save to backend
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == (0,0)
return
tx = rc.value
let levels = tx.level
if levels != (0,0):
check levels == (0,0)
return
block:
let rc = tx.db.checkBE(relax=relax)
if rc.isErr:
check rc.error == (0,0)
return
# Update layers to original level
tx = tx.begin.value.begin.value
true
proc saveToBackendWithOops(
tx: var AristoTxRef;
noisy: bool;
debugID: int;
oops: (int,AristoError);
): bool =
block:
let levels = tx.level
if levels != (2,2):
check levels == (2,2)
return
# Implicitly force hashify by committing the current layer
block:
let rc = tx.commit(hashify=true)
# Handle known errors
if rc.isOK:
if oops != (0,0):
check oops == (0,0)
return
else:
if rc.error != oops:
check rc.error == oops
return
tx = rc.value
let levels = tx.level
if levels != (1,1):
check levels == (1,1)
return
# Save to backend
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == (0,0)
return
tx = rc.value
let levels = tx.level
if levels != (0,0):
check levels == (0,0)
return
# Update layers to original level
tx = tx.begin.value.begin.value
true
proc fwdWalkVerify(
tx: AristoTxRef;
root: VertexID;
left: HashSet[LeafTie];
noisy: bool;
debugID: int;
): bool =
let
nLeafs = left.len
var
lfLeft = left
lty = LeafTie(root: root)
n = 0
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
let rc = lty.right tx
if rc.isErr:
if rc.error[1] == NearbyBeyondRange and lfLeft.len == 0:
return true
check rc.error == (0,0)
check lfLeft.len == 0
return
if rc.value notin lfLeft:
check rc.error == (0,0)
return
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
lfLeft.excl rc.value
n.inc
check n <= nLeafs
proc revWalkVerify(
tx: AristoTxRef;
root: VertexID;
left: HashSet[LeafTie];
noisy: bool;
debugID: int;
): bool =
let
nLeafs = left.len
var
lfLeft = left
lty = LeafTie(root: root, path: HashID(high(UInt256)))
n = 0
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
let rc = lty.left tx
if rc.isErr:
if rc.error[1] == NearbyBeyondRange and lfLeft.len == 0:
return true
check rc.error == (0,0)
check lfLeft.len == 0
return
if rc.value notin lfLeft:
check rc.error == (0,0)
return
if low(HashID) < rc.value.path:
lty.path = HashID(rc.value.path.u256 - 1)
lfLeft.excl rc.value
n.inc
check n <= nLeafs
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc testTxMergeAndDelete*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory
): bool =
var
prng = PrngDesc.init 42
db = AristoDbRef()
fwdRevVfyToggle = true
defer:
db.finish(flush=true)
for n,w in list:
# Start with brand new persistent database.
db = block:
let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == 0
return
rc.value
# Convert to transaction layer
let tdb = db.to(AristoTxRef)
check tdb.isBase
check not tdb.isTop
# Start transaction (double frame for testing)
var tx = tdb.begin.value.begin.value
check not tx.isBase
check tx.isTop
# Reset database so that the next round has a clean setup
defer:
tdb.innerCleanUp tx
# Merge leaf data into main trie (w/vertex ID 1)
let kvpLeafs = w.kvpLst.mapRootVid VertexID(1)
for leaf in kvpLeafs:
let rc = tx.put leaf
if rc.isErr:
check rc.error == 0
return
# List of all leaf entries that should be on the database
var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet
# Provide a (reproducible) pseudo-random copy of the leafs list
let leafVidPairs = tx.randomisedLeafs prng
if leafVidPairs.len != leafsLeft.len:
check leafVidPairs.len == leafsLeft.len
return
# Trigger subsequent saving tasks in loop below
let (saveMod, saveRest, relax) = block:
if leafVidPairs.len < 17: (7, 3, false)
elif leafVidPairs.len < 31: (11, 7, false)
else: (leafVidPairs.len div 5, 11, true)
# === Loop over leafs ===
for u,lvp in leafVidPairs:
let
runID = n + list.len * u
tailWalkVerify = 7 # + 999
doSaveBeOk = ((u mod saveMod) == saveRest)
(leaf, lid) = lvp
if doSaveBeOk:
if not tx.saveToBackend(relax=relax, noisy=noisy, runID):
return
# Delete leaf
let rc = tx.del leaf
if rc.isErr:
check rc.error == (0,0)
return
# Update list of remaining leafs
leafsLeft.excl leaf
let deletedVtx = tx.db.getVtx lid
if deletedVtx.isValid:
check deletedVtx.isValid == false
return
# Walking the database is too slow for large tables. So the hope is that
# potential errors will not go away but rather pop up later, as well.
if leafsLeft.len <= tailWalkVerify:
if u < leafVidPairs.len-1:
if fwdRevVfyToggle:
fwdRevVfyToggle = false
if not tx.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID):
return
else:
fwdRevVfyToggle = true
if not tx.revWalkVerify(leaf.root, leafsLeft, noisy, runID):
return
when true and false:
noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len
true
proc testTxMergeProofAndKvpList*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory
resetDb = false;
idPfx = "";
oops: KnownHasherFailure = @[];
): bool =
let
oopsTab = oops.toTable
var
adb = AristoDbRef()
tdb, tx: AristoTxRef
rootKey: HashKey
count = 0
defer:
adb.finish(flush=true)
for n,w in list:
# Start new database upon request
if resetDb or w.root != rootKey or w.proof.len == 0:
tdb.innerCleanUp tx
adb = block:
let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == 0
return
rc.value
# Convert to transaction layer
tdb = adb.to(AristoTxRef)
check tdb.isBase
check not tdb.isTop
# Start transaction (double frame for testing)
tx = tdb.begin.value.begin.value
check not tx.isBase
check tx.isTop
# Update root
rootKey = w.root
count = 0
count.inc
let
testId = idPfx & "#" & $w.id & "." & $n
runID = n
lstLen = list.len
sTabLen = tx.db.top.sTab.len
lTabLen = tx.db.top.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
var
proved: tuple[merged: int, dups: int, error: AristoError]
if 0 < w.proof.len:
let rc = tx.db.merge(rootKey, VertexID(1))
if rc.isErr:
check rc.error == 0
return
proved = tx.db.merge(w.proof, rc.value) # , noisy)
check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
check w.proof.len == proved.merged + proved.dups
check tx.db.top.lTab.len == lTabLen
check tx.db.top.sTab.len <= proved.merged + sTabLen
check proved.merged < tx.db.top.pAmk.len
let
merged = tx.db.merge leafs
check tx.db.top.lTab.len == lTabLen + merged.merged
check merged.merged + merged.dups == leafs.len
block:
if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}:
check merged.error in {AristoError(0), MergeLeafPathCachedAlready}
return
block:
let oops = oopsTab.getOrDefault(testId,(0,AristoError(0)))
if not tx.saveToBackendWithOops(noisy, runID, oops):
return
when true and false:
noisy.say "***", "proofs(6) <", n, "/", lstLen-1, ">",
" groups=", count, " proved=", proved.pp, " merged=", merged.pp
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------