Remove `forkTx` and friends (#2951)

The forking facility has been replaced by ForkedChain. Frames and
layers are two other mechanisms that mostly do the same thing at the
aristo level without quite providing the functionality FC needs; this
cleanup will make that integration easier.
Jacek Sieka 2024-12-18 11:56:46 +01:00 committed by GitHub
parent 45bc6422a0
commit 06a544ac85
47 changed files with 67 additions and 1447 deletions

View File

@ -13,7 +13,7 @@
import
std/tables,
eth/eip1559,
eth/common/[hashes, accounts, headers, addresses],
eth/common/[blocks, hashes, accounts, headers, addresses],
../db/[ledger, core_db],
../constants,
./chain_config

View File

@ -11,7 +11,7 @@
import
std/[tables, sets],
stint,
eth/common,
eth/common/[addresses, transactions],
../utils/mergeutils
type
@ -67,9 +67,9 @@ proc add*(ac: var AccessList, address: Address, slot: UInt256) =
proc clear*(ac: var AccessList) {.inline.} =
ac.slots.clear()
func getAccessList*(ac: AccessList): common.AccessList =
func getAccessList*(ac: AccessList): transactions.AccessList =
for address, slots in ac.slots:
result.add common.AccessPair(
result.add transactions.AccessPair(
address : address,
storageKeys: slots.toStorageKeys,
)

View File

@ -115,28 +115,6 @@ type
{.noRaise.}
## Fetch the Merkle hash of the storage root related to `accPath`.
AristoApiFindTxFn* =
proc(db: AristoDbRef;
rvid: RootedVertexID;
key: HashKey;
): Result[int,AristoError]
{.noRaise.}
## Find the transaction where the vertex with ID `vid` exists and has
## the Merkle hash key `key`. If there is no transaction available,
## search in the filter and then in the backend.
##
## If the above procedure succeeds, an integer indicating the transaction
## level is returned:
##
## * `0` -- top level, current layer
## * `1`,`2`,`..` -- some transaction level further down the stack
## * `-1` -- the filter between transaction stack and database backend
## * `-2` -- the database backend
##
## A successful return code might be used for the `forkTx()` call for
## creating a forked descriptor that provides the pair `(vid,key)`.
##
AristoApiFinishFn* =
proc(db: AristoDbRef;
eradicate = false;
@ -161,32 +139,6 @@ type
## A non centre descriptor should always be destructed after use (see
## also comments on `fork()`.)
AristoApiForkTxFn* =
proc(db: AristoDbRef;
backLevel: int;
): Result[AristoDbRef,AristoError]
{.noRaise.}
## Fork a new descriptor obtained from parts of the argument database
## as described by arguments `db` and `backLevel`.
##
## If the argument `backLevel` is non-negative, the forked descriptor
## will provide the database view where the first `backLevel` transaction
## layers are stripped and the remaining layers are squashed into a single
## transaction.
##
## If `backLevel` is `-1`, a database descriptor with empty transaction
## layers will be provided where the `balancer` between database and
## transaction layers is kept in place.
##
## If `backLevel` is `-2`, a database descriptor with empty transaction
## layers will be provided without a `balancer`.
##
## The returned database descriptor will always have transaction level one.
## If there were no transactions that could be squashed, an empty
## transaction is added.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
AristoApiHashifyFn* =
proc(db: AristoDbRef;
): Result[void,(VertexID,AristoError)]
@ -233,14 +185,6 @@ type
## Getter, non-negative nesting level (i.e. number of pending
## transactions)
AristoApiNForkedFn* =
proc(db: AristoDbRef;
): int
{.noRaise.}
## Returns the number of non centre descriptors (see comments on
## `reCentre()` for details.) This function is a fast version of
## `db.forked.toSeq.len`.
AristoApiMergeAccountRecordFn* =
proc(db: AristoDbRef;
accPath: Hash32;
@ -358,23 +302,6 @@ type
##
## The argument `nxtSid` will be the ID for the next saved state record.
AristoApiReCentreFn* =
proc(db: AristoDbRef;
): Result[void,AristoError]
{.noRaise.}
## Re-focus the `db` argument descriptor so that it becomes the centre.
## Nothing is done if the `db` descriptor is the centre, already.
##
## With several descriptors accessing the same backend database there is
## a single one that has write permission for the backend (regardless
## whether there is a backend, at all.) The descriptor entity with write
## permission is called *the centre*.
##
## After invoking `reCentre()`, the argument database `db` can only be
## destructed by `finish()` which also destructs all other descriptors
## accessing the same backend database. Descriptors where `isCentre()`
## returns `false` must be single destructed with `forget()`.
AristoApiRollbackFn* =
proc(tx: AristoTxRef;
): Result[void,AristoError]
@ -425,17 +352,13 @@ type
fetchStorageData*: AristoApiFetchStorageDataFn
fetchStorageRoot*: AristoApiFetchStorageRootFn
findTx*: AristoApiFindTxFn
finish*: AristoApiFinishFn
forget*: AristoApiForgetFn
forkTx*: AristoApiForkTxFn
hasPathAccount*: AristoApiHasPathAccountFn
hasPathStorage*: AristoApiHasPathStorageFn
hasStorageData*: AristoApiHasStorageDataFn
isTop*: AristoApiIsTopFn
level*: AristoApiLevelFn
nForked*: AristoApiNForkedFn
mergeAccountRecord*: AristoApiMergeAccountRecordFn
mergeStorageData*: AristoApiMergeStorageDataFn
@ -449,7 +372,6 @@ type
pathAsBlob*: AristoApiPathAsBlobFn
persist*: AristoApiPersistFn
reCentre*: AristoApiReCentreFn
rollback*: AristoApiRollbackFn
txBegin*: AristoApiTxBeginFn
txLevel*: AristoApiTxLevelFn
@ -472,10 +394,7 @@ type
AristoApiProfFetchStorageDataFn = "fetchStorageData"
AristoApiProfFetchStorageRootFn = "fetchStorageRoot"
AristoApiProfFindTxFn = "findTx"
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfHasPathAccountFn = "hasPathAccount"
AristoApiProfHasPathStorageFn = "hasPathStorage"
@ -483,7 +402,6 @@ type
AristoApiProfIsTopFn = "isTop"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
AristoApiProfMergeAccountRecordFn = "mergeAccountRecord"
AristoApiProfMergeStorageDataFn = "mergeStorageData"
@ -495,7 +413,6 @@ type
AristoApiProfPathAsBlobFn = "pathAsBlob"
AristoApiProfPersistFn = "persist"
AristoApiProfReCentreFn = "reCentre"
AristoApiProfRollbackFn = "rollback"
AristoApiProfTxBeginFn = "txBegin"
AristoApiProfTxLevelFn = "txLevel"
@ -534,10 +451,7 @@ when AutoValidateApiHooks:
doAssert not api.fetchStorageData.isNil
doAssert not api.fetchStorageRoot.isNil
doAssert not api.findTx.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.forkTx.isNil
doAssert not api.hasPathAccount.isNil
doAssert not api.hasPathStorage.isNil
@ -545,7 +459,6 @@ when AutoValidateApiHooks:
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.mergeAccountRecord.isNil
doAssert not api.mergeStorageData.isNil
@ -557,7 +470,6 @@ when AutoValidateApiHooks:
doAssert not api.pathAsBlob.isNil
doAssert not api.persist.isNil
doAssert not api.reCentre.isNil
doAssert not api.rollback.isNil
doAssert not api.txBegin.isNil
doAssert not api.txLevel.isNil
@ -601,10 +513,7 @@ func init*(api: var AristoApiObj) =
api.fetchStorageData = fetchStorageData
api.fetchStorageRoot = fetchStorageRoot
api.findTx = findTx
api.finish = finish
api.forget = forget
api.forkTx = forkTx
api.hasPathAccount = hasPathAccount
api.hasPathStorage = hasPathStorage
@ -612,7 +521,6 @@ func init*(api: var AristoApiObj) =
api.isTop = isTop
api.level = level
api.nForked = nForked
api.mergeAccountRecord = mergeAccountRecord
api.mergeStorageData = mergeStorageData
@ -624,7 +532,6 @@ func init*(api: var AristoApiObj) =
api.pathAsBlob = pathAsBlob
api.persist = persist
api.reCentre = reCentre
api.rollback = rollback
api.txBegin = txBegin
api.txLevel = txLevel
@ -650,10 +557,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
fetchStorageData: api.fetchStorageData,
fetchStorageRoot: api.fetchStorageRoot,
findTx: api.findTx,
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
hasPathAccount: api.hasPathAccount,
hasPathStorage: api.hasPathStorage,
@ -661,7 +565,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
mergeAccountRecord: api.mergeAccountRecord,
mergeStorageData: api.mergeStorageData,
@ -673,7 +576,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
pathAsBlob: api.pathAsBlob,
persist: api.persist,
reCentre: api.reCentre,
rollback: api.rollback,
txBegin: api.txBegin,
txLevel: api.txLevel,
@ -753,26 +655,11 @@ func init*(
AristoApiProfFetchStorageRootFn.profileRunner:
result = api.fetchStorageRoot(a, b)
profApi.findTx =
proc(a: AristoDbRef; b: RootedVertexID; c: HashKey): auto =
AristoApiProfFindTxFn.profileRunner:
result = api.findTx(a, b, c)
profApi.finish =
proc(a: AristoDbRef; b = false) =
AristoApiProfFinishFn.profileRunner:
api.finish(a, b)
profApi.forget =
proc(a: AristoDbRef): auto =
AristoApiProfForgetFn.profileRunner:
result = api.forget(a)
profApi.forkTx =
proc(a: AristoDbRef; b: int): auto =
AristoApiProfForkTxFn.profileRunner:
result = api.forkTx(a, b)
profApi.hasPathAccount =
proc(a: AristoDbRef; b: Hash32): auto =
AristoApiProfHasPathAccountFn.profileRunner:
@ -798,11 +685,6 @@ func init*(
AristoApiProfLevelFn.profileRunner:
result = api.level(a)
profApi.nForked =
proc(a: AristoDbRef): auto =
AristoApiProfNForkedFn.profileRunner:
result = api.nForked(a)
profApi.mergeAccountRecord =
proc(a: AristoDbRef; b: Hash32; c: AristoAccount): auto =
AristoApiProfMergeAccountRecordFn.profileRunner:
@ -843,11 +725,6 @@ func init*(
AristoApiProfPersistFn.profileRunner:
result = api.persist(a, b)
profApi.reCentre =
proc(a: AristoDbRef): auto =
AristoApiProfReCentreFn.profileRunner:
result = api.reCentre(a)
profApi.rollback =
proc(a: AristoTxRef): auto =
AristoApiProfRollbackFn.profileRunner:
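
The `findTx`, `forkTx`, `nForked` and `reCentre` hooks dropped from this API table were documented above in terms of a `backLevel` index. A minimal sketch of how a caller used the removed `forkTx` before this commit, assuming the relevant aristo modules are in scope and `db` is an initialised `AristoDbRef` (all other names are illustrative):

  # backLevel = 0 forks the current top layer; -1 keeps the balancer in
  # place; -2 forks straight off the unfiltered backend.
  let viewRc = db.forkTx(backLevel = 0)
  if viewRc.isOk:
    let view = viewRc.value        # descriptor at transaction level one
    # ... read-only work against `view` ...
    discard view.forget()          # non-centre descriptors must be released

Per the commit message, ForkedChain is expected to cover this use case from now on.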

View File

@ -16,9 +16,8 @@ import
std/tables,
eth/common,
results,
./aristo_delta/[delta_merge, delta_reverse],
./aristo_desc/desc_backend,
"."/[aristo_desc, aristo_layers]
"."/[aristo_desc]
# ------------------------------------------------------------------------------
# Public functions, save to backend
@ -26,13 +25,12 @@ import
proc deltaPersistentOk*(db: AristoDbRef): bool =
## Check whether the read-only filter can be merged into the backend
not db.backend.isNil and db.isCentre
not db.backend.isNil
proc deltaPersistent*(
db: AristoDbRef; # Database
nxtFid = 0u64; # Next filter ID (if any)
reCentreOk = false;
): Result[void,AristoError] =
## Resolve (i.e. move) the balancer into the physical backend database.
##
@ -62,32 +60,6 @@ proc deltaPersistent*(
? be.putEndFn(? be.putBegFn())
return ok()
# Make sure that the argument `db` is at the centre so the backend is in
# read-write mode for this peer.
let parent = db.getCentre
if db != parent:
if not reCentreOk:
return err(FilBackendRoMode)
? db.reCentre()
# Always re-centre to `parent` (in case `reCentreOk` was set)
defer: discard parent.reCentre()
# Update forked balancers here so that errors are detected early (if any.)
if 0 < db.nForked:
let rev = db.revFilter(db.balancer).valueOr:
return err(error[1])
if not rev.isEmpty: # Can an empty `rev` happen at all?
var unsharedRevOk = true
for w in db.forked:
if not w.db.balancer.isValid:
unsharedRevOk = false
# The `rev` filter can be modified if one can make sure that it is
# not shared (i.e. only previously merged into the w.db.balancer.)
# Note that it is trivially true for a single fork.
let modLowerOk = w.isLast and unsharedRevOk
w.db.balancer = deltaMerge(
w.db.balancer, modUpperOk=false, rev, modLowerOk=modLowerOk)
let lSst = SavedState(
key: EMPTY_ROOT_HASH, # placeholder for more
serial: nxtFid)

View File

@ -9,7 +9,6 @@
# except according to those terms.
import
std/tables,
".."/[aristo_desc, aristo_layers]
# ------------------------------------------------------------------------------
@ -18,9 +17,7 @@ import
proc deltaMerge*(
upper: LayerRef; # Think of `top`, `nil` is ok
modUpperOk: bool; # May re-use/modify `upper`
lower: LayerRef; # Think of `balancer`, `nil` is ok
modLowerOk: bool; # May re-use/modify `lower`
): LayerRef =
## Merge argument `upper` into the `lower` filter instance.
##
@ -29,51 +26,18 @@ proc deltaMerge*(
##
if lower.isNil:
# Degenerate case: `upper` is void
result = upper
upper
elif upper.isNil:
# Degenerate case: `lower` is void
result = lower
lower
elif modLowerOk:
else:
# Can modify `lower` which is the preferred action mode but applies only
# in cases where the `lower` argument is not shared.
lower.vTop = upper.vTop
layersMergeOnto(upper, lower[])
result = lower
elif not modUpperOk:
# Cannot modify any argument layers.
result = LayerRef(
sTab: lower.sTab, # shallow copy (entries will not be modified)
kMap: lower.kMap,
accLeaves: lower.accLeaves,
stoLeaves: lower.stoLeaves,
vTop: upper.vTop)
layersMergeOnto(upper, result[])
else:
# Otherwise avoid copying some tables by modifying `upper`. This is not
# completely free as the merge direction changes to merging the `lower`
# layer up into the higher prioritised `upper` layer (note that the `lower`
# argument filter is read-only.) Here again, the `upper` argument must not
# be a shared layer/filter.
for (rvid,vtx) in lower.sTab.pairs:
if not upper.sTab.hasKey(rvid):
upper.sTab[rvid] = vtx
for (rvid,key) in lower.kMap.pairs:
if not upper.kMap.hasKey(rvid):
upper.kMap[rvid] = key
for (accPath,leafVtx) in lower.accLeaves.pairs:
if not upper.accLeaves.hasKey(accPath):
upper.accLeaves[accPath] = leafVtx
for (mixPath,leafVtx) in lower.stoLeaves.pairs:
if not upper.stoLeaves.hasKey(mixPath):
upper.stoLeaves[mixPath] = leafVtx
result = upper
lower
# ------------------------------------------------------------------------------
# End
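
With the `modUpperOk`/`modLowerOk` flags gone, `deltaMerge` now takes just the two layers and, when both are non-nil, folds `upper` into `lower` and returns the merged layer (a nil argument simply yields the other one). The resulting call shape, as used by `txStow` further down in this commit (`db` stands for any `AristoDbRef` with a populated top layer):

  # The top layer is merged into the balancer; the returned layer replaces it.
  db.balancer = deltaMerge(db.top, db.balancer)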

View File

@ -1,104 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/tables,
eth/common,
results,
".."/[aristo_desc, aristo_get, aristo_utils]
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc revSubTree(
db: AristoDbRef;
rev: LayerRef;
rvid: RootedVertexID;
): Result[void,(VertexID,AristoError)] =
## Collect subtrees marked for deletion
let
vtx = block:
let rc = db.getVtxUbe rvid
if rc.isOk:
rc.value
elif rc.error == GetVtxNotFound:
VertexRef(nil)
else:
return err((rvid.vid,rc.error))
key = block:
let rc = db.getKeyUbe(rvid, {})
if rc.isOk:
rc.value[0]
elif rc.error == GetKeyNotFound:
VOID_HASH_KEY
else:
return err((rvid.vid,rc.error))
if vtx.isValid:
for vid in vtx.subVids:
? db.revSubTree(rev, (rvid.root,vid))
rev.sTab[rvid] = vtx
if key.isValid:
rev.kMap[rvid] = key
ok()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc revFilter*(
db: AristoDbRef; # Database
filter: LayerRef; # Filter to revert
): Result[LayerRef,(VertexID,AristoError)] =
## Assemble reverse filter for the `filter` argument, i.e. changes to the
## backend that reverse the effect of applying this read-only filter.
##
## This read-only filter is calculated against the current unfiltered
## backend (excluding optionally installed read-only filter.)
##
let rev = LayerRef()
# Get vid generator state on backend
block:
let rc = db.getTuvUbe()
if rc.isOk:
rev.vTop = rc.value
elif rc.error != GetTuvNotFound:
return err((VertexID(0), rc.error))
# Calculate reverse changes for the `sTab[]` structural table
for rvid in filter.sTab.keys:
let rc = db.getVtxUbe rvid
if rc.isOk:
rev.sTab[rvid] = rc.value
elif rc.error == GetVtxNotFound:
rev.sTab[rvid] = VertexRef(nil)
else:
return err((rvid.vid,rc.error))
# Calculate reverse changes for the `kMap[]` structural table.
for rvid in filter.kMap.keys:
let rc = db.getKeyUbe(rvid, {})
if rc.isOk:
rev.kMap[rvid] = rc.value[0]
elif rc.error == GetKeyNotFound:
rev.kMap[rvid] = VOID_HASH_KEY
else:
return err((rvid.vid,rc.error))
ok(rev)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -46,13 +46,6 @@ type
txUid*: uint ## Unique ID among transactions
level*: int ## Stack index for this transaction
DudesRef = ref object
## List of peers accessing the same database. This list is lazily allocated
## and might be kept with a single entry, i.e. so that `{centre} == peers`.
##
centre: AristoDbRef ## Link to peer with write permission
peers: HashSet[AristoDbRef] ## List of all peers
AristoDbRef* = ref object
## Three tier database object supporting distributed instances.
top*: LayerRef ## Database working layer, mutable
@ -62,7 +55,6 @@ type
txRef*: AristoTxRef ## Latest active transaction
txUidGen*: uint ## Tx-relative unique number generator
dudes: DudesRef ## Related DB descriptors
accLeaves*: LruCache[Hash32, VertexRef]
## Account path to payload cache - accounts are frequently accessed by
@ -160,139 +152,6 @@ func hash*(db: AristoDbRef): Hash =
## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash
# ------------------------------------------------------------------------------
# Public functions, `dude` related
# ------------------------------------------------------------------------------
func isCentre*(db: AristoDbRef): bool =
## This function returns `true` if the argument `db` is the centre (see
## comments on `reCentre()` for details.)
##
db.dudes.isNil or db.dudes.centre == db
func getCentre*(db: AristoDbRef): AristoDbRef =
## Get the centre descriptor among all other descriptors accessing the same
## backend database (see comments on `reCentre()` for details.)
##
if db.dudes.isNil: db else: db.dudes.centre
proc reCentre*(db: AristoDbRef): Result[void,AristoError] =
## Re-focus the `db` argument descriptor so that it becomes the centre.
## Nothing is done if the `db` descriptor is the centre, already.
##
## With several descriptors accessing the same backend database there is a
## single one that has write permission for the backend (regardless whether
## there is a backend, at all.) The descriptor entity with write permission
## is called *the centre*.
##
## After invoking `reCentre()`, the argument database `db` can only be
## destructed by `finish()` which also destructs all other descriptors
## accessing the same backend database. Descriptors where `isCentre()`
## returns `false` must be single destructed with `forget()`.
##
if not db.dudes.isNil:
db.dudes.centre = db
ok()
proc fork*(
db: AristoDbRef;
noTopLayer = false;
noFilter = false;
): Result[AristoDbRef,AristoError] =
## This function creates a new empty descriptor accessing the same backend
## (if any) database as the argument `db`. This new descriptor joins the
## list of descriptors accessing the same backend database.
##
## After use, any unused non centre descriptor should be destructed via
## `forget()`. Not doing so will not only hold memory resources but might
## also cost computing resources for maintaining and updating backend
## filters when writing to the backend database.
##
## If the argument `noFilter` is set `true` the function will fork directly
## off the backend database and ignore any filter.
##
## If the argument `noTopLayer` is set `true` the function will provide an
## uninitialised and inconsistent (!) descriptor object without top layer.
## This setting avoids some database lookup for cases where the top layer
## is redefined anyway.
##
# Make sure that there is a dudes list
if db.dudes.isNil:
db.dudes = DudesRef(centre: db, peers: @[db].toHashSet)
let clone = AristoDbRef(
dudes: db.dudes,
backend: db.backend,
accLeaves: db.accLeaves,
stoLeaves: db.stoLeaves,
)
if not noFilter:
clone.balancer = db.balancer # Ref is ok here (filters are immutable)
if not noTopLayer:
clone.top = LayerRef.init()
if not db.balancer.isNil:
clone.top.vTop = db.balancer.vTop
else:
let rc = clone.backend.getTuvFn()
if rc.isOk:
clone.top.vTop = rc.value
elif rc.error != GetTuvNotFound:
return err(rc.error)
# Add to peer list of clones
db.dudes.peers.incl clone
ok clone
iterator forked*(db: AristoDbRef): tuple[db: AristoDbRef, isLast: bool] =
## Iterate over all non centre descriptors (see comments on `reCentre()`
## for details.)
##
## The second `isLast` yielded loop entry is `true` if the yielded tuple
## is the last entry in the list.
##
if not db.dudes.isNil:
var nLeft = db.dudes.peers.len
for dude in db.dudes.peers.items:
if dude != db.dudes.centre:
nLeft.dec
yield (dude, nLeft == 1)
func nForked*(db: AristoDbRef): int =
## Returns the number of non centre descriptors (see comments on `reCentre()`
## for details.) This function is a fast version of `db.forked.toSeq.len`.
if not db.dudes.isNil:
return db.dudes.peers.len - 1
proc forget*(db: AristoDbRef): Result[void,AristoError] =
## Destruct the non centre argument `db` descriptor (see comments on
## `reCentre()` for details.)
##
## A non centre descriptor should always be destructed after use (see also
## comments on `fork()`.)
##
if db.isCentre:
err(DescNotAllowedOnCentre)
elif db notin db.dudes.peers:
err(DescStaleDescriptor)
else:
db.dudes.peers.excl db # Unlink argument `db` from peers list
ok()
proc forgetOthers*(db: AristoDbRef): Result[void,AristoError] =
## For the centre argument `db` descriptor (see comments on `reCentre()`
## for details), destruct all other descriptors accessing the same backend.
##
if not db.dudes.isNil:
if db.dudes.centre != db:
return err(DescMustBeOnCentre)
db.dudes = DudesRef(nil)
ok()
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
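
The deleted `dudes`/centre machinery defined the descriptor lifecycle that the doc comments above describe piecemeal. A condensed sketch of that pre-removal lifecycle, assuming `aristo_desc` is in scope and `db` is an initialised `AristoDbRef` (only calls from the deleted code are used; the variable names are illustrative):

  let cloneRc = db.fork(noTopLayer = false, noFilter = false)
  if cloneRc.isOk:
    let clone = cloneRc.value
    doAssert not clone.isCentre     # write permission stays with `db`
    # `clone.reCentre()` would hand write permission to the clone instead
    # ... read-only work against `clone` ...
    discard clone.forget()          # single non-centre clean-up
  # At shutdown the centre releases every remaining peer:
  discard db.getCentre.forgetOthers()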

View File

@ -79,7 +79,6 @@ proc finish*(db: AristoDbRef; eradicate = false) =
##
if not db.backend.isNil:
db.backend.closeFn eradicate
discard db.getCentre.forgetOthers()
# ------------------------------------------------------------------------------
# End

View File

@ -15,8 +15,8 @@
import
results,
./aristo_tx/[tx_fork, tx_frame, tx_stow],
"."/[aristo_desc, aristo_get]
./aristo_tx/[tx_frame, tx_stow],
./aristo_desc
# ------------------------------------------------------------------------------
# Public functions, getters
@ -47,124 +47,6 @@ func to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
## Getter, retrieves the parent database descriptor from argument `tx`
tx.db
proc forkTx*(
db: AristoDbRef;
backLevel: int; # Backward location of transaction
): Result[AristoDbRef,AristoError] =
## Fork a new descriptor obtained from parts of the argument database
## as described by arguments `db` and `backLevel`.
##
## If the argument `backLevel` is non-negative, the forked descriptor will
## provide the database view where the first `backLevel` transaction layers
## are stripped and the remaining layers are squashed into a single transaction.
##
## If `backLevel` is `-1`, a database descriptor with empty transaction
## layers will be provided where the `balancer` between database and
## transaction layers is kept in place.
##
## If `backLevel` is `-2`, a database descriptor with empty transaction
## layers will be provided without a `balancer`.
##
## The returned database descriptor will always have transaction level one.
## If there were no transactions that could be squashed, an empty
## transaction is added.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
# Fork top layer (with or without pending transaction)?
if backLevel == 0:
return db.txForkTop()
# Fork bottom layer (=> 0 < db.stack.len)
if backLevel == db.stack.len:
return db.txForkBase()
# Inspect transaction stack
if 0 < backLevel:
var tx = db.txRef
if tx.isNil or db.stack.len < backLevel:
return err(TxLevelTooDeep)
# Fetch tx of level `backLevel` (need to skip some items)
for _ in 0 ..< backLevel:
tx = tx.parent
if tx.isNil:
return err(TxStackGarbled)
return tx.txFork()
# Plain fork, include `balancer`
if backLevel == -1:
let xb = ? db.fork(noFilter=false)
discard xb.txFrameBegin()
return ok(xb)
# Plain fork, unfiltered backend
if backLevel == -2:
let xb = ? db.fork(noFilter=true)
discard xb.txFrameBegin()
return ok(xb)
err(TxLevelUseless)
proc findTx*(
db: AristoDbRef;
rvid: RootedVertexID; # Pivot vertex (typically `VertexID(1)`)
key: HashKey; # Hash key of pivot vertex
): Result[int,AristoError] =
## Find the transaction where the vertex with ID `vid` exists and has the
## Merkle hash key `key`. If there is no transaction available, search in
## the filter and then in the backend.
##
## If the above procedure succeeds, an integer indicating the transaction
## level is returned:
##
## * `0` -- top level, current layer
## * `1`, `2`, ... -- some transaction level further down the stack
## * `-1` -- the filter between transaction stack and database backend
## * `-2` -- the database backend
##
## A successful return code might be used for the `forkTx()` call for
## creating a forked descriptor that provides the pair `(vid,key)`.
##
if not rvid.isValid or
not key.isValid:
return err(TxArgsUseless)
if db.txRef.isNil:
# Try `(vid,key)` on top layer
let topKey = db.top.kMap.getOrVoid rvid
if topKey == key:
return ok(0)
else:
# Find `(vid,key)` on transaction layers
for (n,tx,layer,error) in db.txRef.txFrameWalk:
if error != AristoError(0):
return err(error)
if layer.kMap.getOrVoid(rvid) == key:
return ok(n)
# Try bottom layer
let botKey = db.stack[0].kMap.getOrVoid rvid
if botKey == key:
return ok(db.stack.len)
# Try `(vid,key)` on balancer
if not db.balancer.isNil:
let roKey = db.balancer.kMap.getOrVoid rvid
if roKey == key:
return ok(-1)
# Try `(vid,key)` on unfiltered backend
block:
let beKey = db.getKeyUbe(rvid, {}).valueOr: (VOID_HASH_KEY, nil)
if beKey[0] == key:
return ok(-2)
err(TxNotFound)
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------
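
The removed `findTx` returned an integer level that was meant to feed straight into `forkTx`, as its doc comment notes. A sketch of that pairing under the pre-removal API, assuming `db: AristoDbRef`, `rvid: RootedVertexID` and `key: HashKey` are already at hand (everything else is illustrative):

  let levelRc = db.findTx(rvid, key)
  if levelRc.isOk:
    # 0 = top layer, 1.. = deeper tx levels, -1 = balancer, -2 = backend
    let forkedRc = db.forkTx(levelRc.value)
    if forkedRc.isOk:
      let view = forkedRc.value     # provides the (rvid, key) pair at level one
      # ... read-only lookups against `view` ...
      discard view.forget()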

View File

@ -1,129 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Transaction fork helpers
## =====================================
##
{.push raises: [].}
import
results,
./tx_frame,
".."/[aristo_desc, aristo_get, aristo_layers]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc txFork*(
tx: AristoTxRef; # Transaction descriptor
): Result[AristoDbRef,AristoError] =
## Clone a transaction into a new DB descriptor accessing the same backend
## database (if any) as the argument `db`. The new descriptor is linked to
## the transaction parent and is fully functional as a forked instance (see
## comments on `aristo_desc.reCentre()` for details.)
##
## Input situation:
## ::
## tx -> db0 with tx is top transaction, tx.level > 0
##
## Output situation:
## ::
## tx -> db0 \
## > share the same backend
## tx1 -> db1 /
##
## where `tx.level > 0`, `db1.level == 1` and `db1` is returned. The
## transaction `tx1` can be retrieved via `db1.txTop()`.
##
## The new DB descriptor will contain a copy of the argument transaction
## `tx` as top layer of level 1 (i.e. this is the only transaction.) Rolling
## back will end up at the backend layer (incl. backend filter.)
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
let db = tx.db
# Verify `tx` argument
if db.txRef == tx:
if db.top.txUid != tx.txUid:
return err(TxArgStaleTx)
elif db.stack.len <= tx.level:
return err(TxArgStaleTx)
elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx)
# Provide new empty stack layer
let stackLayer = block:
let rc = db.getTuvBE()
if rc.isOk:
LayerRef(vTop: rc.value)
elif rc.error == GetTuvNotFound:
LayerRef.init()
else:
return err(rc.error)
# Set up clone associated to `db`
let txClone = ? db.fork(noToplayer = true, noFilter = false)
txClone.top = db.layersCc tx.level # Provide tx level 1 stack
txClone.stack = @[stackLayer] # Zero level stack
txClone.top.txUid = 1
txClone.txUidGen = 1
# Install transaction similar to `tx` on clone
txClone.txRef = AristoTxRef(
db: txClone,
txUid: 1,
level: 1)
ok(txClone)
proc txForkTop*(
db: AristoDbRef;
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()` for the top transaction if there is any. Otherwise
## the top layer is cloned, and an empty transaction is set up. After
## successful fork the returned descriptor has transaction level 1.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
if db.txRef.isNil:
let txClone = ? db.fork(noToplayer=true, noFilter=false)
txClone.top = db.layersCc # Is a deep copy
discard txClone.txFrameBegin()
return ok(txClone)
# End if()
db.txRef.txFork()
proc txForkBase*(
db: AristoDbRef;
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()`, sort of the opposite of `forkTop()`. This is the
## equivalent of top layer forking after all transactions have been rolled
## back.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
if db.txRef.isNil:
return db.txForkTop()
let txClone = ? db.fork(noToplayer=true, noFilter=false)
txClone.top = db.layersCc 0
discard txClone.txFrameBegin()
ok(txClone)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -52,8 +52,7 @@ proc txStow*(
# `db.balancer` is `nil`. Also, the `db.balancer` is read-only. In the
# case that there are no forked peers one can ignore that restriction as
# no balancer is shared.
db.balancer = deltaMerge(
db.top, modUpperOk = true, db.balancer, modLowerOk = db.nForked()==0)
db.balancer = deltaMerge(db.top, db.balancer)
# New empty top layer
db.top = LayerRef(vTop: db.balancer.vTop)

View File

@ -11,9 +11,7 @@
{.push raises: [].}
import
eth/common,
../../aristo as use_ari,
../../aristo/aristo_desc/desc_identifiers,
../../aristo/[aristo_init/memory_only, aristo_walk],
../../kvt as use_kvt,
../../kvt/[kvt_init/memory_only, kvt_walk],
@ -53,41 +51,6 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
KvtDbRef.init(use_kvt.VoidBackendRef),
AristoDbRef.init(use_ari.VoidBackendRef))
proc newCtxByKey*(
ctx: CoreDbCtxRef;
key: Hash32;
info: static[string];
): CoreDbRc[CoreDbCtxRef] =
const
rvid: RootedVertexID = (VertexID(1),VertexID(1))
let
db = ctx.parent
# Find `(vid,key)` on transaction stack
inx = block:
let rc = db.ariApi.call(findTx, ctx.mpt, rvid, key.to(HashKey))
if rc.isErr:
return err(rc.error.toError info)
rc.value
# Fork MPT descriptor that provides `(vid,key)`
newMpt = block:
let rc = db.ariApi.call(forkTx, ctx.mpt, inx)
if rc.isErr:
return err(rc.error.toError info)
rc.value
# Fork KVT descriptor parallel to `newMpt`
newKvt = block:
let rc = db.kvtApi.call(forkTx, ctx.kvt, inx)
if rc.isErr:
discard db.ariApi.call(forget, newMpt)
return err(rc.error.toError info)
rc.value
# Create new context
ok(db.bless CoreDbCtxRef(kvt: newKvt, mpt: newMpt))
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -12,7 +12,6 @@
import
chronicles,
eth/common,
rocksdb,
results,
../../aristo,

View File

@ -13,15 +13,13 @@
import
std/typetraits,
eth/common,
"../.."/[constants, errors],
"../.."/[constants],
".."/[kvt, aristo],
./backend/aristo_db,
./base/[api_tracking, base_config, base_desc, base_helpers]
export
CoreDbAccRef,
CoreDbAccount,
CoreDbApiError,
CoreDbCtxRef,
CoreDbErrorCode,
CoreDbError,
@ -70,75 +68,6 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef =
##
db.defCtx
proc newCtxByKey*(ctx: CoreDbCtxRef; root: Hash32): CoreDbRc[CoreDbCtxRef] =
## Create new context derived from a matching transaction of the currently
## active context. If successful, the resulting context has the following
## properties:
##
## * Transaction level is 1
## * The state of the accounts column is equal to the argument `root`
##
## If successful, the resulting descriptor **must** be manually released
## with `forget()` when it is not used, anymore.
##
## Note:
## The underlying `Aristo` backend uses lazy hashing so this function
## might fail simply because there is no computed state when nesting
## the next transaction. If the previous transaction needs to be found,
## then it must be called like this:
## ::
## let db = .. # Instantiate CoreDb handle
## ...
## discard db.ctx.getAccounts.state() # Compute state hash
## db.ctx.newTransaction() # Enter new transaction
##
## However, remember that unused hash computations are costly relative
## to processing time.
##
ctx.setTrackNewApi CtxNewCtxByKeyFn
result = ctx.newCtxByKey(root, $api)
ctx.ifTrackNewApi: debug logTxt, api, elapsed, root=($$root), result
proc swapCtx*(ctx: CoreDbCtxRef; db: CoreDbRef): CoreDbCtxRef =
## Activate argument context `ctx` as default and return the previously
## active context. This function goes typically together with `forget()`.
## A valid scenario might look like
## ::
## let db = .. # Instantiate CoreDb handle
## ...
## let ctx = newCtxByKey(..).expect "ctx" # Create new context
## let saved = db.swapCtx ctx # Swap context handles
## defer: db.swapCtx(saved).forget() # Restore
## ...
##
doAssert not ctx.isNil
assert db.defCtx != ctx # debugging only
db.setTrackNewApi CtxSwapCtxFn
# Swap default context with argument `ctx`
result = db.defCtx
db.defCtx = ctx
# Set read-write access and install
CoreDbAccRef(ctx).call(reCentre, db.ctx.mpt).isOkOr:
raiseAssert $api & " failed: " & $error
CoreDbKvtRef(ctx).call(reCentre, db.ctx.kvt).isOkOr:
raiseAssert $api & " failed: " & $error
doAssert db.defCtx != result
db.ifTrackNewApi: debug logTxt, api, elapsed
proc forget*(ctx: CoreDbCtxRef) =
## Dispose `ctx` argument context and related columns created with this
## context. This function throws an exception if `ctx` is the default context.
##
ctx.setTrackNewApi CtxForgetFn
doAssert ctx != ctx.parent.defCtx
CoreDbAccRef(ctx).call(forget, ctx.mpt).isOkOr:
raiseAssert $api & ": " & $error
CoreDbKvtRef(ctx).call(forget, ctx.kvt).isOkOr:
raiseAssert $api & ": " & $error
ctx.ifTrackNewApi: debug logTxt, api, elapsed
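
The removed `newCtxByKey`/`forget` pair gave callers a manually managed, level-one view of an older account state, with `swapCtx` used to activate it as sketched in its doc comment. A minimal sketch of the derivation and release steps under the pre-removal API, assuming a `CoreDbRef` named `db` and a known state root `root: Hash32` (error handling reduced to `expect`; the variable name is illustrative):

  # Derive a context whose accounts column matches `root` ...
  let histCtx = db.ctx.newCtxByKey(root).expect "state root known to a transaction"
  # ... activate it with `swapCtx` as shown in the doc comment above, then:
  histCtx.forget()                  # extra contexts must be released manually
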
# ------------------------------------------------------------------------------
# Public base descriptor methods
# ------------------------------------------------------------------------------

View File

@ -33,7 +33,6 @@ type
AccClearStorageFn = "clearStorage"
AccDeleteFn = "acc/delete"
AccFetchFn = "acc/fetch"
AccForgetFn = "acc/forget"
AccHasPathFn = "acc/hasPath"
AccMergeFn = "acc/merge"
AccProofFn = "acc/proof"
@ -64,11 +63,8 @@ type
CptPopFn = "pop"
CptStopCaptureFn = "stopCapture"
CtxForgetFn = "ctx/forget"
CtxGetAccountsFn = "getAccounts"
CtxGetGenericFn = "getGeneric"
CtxNewCtxByKeyFn = "newCtxByKey"
CtxSwapCtxFn = "swapCtx"
KvtDelFn = "del"
KvtGetFn = "get"

View File

@ -12,19 +12,19 @@
import
std/typetraits,
eth/common,
../../errors,
stint,
eth/common/hashes,
../aristo as use_ari,
../kvt as use_kvt,
../kvt/[kvt_init/memory_only, kvt_walk],
./base/[api_tracking, base_config, base_desc]
export stint, hashes
when CoreDbEnableApiJumpTable:
discard
else:
import
../aristo/[aristo_desc, aristo_path],
../kvt/[kvt_desc, kvt_tx]
../aristo/[aristo_desc, aristo_path]
when CoreDbEnableApiTracking:
import
@ -34,20 +34,11 @@ when CoreDbEnableApiTracking:
const
logTxt = "API"
# Annotation helper(s)
{.pragma: apiRaise, gcsafe, raises: [CoreDbApiError].}
template valueOrApiError[U,V](rc: Result[U,V]; info: static[string]): U =
rc.valueOr: raise (ref CoreDbApiError)(msg: info)
template dbType(dsc: CoreDbKvtRef | CoreDbAccRef): CoreDbType =
dsc.distinctBase.parent.dbType
# ---------------
template kvt(dsc: CoreDbKvtRef): KvtDbRef =
dsc.distinctBase.kvt
template call(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped =
when CoreDbEnableApiJumpTable:
api.fn(args)
@ -79,25 +70,6 @@ template call(
# Public iterators
# ------------------------------------------------------------------------------
iterator pairs*(kvt: CoreDbKvtRef): (seq[byte], seq[byte]) {.apiRaise.} =
## Iterator supported on memory DB (otherwise implementation dependent)
##
kvt.setTrackNewApi KvtPairsIt
case kvt.dbType:
of AristoDbMemory:
let p = kvt.call(forkTx, kvt.kvt, 0).valueOrApiError "kvt/pairs()"
defer: discard kvt.call(forget, p)
for (k,v) in use_kvt.MemBackendRef.walkPairs p:
yield (k,v)
of AristoDbVoid:
let p = kvt.call(forkTx, kvt.kvt, 0).valueOrApiError "kvt/pairs()"
defer: discard kvt.call(forget, p)
for (k,v) in use_kvt.VoidBackendRef.walkPairs p:
yield (k,v)
of Ooops, AristoDbRocks:
raiseAssert: "Unsupported database type: " & $kvt.dbType
kvt.ifTrackNewApi: debug logTxt, api, elapsed
iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash32): (seq[byte], UInt256) =
acc.setTrackNewApi AccSlotPairsIt
case acc.dbType:

View File

@ -11,7 +11,6 @@
{.push raises: [].}
import
eth/common,
../aristo,
./backend/aristo_db,
./base/base_config,
@ -25,7 +24,6 @@ export
base,
base_config,
base_iterators,
common,
core_apps
# ------------------------------------------------------------------------------

View File

@ -13,7 +13,6 @@
import
std/times,
eth/common,
results,
../aristo/aristo_profile,
./kvt_desc/desc_backend,
@ -46,21 +45,16 @@ type
key: openArray[byte]): Result[void,KvtError] {.noRaise.}
KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.}
KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiForkTxFn* = proc(db: KvtDbRef,
backLevel: int): Result[KvtDbRef,KvtError] {.noRaise.}
KvtApiGetFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[seq[byte],KvtError] {.noRaise.}
KvtApiLenFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[int,KvtError] {.noRaise.}
KvtApiHasKeyRcFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[bool,KvtError] {.noRaise.}
KvtApiIsCentreFn* = proc(db: KvtDbRef): bool {.noRaise.}
KvtApiIsTopFn* = proc(tx: KvtTxRef): bool {.noRaise.}
KvtApiLevelFn* = proc(db: KvtDbRef): int {.noRaise.}
KvtApiNForkedFn* = proc(db: KvtDbRef): int {.noRaise.}
KvtApiPutFn* = proc(db: KvtDbRef,
key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
KvtApiReCentreFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
KvtApiPersistFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}
@ -75,17 +69,12 @@ type
commit*: KvtApiCommitFn
del*: KvtApiDelFn
finish*: KvtApiFinishFn
forget*: KvtApiForgetFn
forkTx*: KvtApiForkTxFn
get*: KvtApiGetFn
len*: KvtApiLenFn
hasKeyRc*: KvtApiHasKeyRcFn
isCentre*: KvtApiIsCentreFn
isTop*: KvtApiIsTopFn
level*: KvtApiLevelFn
nForked*: KvtApiNForkedFn
put*: KvtApiPutFn
reCentre*: KvtApiReCentreFn
rollback*: KvtApiRollbackFn
persist*: KvtApiPersistFn
toKvtDbRef*: KvtApiToKvtDbRefFn
@ -100,17 +89,12 @@ type
KvtApiProfCommitFn = "commit"
KvtApiProfDelFn = "del"
KvtApiProfFinishFn = "finish"
KvtApiProfForgetFn = "forget"
KvtApiProfForkTxFn = "forkTx"
KvtApiProfGetFn = "get"
KvtApiProfLenFn = "len"
KvtApiProfHasKeyRcFn = "hasKeyRc"
KvtApiProfIsCentreFn = "isCentre"
KvtApiProfIsTopFn = "isTop"
KvtApiProfLevelFn = "level"
KvtApiProfNForkedFn = "nForked"
KvtApiProfPutFn = "put"
KvtApiProfReCentreFn = "reCentre"
KvtApiProfRollbackFn = "rollback"
KvtApiProfPersistFn = "persist"
KvtApiProfToKvtDbRefFn = "toKvtDbRef"
@ -136,16 +120,11 @@ when AutoValidateApiHooks:
doAssert not api.commit.isNil
doAssert not api.del.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.forkTx.isNil
doAssert not api.get.isNil
doAssert not api.hasKeyRc.isNil
doAssert not api.isCentre.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.put.isNil
doAssert not api.reCentre.isNil
doAssert not api.rollback.isNil
doAssert not api.persist.isNil
doAssert not api.toKvtDbRef.isNil
@ -178,17 +157,12 @@ func init*(api: var KvtApiObj) =
api.commit = commit
api.del = del
api.finish = finish
api.forget = forget
api.forkTx = forkTx
api.get = get
api.len = len
api.hasKeyRc = hasKeyRc
api.isCentre = isCentre
api.isTop = isTop
api.level = level
api.nForked = nForked
api.put = put
api.reCentre = reCentre
api.rollback = rollback
api.persist = persist
api.toKvtDbRef = toKvtDbRef
@ -206,17 +180,12 @@ func dup*(api: KvtApiRef): KvtApiRef =
commit: api.commit,
del: api.del,
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
get: api.get,
len: api.len,
hasKeyRc: api.hasKeyRc,
isCentre: api.isCentre,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
put: api.put,
reCentre: api.reCentre,
rollback: api.rollback,
persist: api.persist,
toKvtDbRef: api.toKvtDbRef,
@ -267,16 +236,6 @@ func init*(
KvtApiProfFinishFn.profileRunner:
api.finish(a, b)
profApi.forget =
proc(a: KvtDbRef): auto =
KvtApiProfForgetFn.profileRunner:
result = api.forget(a)
profApi.forkTx =
proc(a: KvtDbRef, b: int): auto =
KvtApiProfForkTxFn.profileRunner:
result = api.forkTx(a, b)
profApi.get =
proc(a: KvtDbRef, b: openArray[byte]): auto =
KvtApiProfGetFn.profileRunner:
@ -292,11 +251,6 @@ func init*(
KvtApiProfHasKeyRcFn.profileRunner:
result = api.hasKeyRc(a, b)
profApi.isCentre =
proc(a: KvtDbRef): auto =
KvtApiProfIsCentreFn.profileRunner:
result = api.isCentre(a)
profApi.isTop =
proc(a: KvtTxRef): auto =
KvtApiProfIsTopFn.profileRunner:
@ -307,21 +261,11 @@ func init*(
KvtApiProfLevelFn.profileRunner:
result = api.level(a)
profApi.nForked =
proc(a: KvtDbRef): auto =
KvtApiProfNForkedFn.profileRunner:
result = api.nForked(a)
profApi.put =
proc(a: KvtDbRef; b, c: openArray[byte]): auto =
KvtApiProfPutFn.profileRunner:
result = api.put(a, b, c)
profApi.reCentre =
proc(a: KvtDbRef): auto =
KvtApiProfReCentreFn.profileRunner:
result = api.reCentre(a)
profApi.rollback =
proc(a: KvtTxRef): auto =
KvtApiProfRollbackFn.profileRunner:

View File

@ -12,7 +12,6 @@
import
std/[algorithm, sequtils, strutils, tables],
eth/common,
results,
stew/byteutils,
./kvt_desc/desc_backend,

View File

@ -16,8 +16,7 @@ import
std/tables,
results,
./kvt_desc,
./kvt_desc/desc_backend,
./kvt_delta/[delta_merge, delta_reverse]
./kvt_desc/desc_backend
# ------------------------------------------------------------------------------
# Public functions
@ -25,12 +24,11 @@ import
proc deltaPersistentOk*(db: KvtDbRef): bool =
## Check whether the balancer filter can be merged into the backend
not db.backend.isNil and db.isCentre
not db.backend.isNil
proc deltaPersistent*(
db: KvtDbRef; # Database
reCentreOk = false;
): Result[void,KvtError] =
## Resolve (i.e. move) the backend filter into the physical backend database.
##
@ -50,32 +48,6 @@ proc deltaPersistent*(
if db.balancer.isNil:
return ok()
# Make sure that the argument `db` is at the centre so the backend is in
# read-write mode for this peer.
let parent = db.getCentre
if db != parent:
if not reCentreOk:
return err(FilBackendRoMode)
? db.reCentre()
# Always re-centre to `parent` (in case `reCentreOk` was set)
defer: discard parent.reCentre()
# Update forked balancers here so that errors are detected early (if any.)
if 0 < db.nForked:
let rev = db.revFilter(db.balancer).valueOr:
return err(error[1])
if 0 < rev.sTab.len: # Can an empty `rev` happen at all?
var unsharedRevOk = true
for w in db.forked:
if not w.db.balancer.isValid:
unsharedRevOk = false
# The `rev` filter can be modified if one can make sure that it is
# not shared (i.e. only previously merged into the w.db.balancer.)
# Note that it is trivially true for a single fork.
let modLowerOk = w.isLast and unsharedRevOk
w.db.balancer = deltaMerge(
w.db.balancer, modUpperOk=false, rev, modLowerOk=modLowerOk)
# Store structural single trie entries
let writeBatch = ? be.putBegFn()
for k,v in db.balancer.sTab:

View File

@ -9,7 +9,6 @@
# except according to those terms.
import
std/tables,
../kvt_desc
# ------------------------------------------------------------------------------
@ -26,9 +25,7 @@ proc layersMergeOnto(src: LayerRef; trg: var LayerObj) =
proc deltaMerge*(
upper: LayerRef; # Think of `top`, `nil` is ok
modUpperOk: bool; # May re-use/modify `upper`
lower: LayerRef; # Think of `balancer`, `nil` is ok
modLowerOk: bool; # May re-use/modify `lower`
): LayerRef =
## Merge argument `upper` into the `lower` filter instance.
##
@ -37,33 +34,17 @@ proc deltaMerge*(
##
if lower.isNil:
# Degenerate case: `upper` is void
result = upper
upper
elif upper.isNil:
# Degenerate case: `lower` is void
result = lower
lower
elif modLowerOk:
else:
# Can modify `lower` which is the preferred action mode but applies only
# in cases where the `lower` argument is not shared.
layersMergeOnto(upper, lower[])
result = lower
elif not modUpperOk:
# Cannot modify any argument layers.
result = LayerRef(sTab: lower.sTab)
layersMergeOnto(upper, result[])
else:
# Otherwise avoid copying some tables by modifying `upper`. This is not
# completely free as the merge direction changes to merging the `lower`
# layer up into the higher prioritised `upper` layer (note that the `lower`
# argument filter is read-only.) Here again, the `upper` argument must not
# be a shared layer/filter.
for (key,val) in lower.sTab.pairs:
if not upper.sTab.hasKey(key):
upper.sTab[key] = val
result = upper
lower
# ------------------------------------------------------------------------------
# End

View File

@ -1,47 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/tables,
eth/common,
results,
".."/[kvt_desc, kvt_utils]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc revFilter*(
db: KvtDbRef; # Database
filter: LayerRef; # Filter to revert
): Result[LayerRef,(seq[byte],KvtError)] =
## Assemble reverse filter for the `filter` argument, i.e. changes to the
## backend that reverse the effect of applying this read-only filter.
##
## This read-only filter is calculated against the current unfiltered
## backend (excluding optionally installed read-only filter.)
##
let rev = LayerRef()
# Calculate reverse changes for the `sTab[]` structural table
for key in filter.sTab.keys:
let rc = db.getUbe key
if rc.isOk:
rev.sTab[key] = rc.value
elif rc.error == GetNotFound:
rev.sTab[key] = EmptyBlob
else:
return err((key,rc.error))
ok(rev)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -14,9 +14,7 @@
{.push raises: [].}
import
std/[hashes, sets, tables],
eth/common,
results,
std/[hashes, tables],
./kvt_constants,
./kvt_desc/[desc_error, desc_structural]
@ -25,7 +23,7 @@ from ./kvt_desc/desc_backend
# Not auto-exporting backend
export
kvt_constants, desc_error, desc_structural
tables, kvt_constants, desc_error, desc_structural
type
KvtTxRef* = ref object
@ -35,13 +33,6 @@ type
txUid*: uint ## Unique ID among transactions
level*: int ## Stack index for this transaction
DudesRef = ref object
## List of peers accessing the same database. This list is lazily
## allocated and might be kept with a single entry, i.e. so that
## `{centre} == peers`.
centre: KvtDbRef ## Link to peer with write permission
peers: HashSet[KvtDbRef] ## List of all peers
KvtDbRef* = ref object of RootRef
## Three tier database object supporting distributed instances.
top*: LayerRef ## Database working layer, mutable
@ -51,7 +42,6 @@ type
txRef*: KvtTxRef ## Latest active transaction
txUidGen*: uint ## Tx-relative unique number generator
dudes: DudesRef ## Related DB descriptors
# Debugging data below, might go away in future
xIdGen*: uint64
@ -61,17 +51,6 @@ type
KvtDbAction* = proc(db: KvtDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure.
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc canMod(db: KvtDbRef): Result[void,KvtError] =
## Ask for permission before doing nasty stuff
if db.backend.isNil:
ok()
else:
db.backend.canModFn()
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@ -98,120 +77,6 @@ func hash*(db: KvtDbRef): Hash =
# Public functions, `dude` related
# ------------------------------------------------------------------------------
func isCentre*(db: KvtDbRef): bool =
## This function returns `true` if the argument `db` is the centre (see
## comments on `reCentre()` for details.)
##
db.dudes.isNil or db.dudes.centre == db
func getCentre*(db: KvtDbRef): KvtDbRef =
## Get the centre descriptor among all other descriptors accessing the same
## backend database (see comments on `reCentre()` for details.)
##
if db.dudes.isNil: db else: db.dudes.centre
proc reCentre*(db: KvtDbRef): Result[void,KvtError] =
## Re-focus the `db` argument descriptor so that it becomes the centre.
## Nothing is done if the `db` descriptor is the centre, already.
##
## With several descriptors accessing the same backend database there is a
## single one that has write permission for the backend (regardless whether
## there is a backend, at all.) The descriptor entity with write permission
## is called *the centre*.
##
## After invoking `reCentre()`, the argument database `db` can only be
## destructed by `finish()` which also destructs all other descriptors
## accessing the same backend database. Descriptors where `isCentre()`
## returns `false` must be single destructed with `forget()`.
##
if not db.dudes.isNil and db.dudes.centre != db:
? db.canMod()
db.dudes.centre = db
ok()
proc fork*(
db: KvtDbRef;
noTopLayer = false;
noFilter = false;
): Result[KvtDbRef,KvtError] =
## This function creates a new empty descriptor accessing the same backend
## (if any) database as the argument `db`. This new descriptor joins the
## list of descriptors accessing the same backend database.
##
## After use, any unused non centre descriptor should be destructed via
## `forget()`. Not doing so will not only hold memory resources but might
## also cost computing resources for maintaining and updating backend
## filters when writing to the backend database.
##
## If the argument `noFilter` is set `true` the function will fork directly
## off the backend database and ignore any filter.
##
# Make sure that there is a dudes list
if db.dudes.isNil:
db.dudes = DudesRef(centre: db, peers: @[db].toHashSet)
let clone = KvtDbRef(
backend: db.backend,
dudes: db.dudes)
if not noFilter:
clone.balancer = db.balancer # Ref is ok here (filters are immutable)
if not noTopLayer:
clone.top = LayerRef.init()
# Add to peer list of clones
db.dudes.peers.incl clone
ok clone
iterator forked*(db: KvtDbRef): tuple[db: KvtDbRef, isLast: bool] =
## Iterate over all non centre descriptors (see comments on `reCentre()`
## for details.)
##
## The second `isLast` yielded loop entry is `true` if the yielded tuple
## is the last entry in the list.
if not db.dudes.isNil:
var nLeft = db.dudes.peers.len
for dude in db.dudes.peers.items:
if dude != db.dudes.centre:
nLeft.dec
yield (dude, nLeft == 1)
func nForked*(db: KvtDbRef): int =
## Returns the number of non centre descriptors (see comments on `reCentre()`
## for details.) This function is a fast version of `db.forked.toSeq.len`.
if not db.dudes.isNil:
return db.dudes.peers.len - 1
proc forget*(db: KvtDbRef): Result[void,KvtError] =
## Destruct the non centre argument `db` descriptor (see comments on
## `reCentre()` for details.)
##
## A non centre descriptor should always be destructed after use (see also
## comments on `fork()`.)
##
if db.isCentre:
err(NotAllowedOnCentre)
elif db notin db.dudes.peers:
err(StaleDescriptor)
else:
? db.canMod()
db.dudes.peers.excl db # Unlink argument `db` from peers list
ok()
proc forgetOthers*(db: KvtDbRef): Result[void,KvtError] =
## For the centre argument `db` descriptor (see comments on `reCentre()`
## for details), release all other descriptors accessing the same backend.
##
if not db.dudes.isNil:
if db.dudes.centre != db:
return err(MustBeOnCentre)
? db.canMod()
db.dudes = DudesRef(nil)
ok()
iterator rstack*(db: KvtDbRef): LayerRef =
# Stack in reverse order
for i in 0..<db.stack.len:

View File

@ -15,7 +15,6 @@
{.push raises: [].}
import
eth/common,
results,
./desc_error
@ -56,12 +55,6 @@ type
## `false` the outcome might differ depending on the type of backend
## (e.g. in-memory backends would eradicate on close.)
CanModFn* =
proc(): Result[void,KvtError] {.gcsafe, raises: [].}
## This function returns OK if there is nothing to prevent the main
## `KVT` descriptors being modified (e.g. by `reCentre()`) or by
## adding/removing a new peer (e.g. by `fork()` or `forget()`.)
SetWrReqFn* =
proc(db: RootRef): Result[void,KvtError] {.gcsafe, raises: [].}
## This function stores a request function for the piggyback mode
@ -86,7 +79,6 @@ type
putEndFn*: PutEndFn ## Commit bulk store session
closeFn*: CloseFn ## Generic destructor
canModFn*: CanModFn ## Lock-alike
setWrReqFn*: SetWrReqFn ## Register main descr for write request
@ -97,7 +89,6 @@ proc init*(trg: var BackendObj; src: BackendObj) =
trg.putKvpFn = src.putKvpFn
trg.putEndFn = src.putEndFn
trg.closeFn = src.closeFn
trg.canModFn = src.canModFn
trg.setWrReqFn = src.setWrReqFn
# ------------------------------------------------------------------------------

View File

@ -16,6 +16,8 @@
import
std/tables
export tables
type
LayerRef* = ref LayerObj
LayerObj* = object

View File

@ -129,11 +129,6 @@ proc closeFn(db: MemBackendRef): CloseFn =
proc(ignore: bool) =
discard
proc canModFn(db: MemBackendRef): CanModFn =
result =
proc(): Result[void,KvtError] =
ok()
proc setWrReqFn(db: MemBackendRef): SetWrReqFn =
result =
proc(kvt: RootRef): Result[void,KvtError] =
@ -156,16 +151,9 @@ proc memoryBackend*: BackendRef =
db.putEndFn = putEndFn db
db.closeFn = closeFn db
db.canModFn = canModFn db
db.setWrReqFn = setWrReqFn db
db
proc dup*(db: MemBackendRef): MemBackendRef =
## Duplicate descriptor shell as needed for API debugging
new result
init_common.init(result[], db[])
result.mdb = db.mdb
# ------------------------------------------------------------------------------
# Public iterators (needs direct backend access)
# ------------------------------------------------------------------------------

View File

@ -63,7 +63,7 @@ proc init*(
): T =
## Shortcut for `KvtDbRef.init(VoidBackendRef)`
KvtDbRef.init VoidBackendRef
proc finish*(db: KvtDbRef; eradicate = false) =
## Backend destructor. The argument `eradicate` indicates that a full
@ -73,7 +73,6 @@ proc finish*(db: KvtDbRef; eradicate = false) =
##
if not db.backend.isNil:
db.backend.closeFn eradicate
discard db.getCentre.forgetOthers()
# ------------------------------------------------------------------------------
# End

View File

@ -28,7 +28,6 @@
import
chronicles,
eth/common,
rocksdb,
results,
../../aristo/aristo_init/persistent,
@ -153,11 +152,6 @@ proc closeFn(db: RdbBackendRef): CloseFn =
proc(eradicate: bool) =
db.rdb.destroy(eradicate)
proc canModFn(db: RdbBackendRef): CanModFn =
result =
proc(): Result[void,KvtError] =
ok()
proc setWrReqFn(db: RdbBackendRef): SetWrReqFn =
result =
proc(kvt: RootRef): Result[void,KvtError] =
@ -206,15 +200,6 @@ proc closeTriggeredFn(db: RdbBackendRef): CloseFn =
# Nothing to do here as we do not own the backend
discard
proc canModTriggeredFn(db: RdbBackendRef): CanModFn =
## Variant of `canModFn()` for piggyback write batch
result =
proc(): Result[void,KvtError] =
# Deny modifications/changes if there is a pending write request
if not db.rdb.delayedPersist.isNil:
return err(RdbBeDelayedLocked)
ok()
proc setWrReqTriggeredFn(db: RdbBackendRef): SetWrReqFn =
result =
proc(kvt: RootRef): Result[void,KvtError] =
@ -291,7 +276,6 @@ proc rocksDbKvtBackend*(
db.putEndFn = putEndFn db
db.closeFn = closeFn db
db.canModFn = canModFn db
db.setWrReqFn = setWrReqFn db
ok db
@ -321,16 +305,9 @@ proc rocksDbKvtTriggeredBackend*(
db.putEndFn = putEndTriggeredFn db
db.closeFn = closeTriggeredFn db
db.canModFn = canModTriggeredFn db
db.setWrReqFn = setWrReqTriggeredFn db
ok db
proc dup*(db: RdbBackendRef): RdbBackendRef =
new result
init_common.init(result[], db[])
result.rdb = db.rdb
# ------------------------------------------------------------------------------
# Public iterators (needs direct backend access)
# ------------------------------------------------------------------------------

View File

@ -18,6 +18,8 @@ import
../../kvt_desc,
rocksdb
export rocksdb
type
RdbInst* = object
store*: KvtCfStore ## Rocks DB database handler

View File

@ -14,8 +14,6 @@
{.push raises: [].}
import
eth/common,
rocksdb,
results,
"../.."/[kvt_constants, kvt_desc],
./rdb_desc

View File

@ -15,7 +15,6 @@
import
std/[sequtils, os],
rocksdb,
results,
../../../opts,
../../kvt_desc,

View File

@ -14,8 +14,6 @@
{.push raises: [].}
import
eth/common,
rocksdb,
results,
../../kvt_desc,
./rdb_desc

View File

@ -14,8 +14,6 @@
{.push raises: [].}
import
eth/common,
rocksdb,
./rdb_desc
const

View File

@ -12,7 +12,6 @@
import
std/[sequtils, sets, tables],
eth/common,
results,
./kvt_desc

View File

@ -15,7 +15,7 @@
import
results,
./kvt_tx/[tx_fork, tx_frame, tx_stow],
./kvt_tx/[tx_frame, tx_stow],
./kvt_init/memory_only,
./kvt_desc
@ -52,65 +52,6 @@ func toKvtDbRef*(tx: KvtTxRef): KvtDbRef =
## Same as `.to(KvtDbRef)`
tx.db
proc forkTx*(
db: KvtDbRef;
backLevel: int; # Backward location of transaction
): Result[KvtDbRef,KvtError] =
## Fork a new descriptor obtained from parts of the argument database
## as described by arguments `db` and `backLevel`.
##
## If the argument `backLevel` is non-negative, the forked descriptor will
## provide the database view where the first `backLevel` transaction layers
## are stripped and the remaining layers are squashed into a single transaction.
##
## If `backLevel` is `-1`, a database descriptor with empty transaction
## layers will be provided where the `roFilter` between database and
## transaction layers is kept in place.
##
## If `backLevel` is `-2`, a database descriptor with empty transaction
## layers will be provided without an `roFilter`.
##
## The returned database descriptor will always have transaction level one.
## If there were no transactions that could be squashed, an empty
## transaction is added.
##
## Use `kvt_desc.forget()` to clean up this descriptor.
##
# Fork top layer (with or without pending transaction)?
if backLevel == 0:
return db.txForkTop()
# Fork bottom layer (=> 0 < db.stack.len)
if backLevel == db.stack.len:
return db.txForkBase()
# Inspect transaction stack
if 0 < backLevel:
var tx = db.txRef
if tx.isNil or db.stack.len < backLevel:
return err(TxLevelTooDeep)
# Fetch tx of level `backLevel` (need to skip some items)
for _ in 0 ..< backLevel:
tx = tx.parent
if tx.isNil:
return err(TxStackGarbled)
return tx.txFork()
# Plain fork, include `roFilter`
if backLevel == -1:
let xb = ? db.fork(noFilter=false)
discard xb.txFrameBegin()
return ok(xb)
# Plain fork, unfiltered backend
if backLevel == -2:
let xb = ? db.fork(noFilter=true)
discard xb.txFrameBegin()
return ok(xb)
err(TxLevelUseless)
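# [Editor's sketch of the *removed* `forkTx` call patterns, reconstructed from
# the doc comment above; illustrative only, not part of the commit.]
#
#   let top  = ? db.forkTx(0)             # clone the current top layer (level 1 afterwards)
#   let base = ? db.forkTx(db.stack.len)  # squash the whole stack into one layer
#   let raw  = ? db.forkTx(-2)            # empty layers, unfiltered backend view
#   ...
#   # each forked descriptor must be released with `kvt_desc.forget()` when done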
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------

View File

@ -1,95 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Kvt DB -- Transaction fork helpers
## ==================================
##
{.push raises: [].}
import
results,
./tx_frame,
".."/[kvt_desc, kvt_layers]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc txFork*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
## Clone a transaction into a new DB descriptor accessing the same backend
## (if any) database as the argument `db`. The new descriptor is linked to
## the transaction parent and is fully functional as a forked instance (see
## comments on `kvt_desc.reCentre()` for details.)
##
## The new DB descriptor will contain a copy of the argument transaction
## `tx` as top layer of level 1 (i.e. this is the only transaction.) Rolling
## back will end up at the backend layer (incl. backend filter.)
##
## Use `kvt_desc.forget()` to clean up this descriptor.
##
let db = tx.db
# Verify `tx` argument
if db.txRef == tx:
if db.top.txUid != tx.txUid:
return err(TxArgStaleTx)
elif db.stack.len <= tx.level:
return err(TxArgStaleTx)
elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx)
# Set up clone associated to `db`
let txClone = ? db.fork()
txClone.top = db.layersCc tx.level
txClone.stack = @[LayerRef.init()] # Provide tx level 1 stack
txClone.top.txUid = 1
txClone.txUidGen = 1 # Used value of `txClone.top.txUid`
# Install transaction similar to `tx` on clone
txClone.txRef = KvtTxRef(
db: txClone,
txUid: 1,
level: 1)
ok(txClone)
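# [Editor's note, not part of the commit: invariants of the clone as set up by
# the code above, shown as an illustrative sketch of the removed API.]
#
#   let clone = ? tx.txFork()
#   doAssert clone.txRef.level == 1   # always level 1, whatever `tx.level` was
#   doAssert clone.stack.len == 1     # single fresh layer from `LayerRef.init()`
#   # release with `kvt_desc.forget()` when done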
proc txForkTop*(db: KvtDbRef): Result[KvtDbRef,KvtError] =
## Variant of `forkTx()` for the top transaction if there is any. Otherwise
## the top layer is cloned, and an empty transaction is set up. After
## successful fork the returned descriptor has transaction level 1.
##
## Use `kvt_desc.forget()` to clean up this descriptor.
##
if db.txRef.isNil:
let dbClone = ? db.fork(noToplayer=true)
dbClone.top = db.layersCc()
discard dbClone.txFrameBegin()
return ok(dbClone)
db.txRef.txFork
proc txForkBase*(
db: KvtDbRef;
): Result[KvtDbRef,KvtError] =
if db.txRef.isNil:
return db.txForkTop()
let txClone = ? db.fork(noToplayer=true, noFilter=false)
txClone.top = db.layersCc 0
discard txClone.txFrameBegin()
ok(txClone)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -53,8 +53,7 @@ proc txStow*(
# `db.balancer` is `nil`. Also, the `db.balancer` is read-only. In the
# case that there are no forked peers one can ignore that restriction as
# no balancer is shared.
db.balancer = deltaMerge(
db.top, modUpperOk = true, db.balancer, modLowerOk = db.nForked()==0)
db.balancer = deltaMerge(db.top, db.balancer)
# New empty top layer
db.top = LayerRef()

View File

@ -15,11 +15,12 @@
import
std/tables,
eth/common,
results,
./kvt_desc/desc_backend,
"."/[kvt_desc, kvt_layers]
export results
# ------------------------------------------------------------------------------
# Public functions, converters
# ------------------------------------------------------------------------------

View File

@ -12,7 +12,6 @@
## ==================================================
##
import
eth/common,
../kvt_init/[memory_db, memory_only],
".."/[kvt_desc, kvt_init],
./walk_private

View File

@ -17,7 +17,6 @@
## `./kvt_walk/persistent`.)
##
import
eth/common,
../kvt_init/[rocks_db, persistent],
../kvt_desc,
"."/[memory_only, walk_private]

View File

@ -9,8 +9,7 @@
# distributed except according to those terms.
import
std/[sets, tables],
eth/common,
std/sets,
".."/[kvt_desc, kvt_init, kvt_layers]
# ------------------------------------------------------------------------------

View File

@ -11,7 +11,7 @@
import
tables,
stint,
eth/common,
eth/common/addresses,
../utils/mergeutils
type

View File

@ -9,14 +9,6 @@ type
EVMError* = object of CatchableError
## Base error class for all evm errors.
BlockNotFound* = object of EVMError
## The block with the given number/hash does not exist.
CanonicalHeadNotFound* = object of EVMError
## The chain has no canonical head.
ValidationError* = object of EVMError
## Error to signal something does not pass a validation check.
CoreDbApiError* = object of CatchableError
## Errors related to `CoreDB` API

View File

@ -229,8 +229,6 @@ proc getBlockContent(oracle: Oracle,
return ok(bc)
except RlpError as exc:
return err(exc.msg)
except BlockNotFound as exc:
return err(exc.msg)
type
OracleResult = object

View File

@ -57,10 +57,14 @@ proc init(
root: common.Hash32;
): T =
let ctx = block:
let rc = com.db.ctx.newCtxByKey(root)
if rc.isErr:
raiseAssert "newCptCtx: " & $$rc.error
rc.value
when false:
let rc = com.db.ctx.newCtxByKey(root)
if rc.isErr:
raiseAssert "newCptCtx: " & $$rc.error
rc.value
else:
{.warning: "TODO make a temporary context? newCtxByKey has been obsoleted".}
com.db.ctx
T(db: com.db, root: root, cpt: com.db.pushCapture(), ctx: ctx)
proc init(
@ -75,14 +79,18 @@ proc activate(cc: CaptCtxRef): CaptCtxRef {.discardable.} =
## Install/activate new context `cc.ctx`, old one in `cc.restore`
doAssert not cc.isNil
doAssert cc.restore.isNil # otherwise activated, already
cc.restore = cc.ctx.swapCtx cc.db
if true:
raiseAssert "TODO activte context"
# cc.restore = cc.ctx.swapCtx cc.db
cc
proc release(cc: CaptCtxRef) =
if not cc.restore.isNil: # switch to original context (if any)
let ctx = cc.restore.swapCtx(cc.db)
doAssert ctx == cc.ctx
cc.ctx.forget() # dispose
# if not cc.restore.isNil: # switch to original context (if any)
# let ctx = cc.restore.swapCtx(cc.db)
# doAssert ctx == cc.ctx
if true:
raiseAssert "TODO release context"
# cc.ctx.forget() # dispose
cc.cpt.pop() # discard top layer of actions tracer
# -------------------

View File

@ -105,9 +105,6 @@ proc accountsRunner(
test &"Delete accounts database sub-trees, {accLst.len} lists":
check noisy.testTxMergeAndDeleteSubTree(accLst, dbDir)
test &"Distributed backend balancers {accLst.len} entries":
check noisy.testBalancer(accLst, dbDir)
proc storagesRunner(
noisy = true;
@ -136,9 +133,6 @@ proc storagesRunner(
test &"Delete storage database sub-trees, {stoLst.len} lists":
check noisy.testTxMergeAndDeleteSubTree(stoLst, dbDir)
test &"Distributed backend balancers {stoLst.len} entries":
check noisy.testBalancer(stoLst, dbDir)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

View File

@ -64,9 +64,6 @@ proc dump(pfx: string; dx: varargs[AristoDbRef]): string =
proc dump(dx: varargs[AristoDbRef]): string {.used.} =
"".dump dx
proc dump(w: DbTriplet): string {.used.} =
"db".dump(w[0], w[1], w[2])
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
@ -99,7 +96,7 @@ iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet =
yield [collect[0], collect[1], collect[2], lst]
collect.setLen(0)
proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[AristoDbRef,AristoError] =
let db = block:
if 0 < rdbPath.len:
let (dbOpts, cfOpts) = DbOptions.init().toRocksDb()
@ -123,28 +120,13 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
xCheckRc rc.error == 0:
result = err(rc.error)
let dx = [db, db.forkTx(0).value, db.forkTx(0).value]
xCheck dx[0].nForked == 2
# Reduce unwanted tx layers
for n in 1 ..< dx.len:
xCheck dx[n].level == 1
xCheck dx[n].txTop.value.commit.isOk
# Clause (9) from `aristo/README.md` example
for n in 0 ..< dx.len:
let report = dx[n].mergeList w[n+1]
if report.error != 0:
db.finish(eradicate=true)
xCheck (n, report.error) == (n,0)
return ok(dx)
let dx = db
# ----------------------
proc cleanUp(dx: var DbTriplet) =
if not dx[0].isNil:
dx[0].finish(eradicate=true)
proc cleanUp(dx: var AristoDbRef) =
if not dx.isNil:
dx.finish(eradicate=true)
dx.reset
# ----------------------
@ -227,16 +209,14 @@ proc isDbEq(a, b: LayerRef; db: AristoDbRef; noisy = true): bool =
# ----------------------
proc checkBeOk(
dx: DbTriplet;
dx: AristoDbRef;
forceCache = false;
noisy = true;
): bool =
## ..
for n in 0 ..< dx.len:
let rc = dx[n].checkBE()
xCheckRc rc.error == (0,0):
noisy.say "***", "db checkBE failed",
" n=", n, "/", dx.len-1
let rc = dx.checkBE()
xCheckRc rc.error == (0,0):
noisy.say "***", "db checkBE failed"
true
# ------------------------------------------------------------------------------
@ -267,7 +247,7 @@ proc testBalancer*(
let rc = dbTriplet(w, rdbPath)
xCheckRc rc.error == 0
rc.value
(db1, db2, db3) = (dx[0], dx[1], dx[2])
db1 = dx
defer:
dx.cleanUp()
@ -279,31 +259,15 @@ proc testBalancer*(
let rc = db1.persist()
xCheckRc rc.error == 0
xCheck db1.balancer == LayerRef(nil)
xCheck db2.balancer == db3.balancer
block:
let rc = db2.stow() # non-persistent
xCheckRc rc.error == 0:
noisy.say "*** testDistributedAccess (3)", "n=", n, "db2".dump db2
xCheck db1.balancer == LayerRef(nil)
xCheck db2.balancer != db3.balancer
# Clause (11) from `aristo/README.md` example
discard db2.reCentre()
block:
let rc = db2.persist()
xCheckRc rc.error == 0
xCheck db2.balancer == LayerRef(nil)
# Check/verify backends
block:
let ok = dx.checkBeOk(noisy=noisy)
xCheck ok:
noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3
noisy.say "*** testDistributedAccess (4)", "n=", n
# Capture filters from clause (11)
c11Filter1 = db1.balancer
c11Filter3 = db3.balancer
# Clean up
dx.cleanUp()
@ -317,24 +281,10 @@ proc testBalancer*(
let rc = dbTriplet(w, rdbPath)
xCheckRc rc.error == 0
rc.value
(db1, db2, db3) = (dy[0], dy[1], dy[2])
db1 = dy
defer:
dy.cleanUp()
# Build clause (12) from `aristo/README.md` example
discard db2.reCentre()
block:
let rc = db2.persist()
xCheckRc rc.error == 0
xCheck db2.balancer == LayerRef(nil)
xCheck db1.balancer == db3.balancer
# Clause (13) from `aristo/README.md` example
xCheck not db1.isCentre()
block:
let rc = db1.stow() # non-persistent
xCheckRc rc.error == 0
# Clause (14) from `aristo/README.md` check
let c11Fil1_eq_db1RoFilter = c11Filter1.isDbEq(db1.balancer, db1, noisy)
xCheck c11Fil1_eq_db1RoFilter:
@ -342,12 +292,6 @@ proc testBalancer*(
"db1".dump(db1),
""
# Clause (15) from `aristo/README.md` check
let c11Fil3_eq_db3RoFilter = c11Filter3.isDbEq(db3.balancer, db3, noisy)
xCheck c11Fil3_eq_db3RoFilter:
noisy.say "*** testDistributedAccess (8)", "n=", n,
"db3".dump(db3),
""
# Check/verify backends
block:
let ok = dy.checkBeOk(noisy=noisy)