Aristo db integrate hashify into tx (#1679)

* Renamed type `NoneBackendRef` => `VoidBackendRef`

* Clarify names: `BE=filter+backend` and `UBE=backend (unfiltered)`

why:
Most functions used full names such as `getVtxUnfilteredBackend()` or
  `getKeyBackend()`. After defining abbreviations (and their meanings) it
  seems easier to use `getVtxUBE()` and `getKeyBE()`.

* Integrate `hashify()` process into transaction logic

why:
  Is now transparent unless explicitly controlled.

details:
  Cache changes imply setting a `dirty` flag which in turn triggers
  `hashify()` processing in transaction and `pack()` directives.

* Removed `aristo_tx.exec()` directive

why:
  Inconsistent implementation, functionality will be provided with a
  different paradigm.
This commit is contained in:
Jordan Hrycaj 2023-08-11 18:23:57 +01:00 committed by GitHub
parent 09fabd04eb
commit 01fe172738
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 293 additions and 294 deletions

View File

@ -14,10 +14,10 @@
{.push raises: [].}
import aristo/[
aristo_constants, aristo_delete, aristo_fetch, aristo_hashify, aristo_init,
aristo_constants, aristo_delete, aristo_fetch, aristo_init,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils]
export
aristo_constants, aristo_delete, aristo_fetch, aristo_hashify, aristo_init,
aristo_constants, aristo_delete, aristo_fetch, aristo_init,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils
import

View File

@ -83,8 +83,8 @@ proc checkBE*(
return MemBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendRocksDB:
return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendNone:
return NoneBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendVoid:
return VoidBackendRef.checkBE(db, cache=cache, relax=relax)
ok()
# ------------------------------------------------------------------------------

View File

@ -36,7 +36,7 @@ proc invTo(s: IntervalSetRef[VertexID,uint64]; T: type HashSet[VertexID]): T =
for pt in w.minPt .. w.maxPt:
result.incl pt
proc toNodeBe(
proc toNodeBE(
vtx: VertexRef; # Vertex to convert
db: AristoDbRef; # Database, top layer
): Result[NodeRef,VertexID] =
@ -47,7 +47,7 @@ proc toNodeBe(
if vtx.lData.pType == AccountData:
let vid = vtx.lData.account.storageID
if vid.isValid:
let rc = db.getKeyBackend vid
let rc = db.getKeyBE vid
if rc.isErr or not rc.value.isValid:
return err(vid)
node.key[0] = rc.value
@ -58,7 +58,7 @@ proc toNodeBe(
for n in 0 .. 15:
let vid = vtx.bVid[n]
if vid.isValid:
let rc = db.getKeyBackend vid
let rc = db.getKeyBE vid
if rc.isOk and rc.value.isValid:
node.key[n] = rc.value
else:
@ -69,7 +69,7 @@ proc toNodeBe(
of Extension:
let
vid = vtx.eVid
rc = db.getKeyBackend vid
rc = db.getKeyBE vid
if rc.isOk and rc.value.isValid:
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
node.key[0] = rc.value
@ -80,7 +80,7 @@ proc toNodeBe(
# Public functions
# ------------------------------------------------------------------------------
proc checkBE*[T: RdbBackendRef|MemBackendRef|NoneBackendRef](
proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef; # Database, top layer
relax: bool; # Not compiling hashes if `true`
@ -94,17 +94,17 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|NoneBackendRef](
for (_,vid,vtx) in T.walkVtxBE db:
if not vtx.isValid:
return err((vid,CheckBeVtxInvalid))
let rc = db.getKeyBackend vid
let rc = db.getKeyBE vid
if rc.isErr or not rc.value.isValid:
return err((vid,CheckBeKeyMissing))
for (_,vid,key) in T.walkKeyBE db:
if not key.isvalid:
return err((vid,CheckBeKeyInvalid))
let rc = db.getVtxBackend vid
let rc = db.getVtxBE vid
if rc.isErr or not rc.value.isValid:
return err((vid,CheckBeVtxMissing))
let rx = rc.value.toNodeBe db # backend only
let rx = rc.value.toNodeBE db # backend only
if rx.isErr:
return err((vid,CheckBeKeyCantCompile))
if not relax:
@ -143,7 +143,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|NoneBackendRef](
if lbl.isValid:
return err((vid,CheckBeCacheKeyNonEmpty))
# There must be a representation on the backend DB
if db.getVtxBackend(vid).isErr:
if db.getVtxBE(vid).isErr:
return err((vid,CheckBeCacheVidUnsynced))
# Register deleted vid against backend generator state
discard vids.merge Interval[VertexID,uint64].new(vid,vid)

View File

@ -103,7 +103,7 @@ proc checkCacheCommon*(
for (vid,vtx) in db.top.sTab.pairs:
if not vtx.isValid:
nNilVtx.inc
let rc = db.getVtxBackend vid
let rc = db.getVtxBE vid
if rc.isErr:
return err((vid,CheckAnyVidVtxMissing))
if not db.top.kMap.hasKey vid:

View File

@ -376,7 +376,8 @@ proc ppLayer(
let
pfx1 = indent.toPfx
pfx2 = indent.toPfx(1)
tagOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
nOKs = sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
tagOk = 1 < nOKs
var
pfy = ""
@ -419,6 +420,10 @@ proc ppLayer(
tLen = layer.pPrf.len
info = "pPrf(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.pPrf.ppPPrf
if 0 < nOKs:
let
info = if layer.dirty: "dirty" else: "clean"
result &= info.doPrefix(false)
# ------------------------------------------------------------------------------
# Public functions
@ -615,14 +620,14 @@ proc pp*(
indent = 4;
): string =
## May be called as `db.to(TypedBackendRef).pp(db)`
case (if be.isNil: BackendNone else: be.kind)
case (if be.isNil: BackendVoid else: be.kind)
of BackendMemory:
be.MemBackendRef.ppBe(db, indent)
of BackendRocksDB:
be.RdbBackendRef.ppBe(db, indent)
of BackendNone:
of BackendVoid:
db.roFilter.ppFilter(db, indent) & indent.toPfx & "<BackendNone>"
# ------------------------------------------------------------------------------

View File

@ -50,7 +50,7 @@ proc clearKey(
if lbl.isValid:
db.top.kMap.del vid
db.top.pAmk.del lbl
elif db.getKeyBackend(vid).isOK:
elif db.getKeyBE(vid).isOK:
# Register for deleting on backend
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.pAmk.del lbl
@ -60,7 +60,7 @@ proc doneWith(
vid: VertexID; # Vertex IDs to clear
) =
# Remove entry
if db.getVtxBackend(vid).isOk:
if db.getVtxBE(vid).isOk:
db.top.sTab[vid] = VertexRef(nil) # Will be propagated to backend
else:
db.top.sTab.del vid
@ -278,7 +278,7 @@ proc deleteImpl(
# Will be needed at the end. Just detect an error early enough
let leafVidBe = block:
let rc = db.getVtxBackend lf.vid
let rc = db.getVtxBE lf.vid
if rc.isErr:
if rc.error != GetVtxNotFound:
return err((lf.vid, rc.error))
@ -286,6 +286,9 @@ proc deleteImpl(
else:
rc.value
# Will modify top level cache
db.top.dirty = true
db.doneWith lf.vid
if 1 < hike.legs.len:

View File

@ -42,7 +42,7 @@ type
db*: AristoDbRef ## Database descriptor
parent*: AristoTxRef ## Previous transaction
txUid*: uint ## Unique ID among transactions
stackInx*: int ## Stack index for this transaction
level*: int ## Stack index for this transaction
AristoDbRef* = ref AristoDbObj
AristoDbObj* = object

View File

@ -191,14 +191,12 @@ type
RdbBeIngestSstWriter
# Transaction wrappers
TxNoPendingTx
TxArgStaleTx
TxNotTopTx
TxExecNestingAttempt
TxExecBaseTxLocked
TxExecDirectiveLocked
TxStackUnderflow
TxBackendMissing
TxNoPendingTx
TxNotTopTx
TxStackGarbled
TxStackUnderflow
# Miscellaneous handy helpers
PayloadTypeUnsupported

View File

@ -106,6 +106,7 @@ type
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
txUid*: uint ## Transaction identifier if positive
dirty*: bool ## Needs to be hashified if `true`
# ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef`

View File

@ -29,7 +29,7 @@ type
proc getBeStateRoot(
db: AristoDbRef;
): Result[HashKey,AristoError] =
let rc = db.getKeyBackend VertexID(1)
let rc = db.getKeyBE VertexID(1)
if rc.isOk:
return ok(rc.value)
if rc.error == GetKeyNotFound:
@ -39,7 +39,7 @@ proc getBeStateRoot(
proc getLayerStateRoots(
db: AristoDbRef;
layer: AristoLayerRef;
extendOK: bool;
chunkedMpt: bool;
): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this
## reverse filter was applied.
@ -53,7 +53,7 @@ proc getLayerStateRoots(
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
if spr.fg.isValid:
return ok(spr)
if extendOK:
if chunkedMpt:
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
if vid == VertexID(1):
spr.fg = spr.be
@ -90,7 +90,7 @@ func bulk*(layer: AristolayerRef): int =
proc fwdFilter*(
db: AristoDbRef;
layer: AristoLayerRef;
extendOK = false;
chunkedMpt = false;
): Result[AristoFilterRef,(VertexID,AristoError)] =
## Assemble forward delta, i.e. changes to the backend equivalent to applying
## the current top layer.
@ -107,7 +107,7 @@ proc fwdFilter*(
# Register the Merkle hash keys of the MPT where this reverse filter will be
# applicable: `be => fg`
let (srcRoot, trgRoot) = block:
let rc = db.getLayerStateRoots(layer, extendOk)
let rc = db.getLayerStateRoots(layer, chunkedMpt)
if rc.isOK:
(rc.value.be, rc.value.fg)
elif rc.error == FilPrettyPointlessLayer:
@ -182,7 +182,7 @@ proc merge*(
if vtx.isValid or not newFilter.sTab.hasKey vid:
newFilter.sTab[vid] = vtx
elif newFilter.sTab.getOrVoid(vid).isValid:
let rc = db.getVtxUnfilteredBackend vid
let rc = db.getVtxUBE vid
if rc.isOk:
newFilter.sTab[vid] = vtx # VertexRef(nil)
elif rc.error == GetVtxNotFound:
@ -194,7 +194,7 @@ proc merge*(
if key.isValid or not newFilter.kMap.hasKey vid:
newFilter.kMap[vid] = key
elif newFilter.kMap.getOrVoid(vid).isValid:
let rc = db.getKeyUnfilteredBackend vid
let rc = db.getKeyUBE vid
if rc.isOk:
newFilter.kMap[vid] = key # VOID_HASH_KEY
elif rc.error == GetKeyNotFound:

View File

@ -27,30 +27,30 @@ type
# Public functions
# ------------------------------------------------------------------------------
proc getIdgUnfilteredBackend*(
proc getIdgUBE*(
db: AristoDbRef;
): Result[seq[VertexID],AristoError] =
## Get the ID generator state from the `backend` layer if available.
## Get the ID generator state from the unfiltered backend if available.
let be = db.backend
if not be.isNil:
return be.getIdgFn()
err(GetIdgNotFound)
proc getVtxUnfilteredBackend*(
proc getVtxUBE*(
db: AristoDbRef;
vid: VertexID;
): Result[VertexRef,AristoError] =
## Get the vertex from the `backend` layer if available.
## Get the vertex from the unfiltered backend if available.
let be = db.backend
if not be.isNil:
return be.getVtxFn vid
err GetVtxNotFound
proc getKeyUnfilteredBackend*(
proc getKeyUBE*(
db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError] =
## Get the merkle hash/key from the backend
## Get the merkle hash/key from the unfiltered backend if available.
let be = db.backend
if not be.isNil:
return be.getKeyFn vid
@ -58,37 +58,37 @@ proc getKeyUnfilteredBackend*(
# ------------------
proc getIdgBackend*(
proc getIdgBE*(
db: AristoDbRef;
): Result[seq[VertexID],AristoError] =
## Get the ID generator state from the `backend` layer if available.
if not db.roFilter.isNil and db.roFilter.vGen.isSome:
return ok(db.roFilter.vGen.unsafeGet)
db.getIdgUnfilteredBackend()
db.getIdgUBE()
proc getVtxBackend*(
proc getVtxBE*(
db: AristoDbRef;
vid: VertexID;
): Result[VertexRef,AristoError] =
## Get the vertex from the `backend` layer if available.
## Get the vertex from the (filtered) backend if available.
if not db.roFilter.isNil and db.roFilter.sTab.hasKey vid:
let vtx = db.roFilter.sTab.getOrVoid vid
if vtx.isValid:
return ok(vtx)
return err(GetVtxNotFound)
db.getVtxUnfilteredBackend vid
db.getVtxUBE vid
proc getKeyBackend*(
proc getKeyBE*(
db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError] =
## Get the merkle hash/key from the backend
## Get the merkle hash/key from the (filtered) backend if available.
if not db.roFilter.isNil and db.roFilter.kMap.hasKey vid:
let key = db.roFilter.kMap.getOrVoid vid
if key.isValid:
return ok(key)
return err(GetKeyNotFound)
db.getKeyUnfilteredBackend vid
db.getKeyUBE vid
# ------------------
@ -126,7 +126,7 @@ proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef =
# If the vertex is to be deleted on the backend, a `VertexRef(nil)` entry
# is kept in the local table in which case it is OK to return this value.
return db.top.sTab.getOrVoid vid
let rc = db.getVtxBackend vid
let rc = db.getVtxBE vid
if rc.isOk:
return rc.value
VertexRef(nil)
@ -139,7 +139,7 @@ proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
# If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry
# is kept on the local table in which case it is OK to return this value.
return db.top.kMap.getOrVoid(vid).key
let rc = db.getKeyBackend vid
let rc = db.getKeyBE vid
if rc.isOk:
return rc.value
VOID_HASH_KEY

View File

@ -105,7 +105,7 @@ proc updateHashKey(
# overwritten.)
if backend:
# Ok, vertex is on the backend.
let rc = db.getKeyBackend vid
let rc = db.getKeyBE vid
if rc.isOk:
let key = rc.value
if key == expected:
@ -238,6 +238,9 @@ proc hashify*(
backLink: BackVidTab
downMost: BackVidTab
# Unconditionally mark the top layer
db.top.dirty = true
for (lky,vid) in db.top.lTab.pairs:
let hike = lky.hikeUp(db)
@ -329,6 +332,7 @@ proc hashify*(
backLink.del vid
downMost = redo
db.top.dirty = false
ok completed
# ------------------------------------------------------------------------------

View File

@ -83,7 +83,7 @@ proc hikeUp*(
if not leg.wp.vtx.isValid:
# Register vertex fetched from backend (if any)
let rc = db.getVtxBackend vid
let rc = db.getVtxBE vid
if rc.isErr:
break
leg.backend = true

View File

@ -19,7 +19,7 @@ const
type
AristoBackendType* = enum
BackendNone ## For providing backend-less constructor
BackendVoid ## For providing backend-less constructor
BackendMemory
BackendRocksDB

View File

@ -20,13 +20,13 @@ import
"."/[aristo_init_common, aristo_memory]
type
NoneBackendRef* = ref object of TypedBackendRef
VoidBackendRef* = ref object of TypedBackendRef
## Dummy descriptor type, will typically be used as a `nil` reference
export
AristoBackendType,
VoidBackendRef,
MemBackendRef,
NoneBackendRef,
TypedBackendRef
# ------------------------------------------------------------------------------
@ -37,7 +37,7 @@ proc newAristoDbRef*(
backend: static[AristoBackendType];
): AristoDbRef =
## Simplified prototype for `BackendNone` and `BackendMemory` type backend.
when backend == BackendNone:
when backend == BackendVoid:
AristoDbRef(top: AristoLayerRef())
elif backend == BackendMemory:
@ -66,7 +66,7 @@ proc finish*(db: AristoDbRef; flush = false) =
# -----------------
proc to*[W: TypedBackendRef|MemBackendRef|NoneBackendRef](
proc to*[W: TypedBackendRef|MemBackendRef|VoidBackendRef](
db: AristoDbRef;
T: type W;
): T =

View File

@ -51,7 +51,7 @@ proc newAristoDbRef*(
rc.value
ok AristoDbRef(top: AristoLayerRef(vGen: vGen), backend: be)
elif backend == BackendNone:
elif backend == BackendVoid:
{.error: "Use BackendNone.init() without path argument".}
elif backend == BackendMemory:

View File

@ -79,10 +79,12 @@ proc clearMerkleKeys(
if lbl.isValid:
db.top.kMap.del vid
db.top.pAmk.del lbl
elif db.getKeyBackend(vid).isOK:
db.top.dirty = true # Modified top level cache
elif db.getKeyBE(vid).isOK:
# Register for deleting on backend
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.pAmk.del lbl
db.top.dirty = true # Modified top level cache
# -----------
@ -132,6 +134,9 @@ proc insertBranch(
var
leafLeg = Leg(nibble: -1)
# Will modify top level cache
db.top.dirty = true
# Install `forkVtx`
block:
# Clear Merkle hashes (aka hash keys) unless proof mode.
@ -240,6 +245,9 @@ proc concatBranchAndLeaf(
result = Hike(root: hike.root, legs: hike.legs)
result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
# Will modify top level cache
db.top.dirty = true
# Append leaf vertex
let
vid = db.vidFetch(pristine = true)
@ -287,6 +295,9 @@ proc topIsBranchAddLeaf(
debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
nibble, linkID, leafPfx=hike.tail
# Will modify top level cache
db.top.dirty = true
# Reuse placeholder entry in table
let vtx = VertexRef(
vType: Leaf,
@ -333,6 +344,10 @@ proc topIsExtAddLeaf(
#
# <-------- immutable -------------->
#
# Will modify top level cache
db.top.dirty = true
let vtx = VertexRef(
vType: Leaf,
lPfx: extVtx.ePfx & hike.tail,
@ -357,6 +372,9 @@ proc topIsExtAddLeaf(
if linkID.isValid:
return Hike(error: MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid)
@ -372,6 +390,7 @@ proc topIsExtAddLeaf(
brVtx.bVid[nibble] = vid
db.top.sTab[brVid] = brVtx
db.top.sTab[vid] = vtx
db.top.dirty = true # Modified top level cache
result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
@ -389,6 +408,9 @@ proc topIsEmptyAddLeaf(
if rootVtx.bVid[nibble].isValid:
return Hike(error: MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, hike.root)
@ -404,6 +426,7 @@ proc topIsEmptyAddLeaf(
rootVtx.bVid[nibble] = leafVid
db.top.sTab[hike.root] = rootVtx
db.top.sTab[leafVid] = leafVtx
db.top.dirty = true # Modified top level cache
return Hike(
root: hike.root,
legs: @[Leg(wp: VidVtxPair(vtx: rootVtx, vid: hike.root), nibble: nibble),
@ -426,8 +449,12 @@ proc updatePayload(
if vtx.lData != payload:
let vid = result.legs[^1].wp.vid
# Will modify top level cache
db.top.dirty = true
vtx.lData = payload
db.top.sTab[vid] = vtx
db.top.dirty = true # Modified top level cache
db.top.lTab[leafTie] = vid
db.clearMerkleKeys(result, vid)
@ -518,12 +545,9 @@ proc mergeNodeImpl(
vtx.bVid[n] = db.vidAttach bLbl
db.top.pPrf.incl vid
if hasVtx:
let key = db.getKey vid
if key != hashKey:
db.top.sTab[vid] = vtx
else:
if not hasVtx or db.getKey(vid) != hashKey:
db.top.sTab[vid] = vtx
db.top.dirty = true # Modified top level cache
ok vid
@ -583,6 +607,7 @@ proc merge*(
lPfx: leafTie.path.to(NibblesSeq),
lData: payload))
db.top.sTab[wp.vid] = wp.vtx
db.top.dirty = true # Modified top level cache
result = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])
# Double check the result until the code is more reliable
@ -597,6 +622,7 @@ proc merge*(
# End else (1st level)
proc merge*(
db: AristoDbRef; # Database, top layer
leaf: LeafTiePayload # Leaf item to add to the database
@ -719,6 +745,7 @@ proc merge*(
vid = db.top.pAmk.getOrVoid lbl
if not vid.isvalid:
db.top.pAmk[lbl] = rootVid
db.top.dirty = true # Modified top level cache
# Process over chains in reverse mode starting with the root node. This
# allows the algorithm to find existing nodes on the backend.

View File

@ -16,45 +16,29 @@
import
std/[options, sequtils, tables],
results,
"."/[aristo_desc, aristo_filter]
"."/[aristo_desc, aristo_filter, aristo_hashify]
type
AristoTxAction* = proc() {.gcsafe, raises: [CatchableError].}
const
TxUidLocked = high(uint) div 2
## The range of valid transactions of is roughly `high(int)`. For
## normal transactions, the lower range is applied while for restricted
## transactions used with `execute()` below, the higher range is used.
func isTop*(tx: AristoTxRef): bool
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc backup(db: AristoDbRef): AristoDbRef =
AristoDbRef(
top: db.top, # ref
stack: db.stack, # sequence of refs
txRef: db.txRef, # ref
txUidGen: db.txUidGen) # number
proc restore(db: AristoDbRef, backup: AristoDbRef) =
db.top = backup.top
db.stack = backup.stack
db.txRef = backup.txRef
db.txUidGen = backup.txUidGen
func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
if not tx.isTop():
return err(TxNotTopTx)
let db = tx.db
if tx.level != db.stack.len:
return err(TxStackUnderflow)
ok db
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc getTxUid(db: AristoDbRef): uint =
if db.txUidGen < TxUidLocked:
if db.txUidGen == TxUidLocked - 1:
db.txUidGen = 0
else:
if db.txUidGen == high(uint):
db.txUidGen = TxUidLocked
if db.txUidGen == high(uint):
db.txUidGen = 0
db.txUidGen.inc
db.txUidGen
@ -62,39 +46,43 @@ proc getTxUid(db: AristoDbRef): uint =
# Public functions, getters
# ------------------------------------------------------------------------------
proc txTop*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
func txTop*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
## Getter, returns top level transaction if there is any.
if db.txRef.isNil:
err(TxNoPendingTx)
else:
ok(db.txRef)
proc isTop*(tx: AristoTxRef): bool =
func isTop*(tx: AristoTxRef): bool =
## Getter, returns `true` if the argument `tx` refers to the current top
## level transaction.
tx.db.txRef == tx and tx.db.top.txUid == tx.txUid
proc level*(tx: AristoTxRef): int =
## Getter, non-negative transaction nesting level
var tx = tx
while tx.parent != AristoTxRef(nil):
tx = tx.parent
result.inc
func level*(tx: AristoTxRef): int =
## Getter, positive nesting level of transaction argument `tx`
tx.level
func level*(db: AristoDbRef): int =
## Getter, non-negative nesting level (i.e. number of pending transactions)
if not db.txRef.isNil:
result = db.txRef.level
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
## Getter, retrieves the parent database descriptor
func to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
## Getter, retrieves the parent database descriptor from argument `tx`
tx.db
proc rebase*(tx: AristoTxRef): Result[void,AristoError] =
proc rebase*(
tx: AristoTxRef; # Some transaction on database
): Result[void,AristoError] =
## Revert transaction stack to an earlier point in time.
if not tx.isTop():
let
db = tx.db
inx = tx.stackInx
inx = tx.level
if db.stack.len <= inx or db.stack[inx].txUid != tx.txUid:
return err(TxArgStaleTx)
# Roll back to some earlier layer.
@ -102,60 +90,11 @@ proc rebase*(tx: AristoTxRef): Result[void,AristoError] =
db.stack.setLen(inx)
ok()
proc exec*(
tx: AristoTxRef;
action: AristoTxAction;
): Result[void,AristoError]
{.gcsafe, raises: [CatchableError].} =
## Execute function argument `action()` on a transaction `tx` which might
## refer to an earlier one. There are some restrictions on the database
## `tx` refers to which might have been captured by the `action` closure.
##
## Restrictions:
## * For the argument transaction `tx`, the expressions `tx.commit()` or
## `tx.rollack()` will throw an `AssertDefect` error.
## * The `execute()` call must not be nested. Doing otherwise will throw an
## `AssertDefect` error.
## * Changes on the database referred to by `tx` can be staged but not saved
## persistently with the `stow()` directive.
##
## After return, the state of the underlying database will not have changed.
## Any transactions left open by the `action()` call will have been discarded.
##
## So these restrictions amount to sort of a temporary *read-only* mode for
## the underlying database.
##
if TxUidLocked <= tx.txUid:
return err(TxExecNestingAttempt)
# Move current DB to a backup copy
let
db = tx.db
saved = db.backup
# Install transaction layer
if not tx.isTop():
if db.stack.len <= tx.stackInx:
return err(TxArgStaleTx)
db.top[] = db.stack[tx.stackInx][] # deep copy
db.top.txUid = TxUidLocked
db.stack = @[AristoLayerRef()]
db.txUidGen = TxUidLocked
db.txRef = AristoTxRef(db: db, txUid: TxUidLocked, stackInx: 1)
# execute action
action()
# restore
db.restore saved
ok()
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------
proc txBegin*(db: AristoDbRef): AristoTxRef =
proc txBegin*(db: AristoDbRef): Result[AristoTxRef,(VertexID,AristoError)] =
## Starts a new transaction.
##
## Example:
@ -166,33 +105,33 @@ proc txBegin*(db: AristoDbRef): AristoTxRef =
## ... continue using db ...
## tx.commit()
##
if db.level != db.stack.len:
return err((VertexID(0),TxStackGarbled))
db.stack.add db.top.dup # push (save and use top later)
db.top.txUid = db.getTxUid()
db.txRef = AristoTxRef(
db: db,
txUid: db.top.txUid,
parent: db.txRef,
stackInx: db.stack.len)
db.txRef
db: db,
txUid: db.top.txUid,
parent: db.txRef,
level: db.stack.len)
ok db.txRef
proc rollback*(tx: AristoTxRef): Result[void,AristoError] =
proc rollback*(
tx: AristoTxRef; # Top transaction on database
): Result[void,(VertexID,AristoError)] =
## Given a *top level* handle, this function discards all database operations
## performed for this transaction. The previous transaction is returned if
## there was any.
##
## This function will throw a `AssertionDefect` exception unless `tx` is the
## top level transaction descriptor and the layer stack was not manipulated
## externally.
if not tx.isTop():
return err(TxNotTopTx)
if tx.txUid == TxUidLocked:
return err(TxExecBaseTxLocked)
let db = tx.db
if db.stack.len == 0:
return err(TxStackUnderflow)
let db = block:
let rc = tx.getDbDescFromTopTx()
if rc.isErr:
return err((VertexID(0),rc.error))
rc.value
# Roll back to previous layer.
db.top = db.stack[^1]
@ -201,22 +140,28 @@ proc rollback*(tx: AristoTxRef): Result[void,AristoError] =
db.txRef = tx.parent
ok()
proc commit*(tx: AristoTxRef): Result[void,AristoError] =
proc commit*(
tx: AristoTxRef; # Top transaction on database
dontHashify = false; # Process/fix MPT hashes
): Result[void,(VertexID,AristoError)] =
## Given a *top level* handle, this function accepts all database operations
## performed through this handle and merges it to the previous layer. The
## previous transaction is returned if there was any.
##
## This function will throw a `AssertionDefect` exception unless `tx` is the
## top level transaction descriptor and the layer stack was not manipulated
## externally.
if not tx.isTop():
return err(TxNotTopTx)
if tx.txUid == TxUidLocked:
return err(TxExecBaseTxLocked)
## Unless the argument `dontHashify` is set `true`, the function will process
## Merkle Patricia Tree hashes unless there was no change to this layer.
## This may produce additional errors (see `hashify()`.)
let db = block:
let rc = tx.getDbDescFromTopTx()
if rc.isErr:
return err((VertexID(0),rc.error))
rc.value
let db = tx.db
if db.stack.len == 0:
return err(TxStackUnderflow)
if not dontHashify:
let rc = db.hashify()
if rc.isErr:
return err(rc.error)
# Keep top and discard layer below
db.top.txUid = db.stack[^1].txUid
@ -227,40 +172,39 @@ proc commit*(tx: AristoTxRef): Result[void,AristoError] =
proc collapse*(
tx: AristoTxRef; # Database, transaction wrapper
commit: bool; # Commit is `true`, otherwise roll back
): Result[void,AristoError] =
tx: AristoTxRef; # Top transaction on database
commit: bool; # Commit if `true`, otherwise roll back
dontHashify = false; # Process/fix MPT hashes
): Result[void,(VertexID,AristoError)] =
## Iterated application of `commit()` or `rollback()` performing the
## something similar to
## ::
## if tx.isTop():
## while true:
## discard tx.commit() # ditto for rollback()
## if db.topTx.isErr: break
## tx = db.topTx.value
## while true:
## discard tx.commit() # ditto for rollback()
## if db.topTx.isErr: break
## tx = db.topTx.value
##
if not tx.isTop():
return err(TxNotTopTx)
if tx.txUid == TxUidLocked:
return err(TxExecBaseTxLocked)
## The `dontHashify` is treated as described for `commit()`
let db = block:
let rc = tx.getDbDescFromTopTx()
if rc.isErr:
return err((VertexID(0),rc.error))
rc.value
# Get the first transaction
var txBase = tx
while txBase.parent != AristoTxRef(nil):
txBase = txBase.parent
# If commit, then leave the current layer and clear the stack, otherwise
# install the stack bottom.
if not commit:
db.stack[0].swap db.top
let
db = tx.db
inx = txBase.stackInx-1
if not dontHashify:
var rc = db.hashify()
if rc.isErr:
if not commit:
db.stack[0].swap db.top # restore
return err(rc.error)
if commit:
# If commit, then leave the current layer and clear the stack
db.top.txUid = 0
else:
# Otherwise revert to previous layer from stack
db.top = db.stack[inx]
db.stack.setLen(inx)
db.top.txUid = 0
db.stack.setLen(0)
ok()
# ------------------------------------------------------------------------------
@ -268,43 +212,46 @@ proc collapse*(
# ------------------------------------------------------------------------------
proc stow*(
db: AristoDbRef;
stageOnly = true;
extendOK = false;
): Result[void,AristoError] =
## If there is no backend while the `stageOnly` is set `true`, the function
## returns immediately with an error. The same happens if the backend is
## locked due to an `exec()` call while `stageOnly` is set.
db: AristoDbRef; # Database
persistent = false; # Stage only unless `true`
dontHashify = false; # Process/fix MPT hashes
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,(VertexID,AristoError)] =
## If there is no backend while the `persistent` argument is set `true`,
## the function returns immediately with an error. The same happens if the
## backend is locked while `persistent` is set (e.g. by an `exec()` call.)
##
## Otherwise, the data changes from the top layer cache are merged into the
## The `dontHashify` is treated as described for `commit()`.
##
## The function then merges the data from the top layer cache into the
## backend stage area. After that, the top layer cache is cleared.
##
## If the argument `stageOnly` is set `true`, all the staged data are merged
## into the backend database. The staged data area is cleared.
## Staging the top layer cache might fail with a partial MPT when it is
## set up from partial MPT chunks as it happens with `snap` sync processing.
## In this case, the `chunkedMpt` argument must be set `true` (see also
## `fwdFilter`.)
##
## If the argument `persistent` is set `true`, all the staged data are merged
## into the physical backend database and the staged data area is cleared.
##
if not db.txRef.isNil and
TxUidLocked <= db.txRef.txUid and
not stageOnly:
return err(TxExecDirectiveLocked)
let be = db.backend
if be.isNil and not stageOnly:
return err(TxBackendMissing)
if be.isNil and persistent:
return err((VertexID(0),TxBackendMissing))
let fwd = block:
let rc = db.fwdFilter(db.top, extendOK)
let rc = db.fwdFilter(db.top, chunkedMpt)
if rc.isErr:
return err(rc.error[1])
return err(rc.error)
rc.value
if fwd.vGen.isSome: # Otherwise this layer is pointless
block:
let rc = db.merge fwd
if rc.isErr:
return err(rc.error[1])
return err(rc.error)
rc.value
if not stageOnly:
if persistent:
# Save structural and other table entries
let txFrame = be.putBegFn()
be.putVtxFn(txFrame, db.roFilter.sTab.pairs.toSeq)
@ -312,7 +259,7 @@ proc stow*(
be.putIdgFn(txFrame, db.roFilter.vGen.unsafeGet)
let w = be.putEndFn txFrame
if w != AristoError(0):
return err(w)
return err((VertexID(0),w))
db.roFilter = AristoFilterRef(nil)
@ -323,14 +270,17 @@ proc stow*(
ok()
proc stow*(
db: AristoDbRef;
stageLimit: int;
extendOK = false;
): Result[void,AristoError] =
## Variant of `stow()` with the `stageOnly` argument replaced by
## `stageLimit <= max(db.roFilter.bulk, db.top.bulk)`.
let w = max(db.roFilter.bulk, db.top.bulk)
db.stow(stageOnly=(stageLimit <= w), extendOK=extendOK)
db: AristoDbRef; # Database
stageLimit: int; # Policy based persistent storage
dontHashify = false; # Process/fix MPT hashes
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,(VertexID,AristoError)] =
## Variant of `stow()` with the `persistent` argument replaced by
## `stageLimit < max(db.roFilter.bulk, db.top.bulk)`.
db.stow(
persistent = (stageLimit < max(db.roFilter.bulk, db.top.bulk)),
dontHashify = dontHashify,
chunkedMpt = chunkedMpt)
# ------------------------------------------------------------------------------
# End

View File

@ -99,6 +99,7 @@ proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
## Attach (i.r. register) a Merkle hash key to a vertex ID.
db.top.pAmk[lbl] = vid
db.top.kMap[vid] = lbl
db.top.dirty = true # Modified top level cache
proc vidAttach*(db: AristoDbRef; lbl: HashLabel): VertexID {.discardable.} =
## Variant of `vidAttach()` with auto-generated vertex ID

View File

@ -24,7 +24,7 @@ iterator walkVtxBeImpl*[T](
## Generic iterator
var n = 0
when be is NoneBackendRef:
when be is VoidBackendRef:
let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
else:
@ -59,7 +59,7 @@ iterator walkKeyBeImpl*[T](
## Generic iterator
var n = 0
when be is NoneBackendRef:
when be is VoidBackendRef:
let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
else:
@ -97,7 +97,7 @@ iterator walkIdgBeImpl*[T](
yield(0, VertexID(0), db.roFilter.vGen.unsafeGet)
nNext = 1
when be isnot NoneBackendRef:
when be isnot VoidBackendRef:
mixin walkIdg
for (n,vid,vGen) in be.walkIdg:

View File

@ -24,7 +24,7 @@ export
# Public iterators (all in one)
# ------------------------------------------------------------------------------
iterator walkVtxBe*[T: MemBackendRef|NoneBackendRef](
iterator walkVtxBe*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vtx: VertexRef] =
@ -34,7 +34,7 @@ iterator walkVtxBe*[T: MemBackendRef|NoneBackendRef](
for (n,vid,vtx) in db.to(T).walkVtxBeImpl db:
yield (n,vid,vtx)
iterator walkKeyBe*[T: MemBackendRef|NoneBackendRef](
iterator walkKeyBe*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, key: HashKey] =
@ -42,7 +42,7 @@ iterator walkKeyBe*[T: MemBackendRef|NoneBackendRef](
for (n,vid,key) in db.to(T).walkKeyBeImpl db:
yield (n,vid,key)
iterator walkIdgBe*[T: MemBackendRef|NoneBackendRef](
iterator walkIdgBe*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =

View File

@ -17,7 +17,7 @@ import
unittest2,
../../nimbus/sync/protocol,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[aristo_desc, aristo_debug],
../../nimbus/db/aristo/[aristo_desc, aristo_debug, aristo_hashify],
../../nimbus/db/aristo/aristo_init/[
aristo_memory, aristo_rocksdb, persistent],
./test_helpers
@ -127,7 +127,7 @@ proc test_backendConsistency*(
if w.root != rootKey or resetDB:
rootKey = w.root
count = 0
ndb = newAristoDbRef BackendNone
ndb = newAristoDbRef BackendVoid
mdb = newAristoDbRef BackendMemory
if doRdbOk:
rdb.finish(flush=true)
@ -197,15 +197,15 @@ proc test_backendConsistency*(
# Store onto backend database
block:
#noisy.say "***", "db-dump\n ", mdb.pp
let rc = mdb.stow(stageOnly=false, extendOK=true)
let rc = mdb.stow(persistent=true, dontHashify=true, chunkedMpt=true)
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
if doRdbOk:
let rc = rdb.stow(stageOnly=false, extendOK=true)
let rc = rdb.stow(persistent=true, dontHashify=true, chunkedMpt=true)
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
if not ndb.top.verify(mdb.to(MemBackendRef), noisy):

View File

@ -87,7 +87,7 @@ proc test_transcodeAccounts*(
) =
## Transcoder tests on accounts database
var
adb = newAristoDbRef BackendNone
adb = newAristoDbRef BackendVoid
count = -1
for (n, key, value) in rocky.walkAllDb():
if stopAfter < n:
@ -178,7 +178,7 @@ proc test_transcodeAccounts*(
proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
## Transcode VID lists held in `AristoDb` descriptor
var td = TesterDesc.init seed
let db = newAristoDbRef BackendNone
let db = newAristoDbRef BackendVoid
# Add some randum numbers
block:
@ -204,7 +204,7 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
# Deserialise
let
db1 = newAristoDbRef BackendNone
db1 = newAristoDbRef BackendVoid
rc = dbBlob.deblobify seq[VertexID]
if rc.isErr:
check rc.error == AristoError(0)

View File

@ -16,10 +16,9 @@ import
eth/common,
stew/results,
unittest2,
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
../../nimbus/db/aristo/[
aristo_check, aristo_delete, aristo_desc, aristo_hashify,
aristo_get, aristo_merge],
aristo_check, aristo_delete, aristo_desc, aristo_get, aristo_merge],
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
./test_helpers
type
@ -30,7 +29,8 @@ type
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
const
MaxFilterBulk = 15_000
MaxFilterBulk = 150_000
## Policy settig for `pack()`
WalkStopRc =
Result[LeafTie,(VertexID,AristoError)].err((VertexID(0),NearbyBeyondRange))
@ -95,12 +95,12 @@ proc innerCleanUp(db: AristoDbRef) =
if rc.isOk:
let rx = rc.value.collapse(commit=false)
if rx.isErr:
check rx.error == 0
check rx.error == (0,0)
db.finish(flush=true)
proc saveToBackend(
tx: var AristoTxRef;
extendOK: bool;
chunkedMpt: bool;
relax: bool;
noisy: bool;
debugID: int;
@ -111,8 +111,8 @@ proc saveToBackend(
block:
block:
let level = tx.level
if level != 1:
check level == 1
if level != 2:
check level == 2
return
block:
let rc = db.checkCache(relax=true)
@ -125,7 +125,12 @@ proc saveToBackend(
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
block:
# Make sure MPT hashes are OK
if db.top.dirty:
check db.top.dirty == false
return
block:
let rc = db.txTop()
@ -134,13 +139,8 @@ proc saveToBackend(
return
tx = rc.value
let level = tx.level
if level != 0:
check level == 0
return
block:
let rc = db.hashify()
if rc.isErr:
check rc.error == (0,0)
if level != 1:
check level == 1
return
block:
let rc = db.checkBE(relax=true)
@ -153,17 +153,22 @@ proc saveToBackend(
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
block:
# Make sure MPT hashes are OK
if db.top.dirty:
check db.top.dirty == false
return
block:
let rc = db.txTop()
if rc.isOk:
check rc.value.level < 0
check rc.value.level < 0 # force error
return
block:
let rc = db.stow(stageLimit=MaxFilterBulk, extendOK=extendOK)
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt)
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
block:
let rc = db.checkBE(relax=relax)
@ -172,13 +177,13 @@ proc saveToBackend(
return
# Update layers to original level
tx = db.txBegin().to(AristoDbRef).txBegin()
tx = db.txBegin().value.to(AristoDbRef).txBegin().value
true
proc saveToBackendWithOops(
tx: var AristoTxRef;
extendOK: bool;
chunkedMpt: bool;
noisy: bool;
debugID: int;
oops: (int,AristoError);
@ -189,8 +194,8 @@ proc saveToBackendWithOops(
block:
block:
let level = tx.level
if level != 1:
check level == 1
if level != 2:
check level == 2
return
# Commit and hashify the current layer
@ -198,7 +203,12 @@ proc saveToBackendWithOops(
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
block:
# Make sure MPT hashes are OK
if db.top.dirty:
check db.top.dirty == false
return
block:
let rc = db.txTop()
@ -207,13 +217,8 @@ proc saveToBackendWithOops(
return
tx = rc.value
let level = tx.level
if level != 0:
check level == 0
return
block:
let rc = db.hashify()
if rc.isErr:
check rc.error == (0,0)
if level != 1:
check level == 1
return
# Commit and save to backend
@ -221,7 +226,12 @@ proc saveToBackendWithOops(
block:
let rc = tx.commit()
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
block:
# Make sure MPT hashes are OK
if db.top.dirty:
check db.top.dirty == false
return
block:
let rc = db.txTop()
@ -229,13 +239,13 @@ proc saveToBackendWithOops(
check rc.value.level < 0
return
block:
let rc = db.stow(stageLimit=MaxFilterBulk, extendOK=extendOK)
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt)
if rc.isErr:
check rc.error == 0
check rc.error == (0,0)
return
# Update layers to original level
tx = db.txBegin().to(AristoDbRef).txBegin()
tx = db.txBegin().value.to(AristoDbRef).txBegin().value
true
@ -343,9 +353,9 @@ proc testTxMergeAndDelete*(
# Start transaction (double frame for testing)
check db.txTop.isErr
var tx = db.txBegin().to(AristoDbRef).txBegin()
var tx = db.txBegin().value.to(AristoDbRef).txBegin().value
check tx.isTop()
check tx.level == 1
check tx.level == 2
# Reset database so that the next round has a clean setup
defer: db.innerCleanUp
@ -383,7 +393,7 @@ proc testTxMergeAndDelete*(
if doSaveBeOk:
if not tx.saveToBackend(
extendOK=false, relax=relax, noisy=noisy, runID):
chunkedMpt=false, relax=relax, noisy=noisy, runID):
return
# Delete leaf
@ -450,7 +460,7 @@ proc testTxMergeProofAndKvpList*(
rc.value
# Start transaction (double frame for testing)
tx = db.txBegin().to(AristoDbRef).txBegin()
tx = db.txBegin().value.to(AristoDbRef).txBegin().value
check tx.isTop()
# Update root
@ -496,7 +506,7 @@ proc testTxMergeProofAndKvpList*(
block:
let oops = oopsTab.getOrDefault(testId,(0,AristoError(0)))
if not tx.saveToBackendWithOops(
extendOK=true, noisy=noisy, debugID=runID, oops):
chunkedMpt=true, noisy=noisy, debugID=runID, oops):
return
when true and false: