mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-02-28 20:00:43 +00:00
Simplify txFrame protocol, improve persist performance (#3077)
* Simplify txFrame protocol, improve persist performance

  To prepare forked-layers for further surgery to avoid the nesting tax, the
  commit/rollback style of interacting must first be adjusted, since it does
  not provide a point in time where the frame is "done" and goes from being
  actively written to, to simply waiting to be persisted or discarded.

  A collateral benefit of this change is that the scheme removes some
  complexity from the process by moving the "last saved block number" into
  txFrame along with the actual state changes, thus reducing the risk that
  they go "out of sync" and removing the "commit" consolidation
  responsibility from ForkedChain.

* commit/rollback become checkpoint/dispose - since these are pure in-memory
  constructs, there's less error handling and there's no real "rollback"
  involved - dispose better implies that the instance cannot be used and we
  can more aggressively clear the memory it uses
* simplified block number handling that moves to become part of txFrame just
  like the data that the block number references
* avoid reparenting step by replacing the base instead of keeping a singleton
  instance
* persist builds the set of changes from the bottom, which helps avoid moving
  changes in the top layers through each ancestor level of the frame stack
* when using an in-memory database in tests, allow the instance to be passed
  around to enable testing persist and reload logic
This commit is contained in:
parent c8e6247a16
commit caca11b30b
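For orientation before the diff: the commit replaces the old commit/rollback interaction with a freeze-then-persist one. A frame is written to, then frozen with `checkpoint` (which records the block number it represents), and later either persisted as a whole or released with `dispose`. The sketch below is illustrative only - it assumes the `CoreDb` API exactly as it appears in the hunks that follow (`txFrameBegin`, `checkpoint`, `dispose`, `persist`) and a pre-initialised `CoreDbRef`; `applyBlockSketch` and its staging comment are hypothetical and not part of the commit.

```nim
# Minimal sketch of the reworked txFrame lifecycle (hypothetical helper,
# assuming the CoreDb API shown in the hunks below).
proc applyBlockSketch(db: CoreDbRef, blockNumber: BlockNumber) =
  # Open a frame layered on top of the current base frame.
  let txFrame = db.ctx.txFrameBegin(nil)

  # ... stage the block's state changes into `txFrame` here; if staging
  # fails, the frame is simply released with `txFrame.dispose()` - there is
  # no `rollback()` anymore ...

  # Freeze the frame and record the block number it represents; this replaces
  # the old `commit()` step - the frame now only waits to be persisted or
  # discarded.
  txFrame.checkpoint(blockNumber)

  # Persisting writes the cumulative changes from the base up to and
  # including this frame to the backend and makes it the new base frame.
  db.persist(txFrame)
```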
@@ -2,16 +2,16 @@ TracerTests
 ===
 ## TracerTests
 ```diff
-  block46147.json Skip
++ block46147.json OK
-  block46400.json Skip
++ block46400.json OK
-  block46402.json Skip
++ block46402.json OK
-  block47205.json Skip
++ block47205.json OK
-  block48712.json Skip
++ block48712.json OK
-  block48915.json Skip
++ block48915.json OK
-  block49018.json Skip
++ block49018.json OK
-  block97.json Skip
++ block97.json OK
 ```
-OK: 0/8 Fail: 0/8 Skip: 8/8
+OK: 8/8 Fail: 0/8 Skip: 0/8
 
 ---TOTAL---
-OK: 0/8 Fail: 0/8 Skip: 8/8
+OK: 8/8 Fail: 0/8 Skip: 0/8
@@ -43,7 +43,7 @@ const
 proc processBlock(c: ForkedChainRef,
                   parent: Header,
                   txFrame: CoreDbTxRef,
-                  blk: Block): Result[seq[Receipt], string] =
+                  blk: Block, blkHash: Hash32): Result[seq[Receipt], string] =
   template header(): Header =
     blk.header
 
@@ -62,11 +62,7 @@ proc processBlock(c: ForkedChainRef,
 
   # We still need to write header to database
   # because validateUncles still need it
-  let blockHash = header.blockHash()
-  ?txFrame.persistHeader(
-    blockHash,
-    header,
-    c.com.startOfHistory)
+  ?txFrame.persistHeader(blkHash, header, c.com.startOfHistory)
 
   # update currentBlock *after* we persist it
   # so the rpc return consistent result
@@ -93,7 +89,7 @@ func updateBranch(c: ForkedChainRef,
   c.activeBranch = newBranch
 
 proc writeBaggage(c: ForkedChainRef,
-    blk: Block,
+    blk: Block, blkHash: Hash32,
     txFrame: CoreDbTxRef,
     receipts: openArray[Receipt]) =
   template header(): Header =
@@ -140,13 +136,16 @@ proc validateBlock(c: ForkedChainRef,
     requestsHash: blk.header.requestsHash,
   )
 
-  var res = c.processBlock(parent.header, txFrame, blk)
-  if res.isErr:
-    txFrame.rollback()
-    return err(res.error)
+  var receipts = c.processBlock(parent.header, txFrame, blk, blkHash).valueOr:
+    txFrame.dispose()
+    return err(error)
 
-  c.writeBaggage(blk, txFrame, res.value)
-  c.updateBranch(parent, blk, blkHash, txFrame, move(res.value))
+  c.writeBaggage(blk, blkHash, txFrame, receipts)
+
+  # Block fully written to txFrame, mark it as such
+  txFrame.checkpoint(blk.header.number)
+
+  c.updateBranch(parent, blk, blkHash, txFrame, move(receipts))
 
   for i, tx in blk.transactions:
     c.txRecords[rlpHash(tx)] = (blkHash, uint64(i))
@@ -266,15 +265,11 @@ func calculateNewBase(
 
   doAssert(false, "Unreachable code, finalized block outside canonical chain")
 
-proc removeBlockFromCache(c: ForkedChainRef, bd: BlockDesc, commit = false) =
+proc removeBlockFromCache(c: ForkedChainRef, bd: BlockDesc) =
   c.hashToBlock.del(bd.hash)
   for tx in bd.blk.transactions:
     c.txRecords.del(rlpHash(tx))
-  if commit:
-    if bd.txFrame != c.baseTxFrame:
-      bd.txFrame.commit()
-    else:
-      bd.txFrame.dispose()
+  bd.txFrame.dispose()
 
 proc updateHead(c: ForkedChainRef, head: BlockPos) =
   ## Update head if the new head is different from current head.
@@ -374,10 +369,10 @@ proc updateBase(c: ForkedChainRef, newBase: BlockPos) =
   # Cleanup in-memory blocks starting from newBase backward
   # e.g. B3 backward. Switch to parent branch if needed.
 
-  template commitBlocks(number, branch) =
+  template disposeBlocks(number, branch) =
     let tailNumber = branch.tailNumber
     while number >= tailNumber:
-      c.removeBlockFromCache(branch.blocks[number - tailNumber], commit = true)
+      c.removeBlockFromCache(branch.blocks[number - tailNumber])
       inc count
 
       if number == 0:
@@ -385,10 +380,6 @@ proc updateBase(c: ForkedChainRef, newBase: BlockPos) =
         break
       dec number
 
-proc commitBase(c: ForkedChainRef, bd: BlockDesc) =
-  if bd.txFrame != c.baseTxFrame:
-    bd.txFrame.commit()
-
   let
     # Cache to prevent crash after we shift
     # the blocks
@@ -401,10 +392,11 @@ proc updateBase(c: ForkedChainRef, newBase: BlockPos) =
 
   let nextIndex = int(newBase.number - branch.tailNumber)
 
-  # Commit base block but don't remove from FC
-  c.commitBase(branch.blocks[nextIndex])
+  # Persist the new base block - this replaces the base tx in coredb!
+  c.com.db.persist(newBase.txFrame)
+  c.baseTxFrame = newBase.txFrame
 
-  commitBlocks(number, branch)
+  disposeBlocks(number, branch)
 
   # Update base if it indeed changed
   if nextIndex > 0:
@@ -424,7 +416,7 @@ proc updateBase(c: ForkedChainRef, newBase: BlockPos) =
     # Older branches will gone
     branch = branch.parent
     while not branch.isNil:
-      commitBlocks(number, branch)
+      disposeBlocks(number, branch)
 
       for i, brc in c.branches:
         if brc == branch:
@@ -454,12 +446,6 @@ proc updateBase(c: ForkedChainRef, newBase: BlockPos) =
     baseNumber = c.baseBranch.tailNumber,
     baseHash = c.baseBranch.tailHash.short
 
-  # Update base txFrame
-  if c.baseBranch.blocks[0].txFrame != c.baseTxFrame:
-    c.baseBranch.blocks[0].txFrame = c.baseTxFrame
-    if c.baseBranch.len > 1:
-      c.baseBranch.blocks[1].txFrame.reparent(c.baseTxFrame)
-
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
@@ -565,11 +551,6 @@ proc forkChoice*(c: ForkedChainRef,
   doAssert(newBaseNumber <= finalized.number)
   c.updateBase(newBase)
 
-  # Save and record the block number before the last saved block state.
-  if newBaseNumber > 0:
-    c.com.db.persist(newBaseNumber).isOkOr:
-      return err("Failed to save state: " & $$error)
-
   ok()
 
 func haveBlockAndState*(c: ForkedChainRef, blockHash: Hash32): bool =
@@ -30,8 +30,7 @@ proc fcKvtPersist*(c: ForkedChainRef) =
   ## should not be any.)
   ##
   let db = c.com.db
-  db.persist(c.baseTxFrame.getSavedStateBlockNumber()).isOkOr:
-    raiseAssert "fcKvtPersist: persistent() failed: " & $$error
+  db.persist(c.baseTxFrame)
 
 proc fcKvtHasKey*(c: ForkedChainRef, key: openArray[byte]): bool =
   ## Check whether the argument `key` exists on the `kvt` table (i.e. `get()`
@@ -64,8 +64,10 @@ proc getVmState(
   if p.vmState == nil:
     let
       vmState = BaseVMState()
-      txFrame = p.c.db.baseTxFrame()
+      txFrame = p.c.db.baseTxFrame.txFrameBegin()
       parent = ?txFrame.getBlockHeader(header.parentHash)
+
+    doAssert txFrame.getSavedStateBlockNumber() == parent.number
     vmState.init(parent, header, p.c.com, txFrame, storeSlotHash = storeSlotHash)
     p.vmState = vmState
   else:
@@ -78,14 +80,15 @@ proc getVmState(
   ok(p.vmState)
 
 proc dispose*(p: var Persister) =
-  p.c.db.baseTxFrame().rollback()
+  p.vmState.ledger.txFrame.dispose()
+  p.vmState = nil
 
 proc init*(T: type Persister, c: ChainRef, flags: PersistBlockFlags): T =
   T(c: c, flags: flags)
 
 proc checkpoint*(p: var Persister): Result[void, string] =
   if NoValidation notin p.flags:
-    let stateRoot = p.c.db.baseTxFrame().getStateRoot().valueOr:
+    let stateRoot = p.vmState.ledger.txFrame.getStateRoot().valueOr:
       return err($$error)
 
     if p.parent.stateRoot != stateRoot:
@@ -100,9 +103,10 @@ proc checkpoint*(p: var Persister): Result[void, string] =
         "stateRoot mismatch, expect: " & $p.parent.stateRoot & ", got: " & $stateRoot
       )
 
-  # Save and record the block number before the last saved block state.
-  p.c.db.persist(p.parent.number).isOkOr:
-    return err("Failed to save state: " & $$error)
+  # Move in-memory state to disk
+  p.c.db.persist(p.vmState.ledger.txFrame)
+  # Get a new frame since the DB assumes ownership
+  p.vmState.ledger.txFrame = p.c.db.baseTxFrame().txFrameBegin()
 
   ok()
 
@@ -170,6 +174,8 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] =
   p.stats.txs += blk.transactions.len
   p.stats.gas += blk.header.gasUsed
 
+  txFrame.checkpoint(header.number)
+
   assign(p.parent, header)
 
   ok()
@@ -39,14 +39,11 @@ when AristoPersistentBackendOk:
   {.pragma: noRaise, gcsafe, raises: [].}
 
 type
-  AristoApiCommitFn* =
+  AristoApiCheckpointFn* =
     proc(tx: AristoTxRef;
-        ): Result[void,AristoError]
-      {.noRaise.}
-      ## Given a *top level* handle, this function accepts all database
-      ## operations performed through this handle and merges it to the
-      ## previous layer. The previous transaction is returned if there
-      ## was any.
+         blockNumber: uint64
+        ) {.noRaise.}
+      ## Update the txFrame to the given checkpoint "identifier", or block number
 
   AristoApiDeleteAccountRecordFn* =
     proc(db: AristoTxRef;
@@ -78,9 +75,9 @@ type
     ## Variant of `deleteStorageData()` for purging the whole storage tree
     ## associated to the account argument `accPath`.
 
-  AristoApiFetchLastSavedStateFn* =
+  AristoApiFetchLastCheckpointFn* =
     proc(db: AristoTxRef
-        ): Result[SavedState,AristoError]
+        ): Result[uint64,AristoError]
       {.noRaise.}
       ## The function returns the state of the last saved state. This is a
       ## Merkle hash tag for vertex with ID 1 and a bespoke `uint64` identifier
@@ -249,41 +246,25 @@ type
     proc(
       db: AristoDbRef;
       batch: PutHdlRef;
-      nxtSid = 0u64;
+      txFrame: AristoTxRef;
     ) {.noRaise.}
-      ## Persistently store data onto backend database. If the system is
-      ## running without a database backend, the function returns immediately
-      ## with an error. The same happens if there is a pending transaction.
-      ##
-      ## The function merges all staged data from the top layer cache onto the
-      ## backend stage area. After that, the top layer cache is cleared.
-      ##
-      ## Finally, the staged data are merged into the physical backend
-      ## database and the staged data area is cleared.
-      ##
-      ## The argument `nxtSid` will be the ID for the next saved state record.
+      ## Persistently store the cumulative set of changes that `txFrame`
+      ## represents to the database. `txFrame` becomes the new base after this
+      ## operation.
 
-  AristoApiRollbackFn* =
+  AristoApiDisposeFn* =
     proc(tx: AristoTxRef;
-        ): Result[void,AristoError]
-      {.noRaise.}
-      ## Given a *top level* handle, this function discards all database
-      ## operations performed for this transactio. The previous transaction
-      ## is returned if there was any.
+        ) {.noRaise.}
+      ## Release a frame releasing its associated resources. This operation
+      ## makes all frames built on top of it invalid - they still need to be
+      ## released however.
 
   AristoApiTxFrameBeginFn* =
     proc(db: AristoDbRef; parent: AristoTxRef
-        ): Result[AristoTxRef,AristoError]
+        ): AristoTxRef
       {.noRaise.}
-      ## Starts a new transaction.
-      ##
-      ## Example:
-      ## ::
-      ##   proc doSomething(db: AristoTxRef) =
-      ##     let tx = db.begin
-      ##     defer: tx.rollback()
-      ##     ... continue using db ...
-      ##     tx.commit()
+      ## Create a new layered transaction frame - the frame can later be
+      ## released or frozen and persisted.
 
   AristoApiBaseTxFrameFn* =
     proc(db: AristoDbRef;
@@ -293,13 +274,13 @@ type
   AristoApiRef* = ref AristoApiObj
   AristoApiObj* = object of RootObj
     ## Useful set of `Aristo` fuctions that can be filtered, stacked etc.
-    commit*: AristoApiCommitFn
+    checkpoint*: AristoApiCheckpointFn
 
     deleteAccountRecord*: AristoApiDeleteAccountRecordFn
     deleteStorageData*: AristoApiDeleteStorageDataFn
     deleteStorageTree*: AristoApiDeleteStorageTreeFn
 
-    fetchLastSavedState*: AristoApiFetchLastSavedStateFn
+    fetchLastCheckpoint*: AristoApiFetchLastCheckpointFn
 
     fetchAccountRecord*: AristoApiFetchAccountRecordFn
     fetchStateRoot*: AristoApiFetchStateRootFn
@@ -321,7 +302,7 @@ type
 
     pathAsBlob*: AristoApiPathAsBlobFn
     persist*: AristoApiPersistFn
-    rollback*: AristoApiRollbackFn
+    dispose*: AristoApiDisposeFn
     txFrameBegin*: AristoApiTxFrameBeginFn
     baseTxFrame*: AristoApiBaseTxFrameFn
 
@@ -329,13 +310,13 @@ type
   AristoApiProfNames* = enum
     ## Index/name mapping for profile slots
     AristoApiProfTotal = "total"
-    AristoApiProfCommitFn = "commit"
+    AristoApiProfCheckpointFn = "checkpoint"
 
     AristoApiProfDeleteAccountRecordFn = "deleteAccountRecord"
     AristoApiProfDeleteStorageDataFn = "deleteStorageData"
    AristoApiProfDeleteStorageTreeFn = "deleteStorageTree"
 
-    AristoApiProfFetchLastSavedStateFn = "fetchLastSavedState"
+    AristoApiProfFetchLastCheckpointFn = "fetchLastCheckpoint"
 
    AristoApiProfFetchAccountRecordFn = "fetchAccountRecord"
    AristoApiProfFetchStateRootFn = "fetchStateRoot"
@@ -358,9 +339,9 @@ type
 
     AristoApiProfPathAsBlobFn = "pathAsBlob"
     AristoApiProfPersistFn = "persist"
-    AristoApiProfRollbackFn = "rollback"
+    AristoApiProfDisposeFn = "dispose"
     AristoApiProfTxFrameBeginFn = "txFrameBegin"
     AristoApiProfBaseTxFrameFn = "baseTxFrame"
 
     AristoApiProfBeGetVtxFn = "be/getVtx"
     AristoApiProfBeGetKeyFn = "be/getKey"
@@ -410,13 +391,13 @@ func init*(api: var AristoApiObj) =
   ##
   when AutoValidateApiHooks:
     api.reset
-  api.commit = commit
+  api.checkpoint = checkpoint
 
   api.deleteAccountRecord = deleteAccountRecord
   api.deleteStorageData = deleteStorageData
   api.deleteStorageTree = deleteStorageTree
 
-  api.fetchLastSavedState = fetchLastSavedState
+  api.fetchLastCheckpoint = fetchLastCheckpoint
 
   api.fetchAccountRecord = fetchAccountRecord
   api.fetchStateRoot = fetchStateRoot
@@ -439,7 +420,7 @@ func init*(api: var AristoApiObj) =
 
   api.pathAsBlob = pathAsBlob
   api.persist = persist
-  api.rollback = rollback
+  api.dispose = dispose
   api.txFrameBegin = txFrameBegin
   api.baseTxFrame = baseTxFrame
 
@@ -483,10 +464,10 @@ func init*(
         code
         data.update(n.ord, getTime() - start)
 
-  profApi.commit =
+  profApi.checkpoint =
     proc(a: AristoTxRef): auto =
-      AristoApiProfCommitFn.profileRunner:
-        result = api.commit(a)
+      AristoApiProfCheckpointFn.profileRunner:
+        api.checkpoint(a)
 
   profApi.deleteAccountRecord =
     proc(a: AristoTxRef; b: Hash32): auto =
@@ -503,10 +484,10 @@ func init*(
       AristoApiProfDeleteStorageTreeFn.profileRunner:
         result = api.deleteStorageTree(a, b)
 
-  profApi.fetchLastSavedState =
+  profApi.fetchLastCheckpoint =
     proc(a: AristoTxRef): auto =
-      AristoApiProfFetchLastSavedStateFn.profileRunner:
-        result = api.fetchLastSavedState(a)
+      AristoApiProfFetchLastCheckpointFn.profileRunner:
+        result = api.fetchLastCheckpoint(a)
 
   profApi.fetchAccountRecord =
     proc(a: AristoTxRef; b: Hash32): auto =
@@ -588,10 +569,10 @@ func init*(
       AristoApiProfPersistFn.profileRunner:
         result = api.persist(a, b)
 
-  profApi.rollback =
-    proc(a: AristoTxRef): auto =
-      AristoApiProfRollbackFn.profileRunner:
-        result = api.rollback(a)
+  profApi.dispose =
+    proc(a: AristoTxRef) =
+      AristoApiProfDisposeFn.profileRunner:
+        api.dispose(a)
 
   profApi.txFrameBegin =
     proc(a: AristoTxRef): auto =
@@ -44,6 +44,7 @@ type
     db*: AristoDbRef ## Database descriptor
     parent*: AristoTxRef ## Previous transaction
     layer*: LayerRef
+    blockNumber*: Opt[uint64] ## Block number set when freezing the frame
 
   AristoDbRef* = ref object
     ## Three tier database object supporting distributed instances.
@@ -151,8 +152,19 @@ func hash*(db: AristoDbRef): Hash =
 # Public helpers
 # ------------------------------------------------------------------------------
 
+iterator stack*(tx: AristoTxRef): AristoTxRef =
+  # Stack going from base to tx
+  var frames: seq[AristoTxRef]
+  var tx = tx
+  while tx != nil:
+    frames.add tx
+    tx = tx.parent
+
+  while frames.len > 0:
+    yield frames.pop()
+
 iterator rstack*(tx: AristoTxRef): (LayerRef, int) =
-  # Stack in reverse order
+  # Stack in reverse order, ie going from tx to base
   var tx = tx
 
   var i = 0
@@ -125,8 +125,6 @@ type
     accLeaves*: Table[Hash32, VertexRef] ## Account path -> VertexRef
     stoLeaves*: Table[Hash32, VertexRef] ## Storage path -> VertexRef
 
-    cTop*: VertexID ## Last committed vertex ID
-
   GetVtxFlag* = enum
     PeekCache
       ## Peek into, but don't update cache - useful on work loads that are
@@ -190,14 +190,17 @@ proc hasStoragePayload(
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc fetchLastSavedState*(
+proc fetchLastCheckpoint*(
     db: AristoTxRef;
-      ): Result[SavedState,AristoError] =
+      ): Result[BlockNumber,AristoError] =
   ## Wrapper around `getLstBe()`. The function returns the state of the last
   ## saved state. This is a Merkle hash tag for vertex with ID 1 and a bespoke
   ## `uint64` identifier (may be interpreted as block number.)
-  # TODO store in frame!!
-  db.db.getLstBe()
+  if db.blockNumber.isSome():
+    return ok db.blockNumber.get()
+
+  let state = ?db.db.getLstBe()
+  ok state.serial
 
 proc fetchAccountRecord*(
     db: AristoTxRef;
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -21,8 +21,8 @@
 {.push raises: [].}
 
 import
-  ./aristo_init/memory_only
+  ./aristo_init/[init_common, memory_only]
 export
-  memory_only
+  init_common, memory_only
 
 # End
@@ -93,6 +93,37 @@ proc init*(trg: var TypedBackendObj; src: TypedBackendObj) =
   trg.txGen = src.txGen
   trg.txId = src.txId
 
+proc init*(
+    T: type AristoDbRef;
+    backend: BackendRef
+      ): Result[T, AristoError] =
+  let
+    vTop = if backend == nil: VertexID(0) else: ?backend.getTuvFn()
+    db = AristoDbRef(
+      txRef: AristoTxRef(layer: LayerRef(vTop: vTop)),
+      backend: backend,
+      accLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE),
+      stoLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE),
+    )
+
+  db.txRef.db = db # TODO evaluate if this cyclic ref is worth the convenience
+
+  ok(db)
+
+proc finish*(db: AristoDbRef; eradicate = false) =
+  ## Backend destructor. The argument `eradicate` indicates that a full
+  ## database deletion is requested. If set `false` the outcome might differ
+  ## depending on the type of backend (e.g. the `BackendMemory` backend will
+  ## always eradicate on close.)
+  ##
+  ## In case of distributed descriptors accessing the same backend, all
+  ## distributed descriptors will be destroyed.
+  ##
+  ## This distructor may be used on already *destructed* descriptors.
+  ##
+  if not db.backend.isNil:
+    db.backend.closeFn eradicate
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -27,7 +27,7 @@
 {.push raises: [].}
 
 import
-  std/[algorithm, options, sequtils, tables],
+  std/[algorithm, sequtils, tables],
   results,
   ../aristo_constants,
   ../aristo_desc,
@@ -40,11 +40,11 @@ const
     ## Enabled additional logging noise
 
 type
-  MemDbRef = ref object
+  MemDbRef* = ref object
     ## Database
-    sTab: Table[RootedVertexID,seq[byte]] ## Structural vertex table making up a trie
-    tUvi: Option[VertexID] ## Top used vertex ID
-    lSst: Opt[SavedState] ## Last saved state
+    sTab*: Table[RootedVertexID,seq[byte]] ## Structural vertex table making up a trie
+    tUvi*: Opt[VertexID] ## Top used vertex ID
+    lSst*: Opt[SavedState] ## Last saved state
 
   MemBackendRef* = ref object of TypedBackendRef
     ## Inheriting table so access can be extended for debugging purposes
@@ -52,7 +52,7 @@ type
 
   MemPutHdlRef = ref object of TypedPutHdlRef
     sTab: Table[RootedVertexID,seq[byte]]
-    tUvi: Option[VertexID]
+    tUvi: Opt[VertexID]
     lSst: Opt[SavedState]
 
 when extraTraceMessages:
@@ -109,16 +109,12 @@ proc getKeyFn(db: MemBackendRef): GetKeyFn =
 proc getTuvFn(db: MemBackendRef): GetTuvFn =
   result =
     proc(): Result[VertexID,AristoError]=
-      if db.mdb.tUvi.isSome:
-        return ok db.mdb.tUvi.unsafeGet
-      err(GetTuvNotFound)
+      db.mdb.tUvi or ok(VertexID(0))
 
 proc getLstFn(db: MemBackendRef): GetLstFn =
   result =
     proc(): Result[SavedState,AristoError]=
-      if db.mdb.lSst.isSome:
-        return ok db.mdb.lSst.unsafeGet
-      err(GetLstNotFound)
+      db.mdb.lSst or err(GetLstNotFound)
 
 # -------------
 
@@ -143,7 +139,7 @@ proc putTuvFn(db: MemBackendRef): PutTuvFn =
     proc(hdl: PutHdlRef; vs: VertexID) =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
-        hdl.tUvi = some(vs)
+        hdl.tUvi = Opt.some(vs)
 
 proc putLstFn(db: MemBackendRef): PutLstFn =
   result =
@@ -175,7 +171,7 @@ proc putEndFn(db: MemBackendRef): PutEndFn =
 
       let tuv = hdl.tUvi.get(otherwise = VertexID(0))
       if tuv.isValid:
-        db.mdb.tUvi = some(tuv)
+        db.mdb.tUvi = Opt.some(tuv)
 
       if hdl.lSst.isSome:
         db.mdb.lSst = hdl.lSst
@@ -193,10 +189,10 @@ proc closeFn(db: MemBackendRef): CloseFn =
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc memoryBackend*(): BackendRef =
+proc memoryBackend*(mdb = MemDbRef()): BackendRef =
   let db = MemBackendRef(
     beKind: BackendMemory,
-    mdb: MemDbRef())
+    mdb: mdb)
 
   db.getVtxFn = getVtxFn db
   db.getKeyFn = getKeyFn db
@@ -54,14 +54,13 @@ proc init*(
   ## Memory backend constructor.
   ##
 
-  let db =
-    when B is VoidBackendRef:
-      AristoDbRef(txRef: AristoTxRef(layer: LayerRef()))
+  when B is VoidBackendRef:
+    AristoDbRef.init(nil)[]
 
   elif B is MemBackendRef:
-    AristoDbRef(txRef: AristoTxRef(layer: LayerRef()), backend: memoryBackend())
-    db.txRef.db = db
-    db
+    AristoDbRef.init(memoryBackend())[]
+  else:
+    raiseAssert "Unknown backend"
 
 proc init*(
     T: type AristoDbRef; # Target type
@@ -69,21 +68,6 @@ proc init*(
   ## Shortcut for `AristoDbRef.init(VoidBackendRef)`
   AristoDbRef.init VoidBackendRef
 
-
-proc finish*(db: AristoDbRef; eradicate = false) =
-  ## Backend destructor. The argument `eradicate` indicates that a full
-  ## database deletion is requested. If set `false` the outcome might differ
-  ## depending on the type of backend (e.g. the `BackendMemory` backend will
-  ## always eradicate on close.)
-  ##
-  ## In case of distributed descriptors accessing the same backend, all
-  ## distributed descriptors will be destroyed.
-  ##
-  ## This distructor may be used on already *destructed* descriptors.
-  ##
-  if not db.backend.isNil:
-    db.backend.closeFn eradicate
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -24,13 +24,13 @@ import
   ../../opts,
   ../aristo_desc,
   ./rocks_db/rdb_desc,
-  "."/[rocks_db, memory_only]
+  "."/[init_common, rocks_db]
 
 export
   AristoDbRef,
   RdbBackendRef,
   RdbWriteEventCb,
-  memory_only,
+  init_common,
   aristo_desc
 
 # ------------------------------------------------------------------------------
@@ -45,22 +45,10 @@ proc init*(
     ): Result[T, AristoError] =
   let
     be = rocksDbBackend(opts, baseDb)
-    vTop = block:
-      let rc = be.getTuvFn()
-      if rc.isErr:
-        be.closeFn(eradicate = false)
-        return err(rc.error)
-      rc.value
-    db = AristoDbRef(
-      txRef: AristoTxRef(layer: LayerRef(vTop: vTop, cTop: vTop)),
-      backend: be,
-      accLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE),
-      stoLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE),
-    )
-
-  db.txRef.db = db # TODO evaluate if this cyclic ref is worth the convenience
-
-  ok(db)
+    db = AristoDbRef.init(be).valueOr:
+      be.closeFn(eradicate = false)
+      return err(error)
+  ok db
 
 # ------------------------------------------------------------------------------
 # End
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -21,26 +21,11 @@ import
 # ------------------------------------------------------------------------------
 
 proc persist*(
-    db: AristoDbRef; # Database
+    db: AristoDbRef;
     batch: PutHdlRef;
-    nxtSid = 0u64; # Next state ID (aka block number)
+    txFrame: AristoTxRef;
       ) =
-  ## Persistently store data onto backend database. If the system is running
-  ## without a database backend, the function returns immediately with an
-  ## error.
-  ##
-  ## The function merges all staged data from the top layer cache onto the
-  ## backend stage area. After that, the top layer cache is cleared.
-  ##
-  ## Finally, the staged data are merged into the physical backend database
-  ## and the staged data area is cleared. Wile performing this last step,
-  ## the recovery journal is updated (if available.)
-  ##
-  ## If the argument `nxtSid` is passed non-zero, it will be the ID for the
-  ## next recovery journal record. If non-zero, this ID must be greater than
-  ## all previous IDs (e.g. block number when stowing after block execution.)
-  ##
-  db.txFramePersist(batch, nxtSid)
+  db.txFramePersist(batch, txFrame)
 
 # ------------------------------------------------------------------------------
 # End
@@ -21,18 +21,7 @@ import
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): Result[AristoTxRef,AristoError] =
-  ## Starts a new transaction.
-  ##
-  ## Example:
-  ## ::
-  ##   proc doSomething(db: AristoDbRef) =
-  ##     let tx = db.begin
-  ##     defer: tx.rollback()
-  ##     ... continue using db ...
-  ##     tx.commit()
-  ##
-
+proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): AristoTxRef =
   let parent = if parent == nil:
     db.txRef
   else:
@@ -40,9 +29,9 @@ proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): AristoTxRef =
 
   let
     vTop = parent.layer.vTop
-    layer = LayerRef(vTop: vTop, cTop: vTop)
+    layer = LayerRef(vTop: vTop)
 
-  ok AristoTxRef(
+  AristoTxRef(
     db: db,
     parent: parent,
     layer: layer)
@@ -50,66 +39,60 @@ proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): AristoTxRef =
 proc baseTxFrame*(db: AristoDbRef): AristoTxRef=
   db.txRef
 
-proc rollback*(
-    tx: AristoTxRef; # Top transaction on database
-      ): Result[void,AristoError] =
-  ## Given a *top level* handle, this function discards all database operations
-  ## performed for this transaction.
-  # TODO Everyone using this txref should repoint their parent field
-
-  let vTop = tx.layer[].cTop
-  tx.layer[] = Layer(vTop: vTop, cTop: vTop)
-
-  ok()
-
-proc commit*(
-    tx: AristoTxRef; # Top transaction on database
-      ): Result[void,AristoError] =
-  ## This function pushes all changes done in this frame to its parent
-  ##
-  # TODO Everyone using this txref should repoint their parent field
-  doAssert tx.parent != nil, "should not commit the base tx"
-
-  # A rollback after commit should reset to the new vTop!
-  tx.layer[].cTop = tx.layer[].vTop
-
-  mergeAndReset(tx.parent.layer[], tx.layer[])
-
-  ok()
+proc dispose*(
+    tx: AristoTxRef;
+      ) =
+  tx[].reset()
+
+proc checkpoint*(
+    tx: AristoTxRef;
+    blockNumber: uint64;
+      ) =
+  tx.blockNumber = Opt.some(blockNumber)
 
 proc txFramePersist*(
     db: AristoDbRef; # Database
     batch: PutHdlRef;
-    nxtSid = 0u64; # Next state ID (aka block number)
+    txFrame: AristoTxRef;
       ) =
-  ## Persistently store data onto backend database. If the system is running
-  ## without a database backend, the function returns immediately with an
-  ## error.
-  ##
-  ## The function merges all data staged in `txFrame` and merges it onto the
-  ## backend database. `txFrame` becomes the new `baseTxFrame`.
-  ##
-  ## Any parent frames of `txFrame` become invalid after this operation.
-  ##
-  ## If the argument `nxtSid` is passed non-zero, it will be the ID for the
-  ## next recovery journal record. If non-zero, this ID must be greater than
-  ## all previous IDs (e.g. block number when stowing after block execution.)
-  ##
+  if txFrame == db.txRef and txFrame.layer.sTab.len == 0:
+    # No changes in frame - no `checkpoint` requirement - nothing to do here
+    return
   let be = db.backend
   doAssert not be.isNil, "Persisting to backend requires ... a backend!"
 
   let lSst = SavedState(
     key: emptyRoot, # placeholder for more
-    serial: nxtSid)
+    serial: txFrame.blockNumber.expect("`checkpoint` before persisting frame"))
 
+  # Squash all changes up to the base
+  if txFrame != db.txRef:
+    # Consolidate the changes from the old to the new base going from the
+    # bottom of the stack to avoid having to cascade each change through
+    # the full stack
+    assert txFrame.parent != nil
+    for frame in txFrame.stack():
+      if frame == db.txRef:
+        continue
+      mergeAndReset(db.txRef.layer[], frame.layer[])
+      db.txRef.blockNumber = frame.blockNumber
+
+      frame.dispose() # This will also dispose `txFrame` itself!
+
+    # Put the now-merged contents in txFrame and make it the new base
+    swap(db.txRef[], txFrame[])
+    db.txRef = txFrame
+
   # Store structural single trie entries
-  for rvid, vtx in db.txRef.layer.sTab:
-    db.txRef.layer.kMap.withValue(rvid, key) do:
+  for rvid, vtx in txFrame.layer.sTab:
+    txFrame.layer.kMap.withValue(rvid, key) do:
       be.putVtxFn(batch, rvid, vtx, key[])
     do:
       be.putVtxFn(batch, rvid, vtx, default(HashKey))
 
-  be.putTuvFn(batch, db.txRef.layer.vTop)
+  be.putTuvFn(batch, txFrame.layer.vTop)
   be.putLstFn(batch, lSst)
 
 # TODO above, we only prepare the changes to the database but don't actually
@@ -118,19 +101,16 @@ proc txFramePersist*(
 # in-memory and on-disk state)
 
   # Copy back updated payloads
-  for accPath, vtx in db.txRef.layer.accLeaves:
+  for accPath, vtx in txFrame.layer.accLeaves:
     db.accLeaves.put(accPath, vtx)
 
-  for mixPath, vtx in db.txRef.layer.stoLeaves:
+  for mixPath, vtx in txFrame.layer.stoLeaves:
     db.stoLeaves.put(mixPath, vtx)
 
-  # Done with txRef, all saved to backend
-  db.txRef.layer.cTop = db.txRef.layer.vTop
-  db.txRef.layer.sTab.clear()
-  db.txRef.layer.kMap.clear()
-  db.txRef.layer.accLeaves.clear()
-  db.txRef.layer.stoLeaves.clear()
-
+  txFrame.layer.sTab.clear()
+  txFrame.layer.kMap.clear()
+  txFrame.layer.accLeaves.clear()
+  txFrame.layer.stoLeaves.clear()
 
 # ------------------------------------------------------------------------------
 # End
@@ -104,13 +104,10 @@ proc `$$`*(e: CoreDbError): string =
 
 proc persist*(
     db: CoreDbRef;
-    blockNumber: BlockNumber;
-      ): CoreDbRc[void] =
-  ## This function stored cached data from the default context (see `ctx()`
-  ## below) to the persistent database.
-  ##
-  ## It also stores the argument block number `blockNumber` as a state record
-  ## which can be retrieved via `stateBlockNumber()`.
+    txFrame: CoreDbTxRef;
+      ) =
+  ## This function persists changes up to and including the given frame to the
+  ## database.
   ##
   db.setTrackNewApi BasePersistFn
 
@@ -131,8 +128,8 @@ proc persist*(
     # kvt changes written to memory but not to disk because of an aristo
     # error), we have to panic instead.
 
-    CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt, kvtBatch[])
-    CoreDbAccRef(db.ctx).call(persist, db.ctx.mpt, mptBatch[], blockNumber)
+    CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt, kvtBatch[], txFrame.kTx)
+    CoreDbAccRef(db.ctx).call(persist, db.ctx.mpt, mptBatch[], txFrame.aTx)
 
     db.defCtx.kvt.backend.putEndFn(kvtBatch[]).isOkOr:
       raiseAssert $api & ": " & $error
@@ -140,7 +137,6 @@ proc persist*(
     db.defCtx.mpt.backend.putEndFn(mptBatch[]).isOkOr:
       raiseAssert $api & ": " & $error
 
-    result = ok()
   else:
     discard kvtBatch.expect($api & ": should always be able to create batch")
     discard mptBatch.expect($api & ": should always be able to create batch")
@@ -153,9 +149,9 @@ proc stateBlockNumber*(db: CoreDbTxRef): BlockNumber =
   ##
   db.setTrackNewApi BaseStateBlockNumberFn
   result = block:
-    let rc = db.ctx.parent.ariApi.call(fetchLastSavedState, db.aTx)
+    let rc = db.ctx.parent.ariApi.call(fetchLastCheckpoint, db.aTx)
     if rc.isOk:
-      rc.value.serial.BlockNumber
+      rc.value.BlockNumber
     else:
       0u64
   db.ifTrackNewApi: debug logTxt, api, elapsed, result
@@ -605,48 +601,28 @@ proc txFrameBegin*(ctx: CoreDbCtxRef, parent: CoreDbTxRef): CoreDbTxRef =
   ##
   ctx.setTrackNewApi BaseNewTxFn
   let
-    kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt, if parent != nil: parent.kTx else: nil).valueOr:
-      raiseAssert $api & ": " & $error
-    aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt, if parent != nil: parent.aTx else: nil).valueOr:
-      raiseAssert $api & ": " & $error
+    kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt, if parent != nil: parent.kTx else: nil)
+    aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt, if parent != nil: parent.aTx else: nil)
   result = ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx)
   ctx.ifTrackNewApi:
     let newLevel = CoreDbAccRef(ctx).call(level, ctx.mpt)
     debug logTxt, api, elapsed, newLevel
 
-proc commit*(tx: CoreDbTxRef) =
+proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber) =
   tx.setTrackNewApi TxCommitFn:
     let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx)
-    CoreDbAccRef(tx.ctx).call(commit, tx.aTx).isOkOr:
-      raiseAssert $api & ": " & $error
-    CoreDbKvtRef(tx.ctx).call(commit, tx.kTx).isOkOr:
-      raiseAssert $api & ": " & $error
-  tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
-
-proc rollback*(tx: CoreDbTxRef) =
-  tx.setTrackNewApi TxRollbackFn:
-    let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx)
-    CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr:
-      raiseAssert $api & ": " & $error
-    CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr:
-      raiseAssert $api & ": " & $error
+    CoreDbAccRef(tx.ctx).call(checkpoint, tx.aTx, blockNumber)
   tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
 
 proc dispose*(tx: CoreDbTxRef) =
-  tx.setTrackNewApi TxDisposeFn:
+  tx.setTrackNewApi TxRollbackFn:
     let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx)
-    # if CoreDbAccRef(tx.ctx).call(isTop, tx.aTx):
-    CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr:
-      raiseAssert $api & ": " & $error
-    # if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx):
-    CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr:
-      raiseAssert $api & ": " & $error
+    CoreDbAccRef(tx.ctx).call(dispose, tx.aTx)
+    CoreDbKvtRef(tx.ctx).call(dispose, tx.kTx)
+    tx[].reset()
   tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
 
-func reparent*(tx: CoreDbTxRef, parent: CoreDbTxRef) =
-  tx.aTx.parent = parent.aTx
-  tx.kTx.parent = parent.kTx
-
 proc txFrameBegin*(tx: CoreDbTxRef): CoreDbTxRef =
   tx.ctx.txFrameBegin(tx)
 
@@ -40,7 +40,6 @@ type
   KvtDbProfData* = AristoDbProfData
     ## Borrowed from `aristo_profile`
 
-  KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
   KvtApiDelFn* = proc(db: KvtTxRef,
     key: openArray[byte]): Result[void,KvtError] {.noRaise.}
   KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.}
@@ -53,24 +52,23 @@ type
     key: openArray[byte]): Result[bool,KvtError] {.noRaise.}
   KvtApiPutFn* = proc(db: KvtTxRef,
     key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
-  KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
-  KvtApiPersistFn* = proc(db: KvtDbRef, batch: PutHdlRef) {.noRaise.}
+  KvtApiDisposeFn* = proc(tx: KvtTxRef) {.noRaise.}
+  KvtApiPersistFn* = proc(db: KvtDbRef, batch: PutHdlRef, txFrame: KvtTxRef) {.noRaise.}
   KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}
-  KvtApiTxFrameBeginFn* = proc(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] {.noRaise.}
+  KvtApiTxFrameBeginFn* = proc(db: KvtDbRef, parent: KvtTxRef): KvtTxRef {.noRaise.}
   KvtApiBaseTxFrameFn* = proc(db: KvtDbRef): KvtTxRef {.noRaise.}
 
   KvtApiRef* = ref KvtApiObj
   KvtApiObj* = object of RootObj
     ## Useful set of `Kvt` fuctions that can be filtered, stacked etc. Note
     ## that this API is modelled after a subset of the `Aristo` API.
-    commit*: KvtApiCommitFn
     del*: KvtApiDelFn
     finish*: KvtApiFinishFn
     get*: KvtApiGetFn
     len*: KvtApiLenFn
     hasKeyRc*: KvtApiHasKeyRcFn
     put*: KvtApiPutFn
-    rollback*: KvtApiRollbackFn
+    dispose*: KvtApiDisposeFn
     persist*: KvtApiPersistFn
     txFrameBegin*: KvtApiTxFrameBeginFn
     baseTxFrame*: KvtApiBaseTxFrameFn
@@ -80,14 +78,13 @@ type
     ## index/name mapping for profile slots
     KvtApiProfTotal = "total"
 
-    KvtApiProfCommitFn = "commit"
     KvtApiProfDelFn = "del"
     KvtApiProfFinishFn = "finish"
     KvtApiProfGetFn = "get"
     KvtApiProfLenFn = "len"
     KvtApiProfHasKeyRcFn = "hasKeyRc"
     KvtApiProfPutFn = "put"
-    KvtApiProfRollbackFn = "rollback"
+    KvtApiProfDisposeFn = "dispose"
     KvtApiProfPersistFn = "persist"
     KvtApiProfTxFrameBeginFn = "txFrameBegin"
     KvtApiProfBaseTxFrameFn = "baseTxFrame"
@ -134,14 +131,13 @@ proc dup(be: BackendRef): BackendRef =
 func init*(api: var KvtApiObj) =
   when AutoValidateApiHooks:
     api.reset
-  api.commit = commit
   api.del = del
   api.finish = finish
   api.get = get
   api.len = len
   api.hasKeyRc = hasKeyRc
   api.put = put
-  api.rollback = rollback
+  api.dispose = dispose
   api.persist = persist
   api.txFrameBegin = txFrameBegin
   api.baseTxFrame = baseTxFrame
@ -185,11 +181,6 @@ func init*(
     code
     data.update(n.ord, getTime() - start)

-  profApi.commit =
-    proc(a: KvtTxRef): auto =
-      KvtApiProfCommitFn.profileRunner:
-        result = api.commit(a)
-
   profApi.del =
     proc(a: KvtDbRef; b: openArray[byte]): auto =
       KvtApiProfDelFn.profileRunner:
@ -220,10 +211,10 @@ func init*(
       KvtApiProfPutFn.profileRunner:
        result = api.put(a, b, c)

-  profApi.rollback =
+  profApi.dispose =
     proc(a: KvtTxRef): auto =
-      KvtApiProfRollbackFn.profileRunner:
-        result = api.rollback(a)
+      KvtApiProfDisposeFn.profileRunner:
+        result = api.dispose(a)

   profApi.persist =
     proc(a: KvtDbRef): auto =
@ -231,9 +222,9 @@ func init*(
        result = api.persist(a)

   profApi.txFrameBegin =
-    proc(a: KvtDbRef): auto =
+    proc(a: KvtDbRef) =
       KvtApiProfTxFrameBeginFn.profileRunner:
-        result = api.txFrameBegin(a)
+        api.txFrameBegin(a)

   let beDup = be.dup()
   if beDup.isNil:
@ -68,6 +68,17 @@ func isValid*(layer: LayerRef): bool =
 # Don't put in a hash!
 func hash*(db: KvtDbRef): Hash {.error.}

+iterator stack*(tx: KvtTxRef): KvtTxRef =
+  # Stack going from base to tx
+  var frames: seq[KvtTxRef]
+  var tx = tx
+  while tx != nil:
+    frames.add tx
+    tx = tx.parent
+
+  while frames.len > 0:
+    yield frames.pop()
+
 iterator rstack*(tx: KvtTxRef): LayerRef =
   var tx = tx
   # Stack in reverse order
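The new `stack` iterator complements the existing reverse-order `rstack`: it walks the parent links to collect the chain, then yields frames from the base upwards. A self-contained sketch of the same traversal over a toy parent-linked frame type:

```nim
# Toy types only - this mirrors the shape of the added iterator, not the repo's KvtTxRef.
type Frame = ref object
  name: string
  parent: Frame

iterator stackOrder(top: Frame): Frame =
  var frames: seq[Frame]
  var cur = top
  while cur != nil:
    frames.add cur      # collected top-down ...
    cur = cur.parent
  while frames.len > 0:
    yield frames.pop()  # ... yielded bottom-up (base first)

when isMainModule:
  let base = Frame(name: "base")
  let mid = Frame(name: "mid", parent: base)
  let top = Frame(name: "top", parent: mid)
  for f in top.stackOrder():
    echo f.name   # prints: base, mid, top
```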
@ -21,21 +21,11 @@ import
 # ------------------------------------------------------------------------------

 proc persist*(
-    db: KvtDbRef; # Database
+    db: KvtDbRef;
     batch: PutHdlRef;
+    txFrame: KvtTxRef
       ) =
-  ## Persistently store data onto backend database. If the system is running
-  ## without a database backend, the function returns immediately with an
-  ## error.
-  ##
-  ## The function merges all staged data from the top layer cache onto the
-  ## backend stage area. After that, the top layer cache is cleared.
-  ##
-  ## Finally, the staged data are merged into the physical backend database
-  ## and the staged data area is cleared. Wile performing this last step,
-  ## the recovery journal is updated (if available.)
-  ##
-  db.txFramePersist(batch)
+  db.txFramePersist(batch, txFrame)

 # ------------------------------------------------------------------------------
 # End
@ -14,7 +14,6 @@
 {.push raises: [].}

 import
-  results,
   ./[kvt_desc, kvt_layers]


@ -22,7 +21,7 @@ import
 # Public functions
 # ------------------------------------------------------------------------------

-proc txFrameBegin*(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] =
+proc txFrameBegin*(db: KvtDbRef, parent: KvtTxRef): KvtTxRef =
   ## Starts a new transaction.
   ##
   ## Example:
@ -35,7 +34,7 @@ proc txFrameBegin*(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] =
   ##

   let parent = if parent == nil: db.txRef else: parent
-  ok KvtTxRef(
+  KvtTxRef(
     db: db,
     layer: LayerRef(),
     parent: parent,
@ -44,59 +43,44 @@ proc txFrameBegin*(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] =
 proc baseTxFrame*(db: KvtDbRef): KvtTxRef =
   db.txRef

-proc rollback*(
-    tx: KvtTxRef; # Top transaction on database
-      ): Result[void,KvtError] =
-  ## Given a *top level* handle, this function discards all database operations
-  ## performed for this transactio. The previous transaction is returned if
-  ## there was any.
-  ##
-  tx.layer[] = Layer()
-  ok()
+proc dispose*(
+    tx: KvtTxRef;
+      ) =
+  tx[].reset()

-proc commit*(
-    tx: KvtTxRef; # Top transaction on database
-      ): Result[void,KvtError] =
-  ## Given a *top level* handle, this function accepts all database operations
-  ## performed through this handle and merges it to the previous layer. The
-  ## previous transaction is returned if there was any.
-  ##
-  doAssert tx.parent != nil, "don't commit base tx"
-
-  mergeAndReset(tx.parent.layer[], tx.layer[])
-
-  ok()
-
 proc txFramePersist*(
-    db: KvtDbRef; # Database
+    db: KvtDbRef;
     batch: PutHdlRef;
+    txFrame: KvtTxRef;
       ) =
-  ## Persistently store data onto backend database. If the system is running
-  ## without a database backend, the function returns immediately with an
-  ## error.
-  ##
-  ## The function merges all staged data from the top layer cache onto the
-  ## backend stage area. After that, the top layer cache is cleared.
-  ##
-  ## Finally, the staged data are merged into the physical backend database
-  ## and the staged data area is cleared. Wile performing this last step,
-  ## the recovery journal is updated (if available.)
-  ##
   let be = db.backend
   doAssert not be.isNil, "Persisting to backend requires ... a backend!"

-  # Store structural single trie entries
-  for k,v in db.txRef.layer.sTab:
-    be.putKvpFn(batch, k, v)
+  if txFrame != db.txRef:
+    # Consolidate the changes from the old to the new base going from the
+    # bottom of the stack to avoid having to cascade each change through
+    # the full stack
+    assert txFrame.parent != nil
+    for frame in txFrame.stack():
+      if frame == db.txRef:
+        continue
+      mergeAndReset(db.txRef.layer[], frame.layer[])
+      frame.dispose()
+
+    # Put the now-merged contents in txFrame and make it the new base
+    swap(db.txRef[], txFrame[])
+    db.txRef = txFrame
+
+  # Store structural single trie entries
+  for k,v in txFrame.layer.sTab:
+    be.putKvpFn(batch, k, v)

   # TODO above, we only prepare the changes to the database but don't actually
   # write them to disk - the code below that updates the frame should
   # really run after things have been written (to maintain sync betweeen
   # in-memory and on-disk state)

-  # Done with txRef, all saved to backend
-  db.txRef.layer.sTab.clear()
+  txFrame.layer.sTab.clear()

 # ------------------------------------------------------------------------------
 # End
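This is the core of the change: instead of each commit cascading one level down, `txFramePersist` folds every frame below the chosen one into the base in stack order and then lets that frame become the new base. A self-contained sketch of the idea, with toy tables standing in for the Kvt layers (none of these types are the repository's):

```nim
import std/tables

type Frame = ref object
  kv: Table[string, string]
  parent: Frame

iterator stackOrder(top: Frame): Frame =
  var frames: seq[Frame]
  var cur = top
  while cur != nil:
    frames.add cur
    cur = cur.parent
  while frames.len > 0:
    yield frames.pop()

proc mergeAndReset(dst, src: var Table[string, string]) =
  for k, v in src:
    dst[k] = v
  src.clear()

proc persistInto(base: var Frame; target: Frame) =
  ## Fold every frame between the base and `target` (inclusive) into the base,
  ## then let `target` take over as the new base - writing to disk is elided.
  if target != base:
    for frame in target.stackOrder():
      if frame == base:
        continue
      mergeAndReset(base.kv, frame.kv)
    swap(base[], target[])   # the merged contents end up in `target`
    base = target

when isMainModule:
  var base = Frame(kv: {"a": "0"}.toTable)
  let f1 = Frame(kv: {"b": "1"}.toTable, parent: base)
  let f2 = Frame(kv: {"b": "2", "c": "3"}.toTable, parent: f1)
  base.persistInto(f2)
  doAssert base == f2 and base.kv == {"a": "0", "b": "2", "c": "3"}.toTable
```

Because the merge walks from the bottom of the stack, changes sitting in the upper frames never have to be copied through each intermediate ancestor, which is where the persist-performance gain in this commit comes from.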
@ -69,8 +69,6 @@ proc processBlock(
   let clearEmptyAccount = com.isSpuriousOrLater(header.number)
   db.persist(clearEmptyAccount)

-  vmState.ledger.txFrame.commit()
-
   ok()

 proc getVmState(c: ChainRef, header: Header, txFrame: CoreDbTxRef):
@ -85,8 +83,7 @@ proc getVmState(c: ChainRef, header: Header, txFrame: CoreDbTxRef):
 # intended to accepts invalid block
 proc setBlock*(c: ChainRef; blk: Block): Result[void, string] =
   template header: Header = blk.header
-  let txFrame = c.db.ctx.txFrameBegin(nil)
-  defer: txFrame.dispose()
+  let txFrame = c.db.ctx.txFrameBegin(c.db.baseTxFrame())

   # Needed for figuring out whether KVT cleanup is due (see at the end)
   let
@ -101,19 +98,18 @@ proc setBlock*(c: ChainRef; blk: Block): Result[void, string] =
   if blk.withdrawals.isSome:
     txFrame.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get)

-  # update currentBlock *after* we persist it
-  # so the rpc return consistent result
-  # between eth_blockNumber and eth_syncing
-  c.com.syncCurrent = header.number
-
-  txFrame.commit()
+  txFrame.checkpoint(header.number)

   # For the `Aristo` database, this code position is only reached if the
   # the parent state of the first block (as registered in `headers[0]`) was
   # the canonical state before updating. So this state will be saved with
   # `persistent()` together with the respective block number.
-  c.db.persist(header.number - 1).isOkOr:
-    return err($error)
+  c.db.persist(txFrame)
+
+  # update currentBlock *after* we persist it
+  # so the rpc return consistent result
+  # between eth_blockNumber and eth_syncing
+  c.com.syncCurrent = header.number

   ok()
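The new ordering in `setBlock` records the block number on the frame via `checkpoint`, hands the frame to `persist`, and only then bumps the externally visible sync height. A small sketch with stand-in types (`checkpoint`, `persist` and `syncCurrent` here are placeholders modelling the flow, not the repository's procs):

```nim
# Toy model: the block number travels with the frame, and the height visible
# to RPC is only updated once the frame has been handed over for persisting.
type
  Frame = ref object
    blockNumber: uint64
  Db = ref object
    base: Frame
  Com = ref object
    syncCurrent: uint64

proc checkpoint(f: Frame, number: uint64) =
  f.blockNumber = number

proc persist(db: Db, frame: Frame) =
  db.base = frame            # the frame becomes the new base (disk write elided)

when isMainModule:
  let db = Db(base: Frame())
  let com = Com()
  let txFrame = Frame()
  # ... block processing writes into txFrame ...
  txFrame.checkpoint(100)
  db.persist(txFrame)
  com.syncCurrent = txFrame.blockNumber  # updated *after* persist, keeping RPC consistent
  doAssert db.base == txFrame and com.syncCurrent == 100
```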
@ -18,13 +18,9 @@ import
   unittest2,
   ../execution_chain/db/aristo/aristo_desc,
   ./replay/pp,
-  ./test_aristo/test_blobify,
-  ./test_aristo/test_merge_proof,
-  ./test_aristo/test_nibbles,
-  ./test_aristo/test_portal_proof,
-  ./test_aristo/test_compute,
   ./test_aristo/[
-    test_helpers, test_samples_xx, test_tx,
+    test_blobify, test_compute, test_helpers, test_merge_proof, test_nibbles,
+    test_portal_proof, test_samples_xx, test_tx, test_tx_frame,
     undump_accounts, undump_storages]

 const
@ -21,6 +21,7 @@ import
     aristo_merge,
     aristo_desc,
     aristo_init,
+    aristo_persist,
     aristo_tx_frame,
   ]

@ -124,9 +125,10 @@ suite "Aristo compute":
     for (k, v, r) in samples[^1]:
       check:
         txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true)
+    txFrame.checkpoint(1)

     let batch = db.backend.putBegFn()[]
-    db.txFramePersist(batch, 1)
+    db.persist(batch, txFrame)
     check db.backend.putEndFn(batch).isOk()

     check txFrame.computeKeys(root).isOk()
tests/test_aristo/test_tx_frame.nim (new file, 85 lines)
@ -0,0 +1,85 @@
+# Nimbus
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or
+# distributed except according to those terms.
+
+{.used.}
+
+import
+  unittest2,
+  stew/endians2,
+  results,
+  eth/common/hashes,
+  ../../execution_chain/db/aristo/[
+    aristo_delete,
+    aristo_desc,
+    aristo_fetch,
+    aristo_tx_frame,
+    aristo_init,
+    aristo_init/memory_db,
+    aristo_merge,
+    aristo_persist,
+  ]
+
+proc makeAccount(i: uint64): (Hash32, AristoAccount) =
+  var path: Hash32
+  path.data()[0 .. 7] = i.toBytesBE()
+  (path, AristoAccount(balance: i.u256, codeHash: EMPTY_CODE_HASH))
+
+const
+  acc1 = makeAccount(1)
+  acc2 = makeAccount(2)
+
+suite "Aristo TxFrame":
+  setup:
+    let
+      mdb = MemDbRef()
+      db = AristoDbRef.init(memoryBackend(mdb)).expect("working memory backend")
+
+  test "Frames should independently keep data":
+    let
+      tx0 = db.txFrameBegin(db.baseTxFrame())
+      tx1 = db.txFrameBegin(tx0)
+      tx2 = db.txFrameBegin(tx1)
+      tx2b = db.txFrameBegin(tx1)
+
+    check:
+      tx0.mergeAccountRecord(acc1[0], acc1[1]).isOk()
+      tx1.mergeAccountRecord(acc2[0], acc2[1]).isOk()
+      tx2.deleteAccountRecord(acc2[0]).isOk()
+      tx2b.deleteAccountRecord(acc1[0]).isOk()
+
+    check:
+      tx0.fetchAccountRecord(acc1[0]).isOk()
+      tx0.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx0
+      tx1.fetchAccountRecord(acc1[0]).isOk()
+      tx1.fetchAccountRecord(acc1[0]).isOk()
+      tx2.fetchAccountRecord(acc1[0]).isOk()
+      tx2.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx2
+      tx2b.fetchAccountRecord(acc1[0]).isErr() # Doesn't exist in tx2b
+
+      tx0.fetchAccountRecord(acc1[0]) == tx2.fetchAccountRecord(acc1[0])
+
+      tx0.fetchStateRoot() != tx1.fetchStateRoot()
+      tx0.fetchStateRoot() == tx2.fetchStateRoot()
+
+    tx2.checkpoint(1)
+    let batch = db.backend.putBegFn().expect("working batch")
+    db.persist(batch, tx2)
+    check:
+      db.backend.putEndFn(batch).isOk()
+
+    db.finish()
+
+    block:
+      let
+        db2 = AristoDbRef.init(memoryBackend(mdb)).expect("working backend")
+        tx = db2.baseTxFrame()
+      check:
+        tx.fetchAccountRecord(acc1[0]).isOk()
+        tx.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx2