Core db remove legacy phrases (#2468)
* Rename `newKvt()` -> `ctx.getKvt()` why: Clean up legacy shortcut. Also, the `KVT` returned is not instantiated but refers to the shared `KVT` that resides in a context which is a generalisation of an in-memory database fork. The function `ctx` retrieves the default context. * Rename `newTransaction()` -> `ctx.newTransaction()` why: Clean up legacy shortcut. The transaction is applied to a context as a generalisation of an in-memory database fork. The function `ctx` retrieves the default context. * Rename `getColumn(CtGeneric)` -> `getGeneric()` why: No more a list of well known sub-tries needed, a single one is enough. In fact, `getColumn()` did only support a single sub-tree by now. * Reduce TODO list
This commit is contained in:
parent
9fc5495d49
commit
800fd77333
|
@ -42,7 +42,7 @@ proc processBlock(
|
||||||
## implementations (but can be safely removed, as well.)
|
## implementations (but can be safely removed, as well.)
|
||||||
## variant of `processBlock()` where the `header` argument is explicitly set.
|
## variant of `processBlock()` where the `header` argument is explicitly set.
|
||||||
template header: BlockHeader = blk.header
|
template header: BlockHeader = blk.header
|
||||||
var dbTx = vmState.com.db.newTransaction()
|
var dbTx = vmState.com.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
|
|
||||||
let com = vmState.com
|
let com = vmState.com
|
||||||
|
@ -92,7 +92,7 @@ proc getVmState(c: ChainRef, header: BlockHeader):
|
||||||
# intended to accept invalid blocks
|
# intended to accept invalid blocks
|
||||||
proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
|
proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
|
||||||
template header: BlockHeader = blk.header
|
template header: BlockHeader = blk.header
|
||||||
let dbTx = c.db.newTransaction()
|
let dbTx = c.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
|
|
||||||
c.com.hardForkTransition(header)
|
c.com.hardForkTransition(header)
|
||||||
|
|
|
@ -38,11 +38,11 @@ type
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc writeStatus(db: CoreDbRef, status: TransitionStatus) =
|
proc writeStatus(db: CoreDbRef, status: TransitionStatus) =
|
||||||
db.newKvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status)).isOkOr:
|
db.ctx.getKvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status)).isOkOr:
|
||||||
raiseAssert "writeStatus(): put() failed " & $$error
|
raiseAssert "writeStatus(): put() failed " & $$error
|
||||||
|
|
||||||
proc readStatus(db: CoreDbRef): TransitionStatus =
|
proc readStatus(db: CoreDbRef): TransitionStatus =
|
||||||
var bytes = db.newKvt.get(transitionStatusKey().toOpenArray()).valueOr:
|
var bytes = db.ctx.getKvt.get(transitionStatusKey().toOpenArray()).valueOr:
|
||||||
EmptyBlob
|
EmptyBlob
|
||||||
if bytes.len > 0:
|
if bytes.len > 0:
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -355,7 +355,7 @@ proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType =
|
||||||
return com.config.consensusType
|
return com.config.consensusType
|
||||||
|
|
||||||
proc initializeEmptyDb*(com: CommonRef) =
|
proc initializeEmptyDb*(com: CommonRef) =
|
||||||
let kvt = com.db.newKvt()
|
let kvt = com.db.ctx.getKvt()
|
||||||
proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
|
proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
|
||||||
kvt.hasKey(key).expect "valid bool"
|
kvt.hasKey(key).expect "valid bool"
|
||||||
if canonicalHeadHashKey().toOpenArray notin kvt:
|
if canonicalHeadHashKey().toOpenArray notin kvt:
|
||||||
|
|
|
@ -145,7 +145,7 @@ proc validateBlock(c: ForkedChainRef,
|
||||||
parent: BlockHeader,
|
parent: BlockHeader,
|
||||||
blk: EthBlock,
|
blk: EthBlock,
|
||||||
updateCursor: bool = true): Result[void, string] =
|
updateCursor: bool = true): Result[void, string] =
|
||||||
let dbTx = c.db.newTransaction()
|
let dbTx = c.db.ctx.newTransaction()
|
||||||
defer:
|
defer:
|
||||||
dbTx.dispose()
|
dbTx.dispose()
|
||||||
|
|
||||||
|
@ -172,7 +172,7 @@ proc replaySegment(c: ForkedChainRef, target: Hash256) =
|
||||||
prevHash = chain[^1].header.parentHash
|
prevHash = chain[^1].header.parentHash
|
||||||
|
|
||||||
c.stagingTx.rollback()
|
c.stagingTx.rollback()
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
c.cursorHeader = c.baseHeader
|
c.cursorHeader = c.baseHeader
|
||||||
for i in countdown(chain.high, chain.low):
|
for i in countdown(chain.high, chain.low):
|
||||||
c.validateBlock(c.cursorHeader, chain[i],
|
c.validateBlock(c.cursorHeader, chain[i],
|
||||||
|
@ -397,7 +397,7 @@ proc importBlock*(c: ForkedChainRef, blk: EthBlock): Result[void, string] =
|
||||||
# Try to import block to canonical or side chain.
|
# Try to import block to canonical or side chain.
|
||||||
# return error if the block is invalid
|
# return error if the block is invalid
|
||||||
if c.stagingTx.isNil:
|
if c.stagingTx.isNil:
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
|
|
||||||
template header(): BlockHeader =
|
template header(): BlockHeader =
|
||||||
blk.header
|
blk.header
|
||||||
|
@ -407,7 +407,7 @@ proc importBlock*(c: ForkedChainRef, blk: EthBlock): Result[void, string] =
|
||||||
|
|
||||||
if header.parentHash == c.baseHash:
|
if header.parentHash == c.baseHash:
|
||||||
c.stagingTx.rollback()
|
c.stagingTx.rollback()
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
return c.validateBlock(c.baseHeader, blk)
|
return c.validateBlock(c.baseHeader, blk)
|
||||||
|
|
||||||
if header.parentHash notin c.blocks:
|
if header.parentHash notin c.blocks:
|
||||||
|
@ -442,7 +442,7 @@ proc forkChoice*(c: ForkedChainRef,
|
||||||
if c.cursorHash != head.cursorHash:
|
if c.cursorHash != head.cursorHash:
|
||||||
if not c.stagingTx.isNil:
|
if not c.stagingTx.isNil:
|
||||||
c.stagingTx.rollback()
|
c.stagingTx.rollback()
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
c.replaySegment(headHash)
|
c.replaySegment(headHash)
|
||||||
|
|
||||||
c.trimCanonicalChain(head, headHash)
|
c.trimCanonicalChain(head, headHash)
|
||||||
|
@ -452,7 +452,7 @@ proc forkChoice*(c: ForkedChainRef,
|
||||||
|
|
||||||
if c.stagingTx.isNil:
|
if c.stagingTx.isNil:
|
||||||
# setHead below don't go straight to db
|
# setHead below don't go straight to db
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
|
|
||||||
c.setHead(headHash, head.header.number)
|
c.setHead(headHash, head.header.number)
|
||||||
return ok()
|
return ok()
|
||||||
|
@ -489,7 +489,7 @@ proc forkChoice*(c: ForkedChainRef,
|
||||||
|
|
||||||
# Write segment from base+1 to newBase into database
|
# Write segment from base+1 to newBase into database
|
||||||
c.stagingTx.rollback()
|
c.stagingTx.rollback()
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
|
|
||||||
if newBase.header.number > c.baseHeader.number:
|
if newBase.header.number > c.baseHeader.number:
|
||||||
c.replaySegment(newBase.hash)
|
c.replaySegment(newBase.hash)
|
||||||
|
@ -504,7 +504,7 @@ proc forkChoice*(c: ForkedChainRef,
|
||||||
if c.stagingTx.isNil:
|
if c.stagingTx.isNil:
|
||||||
# replaySegment or setHead below don't
|
# replaySegment or setHead below don't
|
||||||
# go straight to db
|
# go straight to db
|
||||||
c.stagingTx = c.db.newTransaction()
|
c.stagingTx = c.db.ctx.newTransaction()
|
||||||
|
|
||||||
# Move chain state forward to current head
|
# Move chain state forward to current head
|
||||||
if newBase.header.number < head.header.number:
|
if newBase.header.number < head.header.number:
|
||||||
|
|
|
@ -78,7 +78,7 @@ proc purgeOlderBlocksFromHistory(db: CoreDbRef, bn: BlockNumber) =
|
||||||
proc persistBlocksImpl(
|
proc persistBlocksImpl(
|
||||||
c: ChainRef, blocks: openArray[EthBlock], flags: PersistBlockFlags = {}
|
c: ChainRef, blocks: openArray[EthBlock], flags: PersistBlockFlags = {}
|
||||||
): Result[PersistStats, string] =
|
): Result[PersistStats, string] =
|
||||||
let dbTx = c.db.newTransaction()
|
let dbTx = c.db.ctx.newTransaction()
|
||||||
defer:
|
defer:
|
||||||
dbTx.dispose()
|
dbTx.dispose()
|
||||||
|
|
||||||
|
|
|
@ -165,7 +165,7 @@ proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string]
|
||||||
|
|
||||||
let packer = TxPackerStateRef( # return value
|
let packer = TxPackerStateRef( # return value
|
||||||
xp: xp,
|
xp: xp,
|
||||||
tr: AristoDbMemory.newCoreDbRef().ctx.getColumn(CtGeneric, clearData=true),
|
tr: AristoDbMemory.newCoreDbRef().ctx.getGeneric(clearData=true),
|
||||||
balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient),
|
balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient),
|
||||||
numBlobPerBlock: 0,
|
numBlobPerBlock: 0,
|
||||||
)
|
)
|
||||||
|
@ -274,7 +274,7 @@ proc packerVmExec*(xp: TxPoolRef): Result[void, string] {.gcsafe,raises: [Catcha
|
||||||
## Rebuild `packed` bucket by selection items from the `staged` bucket
|
## Rebuild `packed` bucket by selection items from the `staged` bucket
|
||||||
## after executing them in the VM.
|
## after executing them in the VM.
|
||||||
let db = xp.chain.com.db
|
let db = xp.chain.com.db
|
||||||
let dbTx = db.newTransaction
|
let dbTx = db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
|
|
||||||
var pst = xp.vmExecInit.valueOr:
|
var pst = xp.vmExecInit.valueOr:
|
||||||
|
|
|
@ -6,13 +6,6 @@
|
||||||
remember the follow up vertices which can travel through the tx-layers
|
remember the follow up vertices which can travel through the tx-layers
|
||||||
to be picked up by the backend store.
|
to be picked up by the backend store.
|
||||||
|
|
||||||
* Consider changing fetch/merge/delete prototypes for account and storage. At
|
|
||||||
the moment they all use `openArray[]` for strictly 32 byte arrays (which is
|
|
||||||
only implicitely enforced at run time -- i.e. it would fail otherwise.)
|
|
||||||
|
|
||||||
* Mental note: For *proof-mode* with pre-allocated locked vertices and Merkle
|
* Mental note: For *proof-mode* with pre-allocated locked vertices and Merkle
|
||||||
keys, verification of a patyion tree must be done by computing sub-tree keys
|
keys, verification of a partial tree must be done by computing sub-tree keys
|
||||||
at the relative roots and comparing them with the pre-allocated Merkle keys.
|
at the relative roots and comparing them with the pre-allocated Merkle keys.
|
||||||
|
|
||||||
* Remove legacy state format import from `deblobifyTo()` after a while (last
|
|
||||||
updated 28/06/24).
|
|
||||||
|
|
|
@ -41,9 +41,6 @@ template call(kvt: CoreDbKvtRef; fn: untyped; args: varArgs[untyped]): untyped =
|
||||||
template mpt(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef =
|
template mpt(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef =
|
||||||
dsc.distinctBase.mpt
|
dsc.distinctBase.mpt
|
||||||
|
|
||||||
template rootID(mpt: CoreDbMptRef): VertexID =
|
|
||||||
VertexID(CtGeneric)
|
|
||||||
|
|
||||||
template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =
|
template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =
|
||||||
when CoreDbEnableApiJumpTable:
|
when CoreDbEnableApiJumpTable:
|
||||||
api.fn(args)
|
api.fn(args)
|
||||||
|
@ -67,10 +64,11 @@ iterator aristoReplicate[T](
|
||||||
##
|
##
|
||||||
let p = mpt.call(forkTx, mpt.mpt, 0).valueOrApiError "aristoReplicate()"
|
let p = mpt.call(forkTx, mpt.mpt, 0).valueOrApiError "aristoReplicate()"
|
||||||
defer: discard mpt.call(forget, p)
|
defer: discard mpt.call(forget, p)
|
||||||
for (rvid,key,vtx,node) in T.replicate(p):
|
for (rVid,key,vtx,node) in T.replicate(p):
|
||||||
if key.len == 32:
|
if key.len == 32:
|
||||||
yield (@(key.data), node.encode)
|
yield (@(key.data), node.encode)
|
||||||
elif rvid.vid == mpt.rootID:
|
elif rVid.vid == CoreDbVidGeneric:
|
||||||
|
# FIXME: Would an assert rather be appropriate here?
|
||||||
yield (@(key.to(Hash256).data), node.encode)
|
yield (@(key.to(Hash256).data), node.encode)
|
||||||
|
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -24,8 +24,6 @@ import
|
||||||
./aristo_db,
|
./aristo_db,
|
||||||
../../opts
|
../../opts
|
||||||
|
|
||||||
include ./aristo_replicate
|
|
||||||
|
|
||||||
const
|
const
|
||||||
# Expectation messages
|
# Expectation messages
|
||||||
aristoFail = "Aristo/RocksDB init() failed"
|
aristoFail = "Aristo/RocksDB init() failed"
|
||||||
|
|
|
@ -44,7 +44,6 @@ export
|
||||||
CoreDbApiError,
|
CoreDbApiError,
|
||||||
#CoreDbCaptFlags,
|
#CoreDbCaptFlags,
|
||||||
#CoreDbCaptRef,
|
#CoreDbCaptRef,
|
||||||
CoreDbColType,
|
|
||||||
CoreDbCtxRef,
|
CoreDbCtxRef,
|
||||||
CoreDbErrorCode,
|
CoreDbErrorCode,
|
||||||
CoreDbErrorRef,
|
CoreDbErrorRef,
|
||||||
|
@ -170,9 +169,6 @@ template mpt(tx: CoreDbTxRef): AristoDbRef =
|
||||||
template ctx(acc: CoreDbAccRef): CoreDbCtxRef =
|
template ctx(acc: CoreDbAccRef): CoreDbCtxRef =
|
||||||
acc.distinctBase
|
acc.distinctBase
|
||||||
|
|
||||||
template rootID(mpt: CoreDbMptRef): VertexID =
|
|
||||||
VertexID(CtGeneric)
|
|
||||||
|
|
||||||
# ---------------
|
# ---------------
|
||||||
|
|
||||||
template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =
|
template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =
|
||||||
|
@ -344,8 +340,8 @@ proc stateBlockNumber*(db: CoreDbRef): BlockNumber =
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef =
|
proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef =
|
||||||
## This function subscribes to the common base object shared with other
|
## This function retrieves the common base object shared with other KVT
|
||||||
## KVT descriptors. Any changes are immediately visible to subscribers.
|
## descriptors. Any changes are immediately visible to subscribers.
|
||||||
## On destruction (when the constructed object gets out of scope), changes
|
## On destruction (when the constructed object gets out of scope), changes
|
||||||
## are not saved to the backend database but are still cached and available.
|
## are not saved to the backend database but are still cached and available.
|
||||||
##
|
##
|
||||||
|
@ -444,7 +440,7 @@ proc getGeneric*(
|
||||||
ctx.setTrackNewApi CtxGetGenericFn
|
ctx.setTrackNewApi CtxGetGenericFn
|
||||||
result = CoreDbMptRef(ctx)
|
result = CoreDbMptRef(ctx)
|
||||||
if clearData:
|
if clearData:
|
||||||
result.call(deleteGenericTree, ctx.mpt, result.rootID).isOkOr:
|
result.call(deleteGenericTree, ctx.mpt, CoreDbVidGeneric).isOkOr:
|
||||||
raiseAssert $api & ": " & $error
|
raiseAssert $api & ": " & $error
|
||||||
ctx.ifTrackNewApi: debug newApiTxt, api, clearData, elapsed
|
ctx.ifTrackNewApi: debug newApiTxt, api, clearData, elapsed
|
||||||
|
|
||||||
|
@ -456,7 +452,7 @@ proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||||
##
|
##
|
||||||
mpt.setTrackNewApi MptFetchFn
|
mpt.setTrackNewApi MptFetchFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(fetchGenericData, mpt.mpt, mpt.rootID, key)
|
let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok(rc.value)
|
ok(rc.value)
|
||||||
elif rc.error == FetchPathNotFound:
|
elif rc.error == FetchPathNotFound:
|
||||||
|
@ -471,7 +467,7 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||||
##
|
##
|
||||||
mpt.setTrackNewApi MptFetchOrEmptyFn
|
mpt.setTrackNewApi MptFetchOrEmptyFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(fetchGenericData, mpt.mpt, mpt.rootID, key)
|
let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok(rc.value)
|
ok(rc.value)
|
||||||
elif rc.error == FetchPathNotFound:
|
elif rc.error == FetchPathNotFound:
|
||||||
|
@ -483,7 +479,7 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||||
proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
|
proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
|
||||||
mpt.setTrackNewApi MptDeleteFn
|
mpt.setTrackNewApi MptDeleteFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(deleteGenericData, mpt.mpt, mpt.rootID, key)
|
let rc = mpt.call(deleteGenericData, mpt.mpt,CoreDbVidGeneric, key)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok()
|
ok()
|
||||||
elif rc.error == DelPathNotFound:
|
elif rc.error == DelPathNotFound:
|
||||||
|
@ -499,7 +495,7 @@ proc merge*(
|
||||||
): CoreDbRc[void] =
|
): CoreDbRc[void] =
|
||||||
mpt.setTrackNewApi MptMergeFn
|
mpt.setTrackNewApi MptMergeFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(mergeGenericData, mpt.mpt, mpt.rootID, key, val)
|
let rc = mpt.call(mergeGenericData, mpt.mpt,CoreDbVidGeneric, key, val)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok()
|
ok()
|
||||||
else:
|
else:
|
||||||
|
@ -513,7 +509,7 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||||
##
|
##
|
||||||
mpt.setTrackNewApi MptHasPathFn
|
mpt.setTrackNewApi MptHasPathFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(hasPathGeneric, mpt.mpt, mpt.rootID, key)
|
let rc = mpt.call(hasPathGeneric, mpt.mpt, CoreDbVidGeneric, key)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok(rc.value)
|
ok(rc.value)
|
||||||
else:
|
else:
|
||||||
|
@ -529,7 +525,7 @@ proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
|
||||||
##
|
##
|
||||||
mpt.setTrackNewApi MptStateFn
|
mpt.setTrackNewApi MptStateFn
|
||||||
result = block:
|
result = block:
|
||||||
let rc = mpt.call(fetchGenericState, mpt.mpt, mpt.rootID, updateOk)
|
let rc = mpt.call(fetchGenericState, mpt.mpt, CoreDbVidGeneric, updateOk)
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
ok(rc.value)
|
ok(rc.value)
|
||||||
else:
|
else:
|
||||||
|
@ -874,28 +870,6 @@ proc dispose*(tx: CoreDbTxRef) =
|
||||||
raiseAssert $api & ": " & $error
|
raiseAssert $api & ": " & $error
|
||||||
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel
|
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
|
||||||
# Legacy and convenience methods
|
|
||||||
# ------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
proc newKvt*(db: CoreDbRef): CoreDbKvtRef =
|
|
||||||
## Variant of `getKvt()` retrieving the KVT from the default context
|
|
||||||
db.ctx.getKvt
|
|
||||||
|
|
||||||
proc newTransaction*(db: CoreDbRef): CoreDbTxRef =
|
|
||||||
## Variant of `newTransaction()` starting the transaction on the default
|
|
||||||
## context
|
|
||||||
db.ctx.newTransaction
|
|
||||||
|
|
||||||
proc getColumn*(
|
|
||||||
ctx: CoreDbCtxRef;
|
|
||||||
colType: CoreDbColType;
|
|
||||||
clearData = false;
|
|
||||||
): CoreDbMptRef =
|
|
||||||
## Variant of `getGenteric()` forcing `colType` to be `CtGeneric`
|
|
||||||
doAssert colType == CtGeneric
|
|
||||||
ctx.getGeneric clearData
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public tracer methods
|
# Public tracer methods
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
|
@ -29,6 +29,10 @@ type
|
||||||
|
|
||||||
const
|
const
|
||||||
CoreDbPersistentTypes* = {AristoDbRocks}
|
CoreDbPersistentTypes* = {AristoDbRocks}
|
||||||
|
## List of persistent DB types (currently only a single one)
|
||||||
|
|
||||||
|
CoreDbVidGeneric* = VertexID(2)
|
||||||
|
## Generic `MPT` root vertex ID for calculating Merkle hashes
|
||||||
|
|
||||||
type
|
type
|
||||||
CoreDbProfListRef* = AristoDbProfListRef
|
CoreDbProfListRef* = AristoDbProfListRef
|
||||||
|
@ -59,9 +63,6 @@ type
|
||||||
StoNotFound
|
StoNotFound
|
||||||
TxPending
|
TxPending
|
||||||
|
|
||||||
CoreDbColType* = enum # Keep that legacy type for a while ..
|
|
||||||
CtGeneric = 2 # Actually only this constant is needed
|
|
||||||
|
|
||||||
CoreDbCaptFlags* {.pure.} = enum
|
CoreDbCaptFlags* {.pure.} = enum
|
||||||
PersistPut
|
PersistPut
|
||||||
PersistDel
|
PersistDel
|
||||||
|
|
|
@ -69,7 +69,7 @@ iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
|
||||||
mpt.setTrackNewApi MptPairsIt
|
mpt.setTrackNewApi MptPairsIt
|
||||||
case mpt.dbType:
|
case mpt.dbType:
|
||||||
of AristoDbMemory, AristoDbRocks, AristoDbVoid:
|
of AristoDbMemory, AristoDbRocks, AristoDbVoid:
|
||||||
for (path,data) in mpt.mpt.rightPairsGeneric mpt.rootID:
|
for (path,data) in mpt.mpt.rightPairsGeneric CoreDbVidGeneric:
|
||||||
yield (mpt.call(pathAsBlob, path), data)
|
yield (mpt.call(pathAsBlob, path), data)
|
||||||
of Ooops:
|
of Ooops:
|
||||||
raiseAssert: "Unsupported database type: " & $mpt.dbType
|
raiseAssert: "Unsupported database type: " & $mpt.dbType
|
||||||
|
|
|
@ -119,7 +119,7 @@ iterator getBlockTransactionData*(
|
||||||
if txRoot == EMPTY_ROOT_HASH:
|
if txRoot == EMPTY_ROOT_HASH:
|
||||||
break body
|
break body
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx in 0'u16..<uint16.high:
|
for idx in 0'u16..<uint16.high:
|
||||||
let key = hashIndexKey(txRoot, idx)
|
let key = hashIndexKey(txRoot, idx)
|
||||||
let txData = kvt.getOrEmpty(key).valueOr:
|
let txData = kvt.getOrEmpty(key).valueOr:
|
||||||
|
@ -157,7 +157,7 @@ iterator getWithdrawals*(
|
||||||
if withdrawalsRoot == EMPTY_ROOT_HASH:
|
if withdrawalsRoot == EMPTY_ROOT_HASH:
|
||||||
break body
|
break body
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx in 0'u16..<uint16.high:
|
for idx in 0'u16..<uint16.high:
|
||||||
let key = hashIndexKey(withdrawalsRoot, idx)
|
let key = hashIndexKey(withdrawalsRoot, idx)
|
||||||
let data = kvt.getOrEmpty(key).valueOr:
|
let data = kvt.getOrEmpty(key).valueOr:
|
||||||
|
@ -177,7 +177,7 @@ iterator getReceipts*(
|
||||||
if receiptsRoot == EMPTY_ROOT_HASH:
|
if receiptsRoot == EMPTY_ROOT_HASH:
|
||||||
break body
|
break body
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx in 0'u16..<uint16.high:
|
for idx in 0'u16..<uint16.high:
|
||||||
let key = hashIndexKey(receiptsRoot, idx)
|
let key = hashIndexKey(receiptsRoot, idx)
|
||||||
let data = kvt.getOrEmpty(key).valueOr:
|
let data = kvt.getOrEmpty(key).valueOr:
|
||||||
|
@ -198,7 +198,7 @@ proc removeTransactionFromCanonicalChain(
|
||||||
) =
|
) =
|
||||||
## Removes the transaction specified by the given hash from the canonical
|
## Removes the transaction specified by the given hash from the canonical
|
||||||
## chain.
|
## chain.
|
||||||
db.newKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
|
db.ctx.getKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
|
||||||
warn logTxt "removeTransactionFromCanonicalChain()",
|
warn logTxt "removeTransactionFromCanonicalChain()",
|
||||||
transactionHash, action="del()", error=($$error)
|
transactionHash, action="del()", error=($$error)
|
||||||
|
|
||||||
|
@ -233,7 +233,7 @@ proc setAsCanonicalChainHead(
|
||||||
db.addBlockNumberToHashLookup(h.number, h.blockHash)
|
db.addBlockNumberToHashLookup(h.number, h.blockHash)
|
||||||
|
|
||||||
let canonicalHeadHash = canonicalHeadHashKey()
|
let canonicalHeadHash = canonicalHeadHashKey()
|
||||||
db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
||||||
warn logTxt "setAsCanonicalChainHead()",
|
warn logTxt "setAsCanonicalChainHead()",
|
||||||
canonicalHeadHash, action="put()", error=($$error)
|
canonicalHeadHash, action="put()", error=($$error)
|
||||||
|
|
||||||
|
@ -251,7 +251,7 @@ proc markCanonicalChain(
|
||||||
|
|
||||||
# mark current header as canonical
|
# mark current header as canonical
|
||||||
let
|
let
|
||||||
kvt = db.newKvt()
|
kvt = db.ctx.getKvt()
|
||||||
key = blockNumberToHashKey(currHeader.number)
|
key = blockNumberToHashKey(currHeader.number)
|
||||||
kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
|
kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
|
||||||
warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
|
warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
|
||||||
|
@ -297,7 +297,7 @@ proc markCanonicalChain(
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc exists*(db: CoreDbRef, hash: Hash256): bool =
|
proc exists*(db: CoreDbRef, hash: Hash256): bool =
|
||||||
db.newKvt().hasKey(hash.data).valueOr:
|
db.ctx.getKvt().hasKey(hash.data).valueOr:
|
||||||
warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
|
warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
|
||||||
return false
|
return false
|
||||||
|
|
||||||
|
@ -315,7 +315,7 @@ proc getBlockHeader*(
|
||||||
output: var BlockHeader;
|
output: var BlockHeader;
|
||||||
): bool =
|
): bool =
|
||||||
const info = "getBlockHeader()"
|
const info = "getBlockHeader()"
|
||||||
let data = db.newKvt().get(genericHashKey(blockHash).toOpenArray).valueOr:
|
let data = db.ctx.getKvt().get(genericHashKey(blockHash).toOpenArray).valueOr:
|
||||||
if error.error != KvtNotFound:
|
if error.error != KvtNotFound:
|
||||||
warn logTxt info, blockHash, action="get()", error=($$error)
|
warn logTxt info, blockHash, action="get()", error=($$error)
|
||||||
return false
|
return false
|
||||||
|
@ -339,7 +339,7 @@ proc getHash(
|
||||||
db: CoreDbRef;
|
db: CoreDbRef;
|
||||||
key: DbKey;
|
key: DbKey;
|
||||||
): Opt[Hash256] =
|
): Opt[Hash256] =
|
||||||
let data = db.newKvt().get(key.toOpenArray).valueOr:
|
let data = db.ctx.getKvt().get(key.toOpenArray).valueOr:
|
||||||
if error.error != KvtNotFound:
|
if error.error != KvtNotFound:
|
||||||
warn logTxt "getHash()", key, action="get()", error=($$error)
|
warn logTxt "getHash()", key, action="get()", error=($$error)
|
||||||
return Opt.none(Hash256)
|
return Opt.none(Hash256)
|
||||||
|
@ -435,7 +435,7 @@ proc getScore*(
|
||||||
db: CoreDbRef;
|
db: CoreDbRef;
|
||||||
blockHash: Hash256;
|
blockHash: Hash256;
|
||||||
): Opt[UInt256] =
|
): Opt[UInt256] =
|
||||||
let data = db.newKvt()
|
let data = db.ctx.getKvt()
|
||||||
.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
|
.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
|
||||||
if error.error != KvtNotFound:
|
if error.error != KvtNotFound:
|
||||||
warn logTxt "getScore()", blockHash, action="get()", error=($$error)
|
warn logTxt "getScore()", blockHash, action="get()", error=($$error)
|
||||||
|
@ -449,7 +449,7 @@ proc getScore*(
|
||||||
proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
|
proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
|
||||||
## for testing purpose
|
## for testing purpose
|
||||||
let scoreKey = blockHashToScoreKey blockHash
|
let scoreKey = blockHashToScoreKey blockHash
|
||||||
db.newKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
|
db.ctx.getKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
|
||||||
warn logTxt "setScore()", scoreKey, action="put()", error=($$error)
|
warn logTxt "setScore()", scoreKey, action="put()", error=($$error)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -484,7 +484,7 @@ proc getAncestorsHashes*(
|
||||||
proc addBlockNumberToHashLookup*(
|
proc addBlockNumberToHashLookup*(
|
||||||
db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash256) =
|
db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash256) =
|
||||||
let blockNumberKey = blockNumberToHashKey(blockNumber)
|
let blockNumberKey = blockNumberToHashKey(blockNumber)
|
||||||
db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
|
db.ctx.getKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
|
||||||
warn logTxt "addBlockNumberToHashLookup()",
|
warn logTxt "addBlockNumberToHashLookup()",
|
||||||
blockNumberKey, action="put()", error=($$error)
|
blockNumberKey, action="put()", error=($$error)
|
||||||
|
|
||||||
|
@ -500,7 +500,7 @@ proc persistTransactions*(
|
||||||
if transactions.len == 0:
|
if transactions.len == 0:
|
||||||
return
|
return
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx, tx in transactions:
|
for idx, tx in transactions:
|
||||||
let
|
let
|
||||||
encodedTx = rlp.encode(tx)
|
encodedTx = rlp.encode(tx)
|
||||||
|
@ -523,7 +523,7 @@ proc forgetHistory*(
|
||||||
## returns `true`, if some history was available and deleted.
|
## returns `true`, if some history was available and deleted.
|
||||||
var blockHash: Hash256
|
var blockHash: Hash256
|
||||||
if db.getBlockHash(blockNum, blockHash):
|
if db.getBlockHash(blockNum, blockHash):
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
# delete blockNum->blockHash
|
# delete blockNum->blockHash
|
||||||
discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray)
|
discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray)
|
||||||
result = true
|
result = true
|
||||||
|
@ -542,7 +542,7 @@ proc getTransactionByIndex*(
|
||||||
const
|
const
|
||||||
info = "getTransaction()"
|
info = "getTransaction()"
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
let key = hashIndexKey(txRoot, txIndex)
|
let key = hashIndexKey(txRoot, txIndex)
|
||||||
let txData = kvt.getOrEmpty(key).valueOr:
|
let txData = kvt.getOrEmpty(key).valueOr:
|
||||||
warn logTxt "getTransaction()",
|
warn logTxt "getTransaction()",
|
||||||
|
@ -566,7 +566,7 @@ proc getTransactionCount*(
|
||||||
const
|
const
|
||||||
info = "getTransactionCount()"
|
info = "getTransactionCount()"
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
var txCount = 0'u16
|
var txCount = 0'u16
|
||||||
while true:
|
while true:
|
||||||
let key = hashIndexKey(txRoot, txCount)
|
let key = hashIndexKey(txRoot, txCount)
|
||||||
|
@ -590,7 +590,7 @@ proc getUnclesCount*(
|
||||||
if ommersHash != EMPTY_UNCLE_HASH:
|
if ommersHash != EMPTY_UNCLE_HASH:
|
||||||
let encodedUncles = block:
|
let encodedUncles = block:
|
||||||
let key = genericHashKey(ommersHash)
|
let key = genericHashKey(ommersHash)
|
||||||
db.newKvt().get(key.toOpenArray).valueOr:
|
db.ctx.getKvt().get(key.toOpenArray).valueOr:
|
||||||
if error.error == KvtNotFound:
|
if error.error == KvtNotFound:
|
||||||
warn logTxt info, ommersHash, action="get()", `error`=($$error)
|
warn logTxt info, ommersHash, action="get()", `error`=($$error)
|
||||||
return 0
|
return 0
|
||||||
|
@ -605,7 +605,7 @@ proc getUncles*(
|
||||||
if ommersHash != EMPTY_UNCLE_HASH:
|
if ommersHash != EMPTY_UNCLE_HASH:
|
||||||
let encodedUncles = block:
|
let encodedUncles = block:
|
||||||
let key = genericHashKey(ommersHash)
|
let key = genericHashKey(ommersHash)
|
||||||
db.newKvt().get(key.toOpenArray).valueOr:
|
db.ctx.getKvt().get(key.toOpenArray).valueOr:
|
||||||
if error.error == KvtNotFound:
|
if error.error == KvtNotFound:
|
||||||
warn logTxt info, ommersHash, action="get()", `error`=($$error)
|
warn logTxt info, ommersHash, action="get()", `error`=($$error)
|
||||||
return @[]
|
return @[]
|
||||||
|
@ -619,7 +619,7 @@ proc persistWithdrawals*(
|
||||||
const info = "persistWithdrawals()"
|
const info = "persistWithdrawals()"
|
||||||
if withdrawals.len == 0:
|
if withdrawals.len == 0:
|
||||||
return
|
return
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx, wd in withdrawals:
|
for idx, wd in withdrawals:
|
||||||
let key = hashIndexKey(withdrawalsRoot, idx.uint16)
|
let key = hashIndexKey(withdrawalsRoot, idx.uint16)
|
||||||
kvt.put(key, rlp.encode(wd)).isOkOr:
|
kvt.put(key, rlp.encode(wd)).isOkOr:
|
||||||
|
@ -718,7 +718,7 @@ proc getUncleHashes*(
|
||||||
if header.ommersHash != EMPTY_UNCLE_HASH:
|
if header.ommersHash != EMPTY_UNCLE_HASH:
|
||||||
let
|
let
|
||||||
key = genericHashKey(header.ommersHash)
|
key = genericHashKey(header.ommersHash)
|
||||||
encodedUncles = db.newKvt().get(key.toOpenArray).valueOr:
|
encodedUncles = db.ctx.getKvt().get(key.toOpenArray).valueOr:
|
||||||
if error.error == KvtNotFound:
|
if error.error == KvtNotFound:
|
||||||
warn logTxt "getUncleHashes()",
|
warn logTxt "getUncleHashes()",
|
||||||
ommersHash=header.ommersHash, action="get()", `error`=($$error)
|
ommersHash=header.ommersHash, action="get()", `error`=($$error)
|
||||||
|
@ -732,7 +732,7 @@ proc getTransactionKey*(
|
||||||
{.gcsafe, raises: [RlpError].} =
|
{.gcsafe, raises: [RlpError].} =
|
||||||
let
|
let
|
||||||
txKey = transactionHashToBlockKey(transactionHash)
|
txKey = transactionHashToBlockKey(transactionHash)
|
||||||
tx = db.newKvt().get(txKey.toOpenArray).valueOr:
|
tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr:
|
||||||
if error.error == KvtNotFound:
|
if error.error == KvtNotFound:
|
||||||
warn logTxt "getTransactionKey()",
|
warn logTxt "getTransactionKey()",
|
||||||
transactionHash, action="get()", `error`=($$error)
|
transactionHash, action="get()", `error`=($$error)
|
||||||
|
@ -742,7 +742,7 @@ proc getTransactionKey*(
|
||||||
|
|
||||||
proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
|
proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
|
||||||
## Returns True if the header with the given block hash is in our DB.
|
## Returns True if the header with the given block hash is in our DB.
|
||||||
db.newKvt().hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
|
db.ctx.getKvt().hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
|
||||||
warn logTxt "headerExists()", blockHash, action="get()", `error`=($$error)
|
warn logTxt "headerExists()", blockHash, action="get()", `error`=($$error)
|
||||||
return false
|
return false
|
||||||
|
|
||||||
|
@ -759,7 +759,7 @@ proc setHead*(
|
||||||
return false
|
return false
|
||||||
|
|
||||||
let canonicalHeadHash = canonicalHeadHashKey()
|
let canonicalHeadHash = canonicalHeadHashKey()
|
||||||
db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
|
db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
|
||||||
warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
|
warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
|
||||||
return true
|
return true
|
||||||
|
|
||||||
|
@ -770,7 +770,7 @@ proc setHead*(
|
||||||
): bool
|
): bool
|
||||||
{.gcsafe, raises: [RlpError].} =
|
{.gcsafe, raises: [RlpError].} =
|
||||||
var headerHash = rlpHash(header)
|
var headerHash = rlpHash(header)
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
if writeHeader:
|
if writeHeader:
|
||||||
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
|
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
|
||||||
warn logTxt "setHead()", headerHash, action="put()", error=($$error)
|
warn logTxt "setHead()", headerHash, action="put()", error=($$error)
|
||||||
|
@ -792,7 +792,7 @@ proc persistReceipts*(
|
||||||
if receipts.len == 0:
|
if receipts.len == 0:
|
||||||
return
|
return
|
||||||
|
|
||||||
let kvt = db.newKvt()
|
let kvt = db.ctx.getKvt()
|
||||||
for idx, rec in receipts:
|
for idx, rec in receipts:
|
||||||
let key = hashIndexKey(receiptsRoot, idx.uint16)
|
let key = hashIndexKey(receiptsRoot, idx.uint16)
|
||||||
kvt.put(key, rlp.encode(rec)).isOkOr:
|
kvt.put(key, rlp.encode(rec)).isOkOr:
|
||||||
|
@ -814,7 +814,7 @@ proc persistScore*(
|
||||||
score: UInt256
|
score: UInt256
|
||||||
): bool =
|
): bool =
|
||||||
let
|
let
|
||||||
kvt = db.newKvt()
|
kvt = db.ctx.getKvt()
|
||||||
scoreKey = blockHashToScoreKey(blockHash)
|
scoreKey = blockHashToScoreKey(blockHash)
|
||||||
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
|
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
|
||||||
warn logTxt "persistHeader()",
|
warn logTxt "persistHeader()",
|
||||||
|
@ -829,7 +829,7 @@ proc persistHeader*(
|
||||||
startOfHistory = GENESIS_PARENT_HASH;
|
startOfHistory = GENESIS_PARENT_HASH;
|
||||||
): bool =
|
): bool =
|
||||||
let
|
let
|
||||||
kvt = db.newKvt()
|
kvt = db.ctx.getKvt()
|
||||||
isStartOfHistory = header.parentHash == startOfHistory
|
isStartOfHistory = header.parentHash == startOfHistory
|
||||||
|
|
||||||
if not isStartOfHistory and not db.headerExists(header.parentHash):
|
if not isStartOfHistory and not db.headerExists(header.parentHash):
|
||||||
|
@ -902,7 +902,7 @@ proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
|
||||||
## Returns the uncles hash.
|
## Returns the uncles hash.
|
||||||
let enc = rlp.encode(uncles)
|
let enc = rlp.encode(uncles)
|
||||||
result = keccakHash(enc)
|
result = keccakHash(enc)
|
||||||
db.newKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
|
db.ctx.getKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
|
||||||
warn logTxt "persistUncles()",
|
warn logTxt "persistUncles()",
|
||||||
unclesHash=result, action="put()", `error`=($$error)
|
unclesHash=result, action="put()", `error`=($$error)
|
||||||
return EMPTY_ROOT_HASH
|
return EMPTY_ROOT_HASH
|
||||||
|
@ -913,7 +913,7 @@ proc safeHeaderHash*(db: CoreDbRef): Hash256 =
|
||||||
|
|
||||||
proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
|
proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
|
||||||
let safeHashKey = safeHashKey()
|
let safeHashKey = safeHashKey()
|
||||||
db.newKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
db.ctx.getKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
||||||
warn logTxt "safeHeaderHash()",
|
warn logTxt "safeHeaderHash()",
|
||||||
safeHashKey, action="put()", `error`=($$error)
|
safeHashKey, action="put()", `error`=($$error)
|
||||||
return
|
return
|
||||||
|
@ -925,7 +925,7 @@ proc finalizedHeaderHash*(
|
||||||
|
|
||||||
proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
|
proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
|
||||||
let finalizedHashKey = finalizedHashKey()
|
let finalizedHashKey = finalizedHashKey()
|
||||||
db.newKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
db.ctx.getKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
|
||||||
warn logTxt "finalizedHeaderHash()",
|
warn logTxt "finalizedHeaderHash()",
|
||||||
finalizedHashKey, action="put()", `error`=($$error)
|
finalizedHashKey, action="put()", `error`=($$error)
|
||||||
return
|
return
|
||||||
|
|
|
@ -154,7 +154,7 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
|
||||||
root: KeccakHash): AccountsLedgerRef =
|
root: KeccakHash): AccountsLedgerRef =
|
||||||
new result
|
new result
|
||||||
result.ledger = db.ctx.getAccounts()
|
result.ledger = db.ctx.getAccounts()
|
||||||
result.kvt = db.newKvt() # save manually in `persist()`
|
result.kvt = db.ctx.getKvt()
|
||||||
result.witnessCache = Table[EthAddress, WitnessData]()
|
result.witnessCache = Table[EthAddress, WitnessData]()
|
||||||
discard result.beginSavepoint
|
discard result.beginSavepoint
|
||||||
|
|
||||||
|
|
|
@ -94,7 +94,7 @@ proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: strin
|
||||||
|
|
||||||
proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
|
proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
|
||||||
var n = newJObject()
|
var n = newJObject()
|
||||||
for k, v in db.newKvt():
|
for k, v in db.ctx.getKvt():
|
||||||
n[k.toHex(false)] = %v
|
n[k.toHex(false)] = %v
|
||||||
node["state"] = n
|
node["state"] = n
|
||||||
|
|
||||||
|
|
|
@ -49,7 +49,7 @@ proc getMultiKeys*(
|
||||||
vmState.collectWitnessData = true # Enable saving witness data
|
vmState.collectWitnessData = true # Enable saving witness data
|
||||||
vmState.com.hardForkTransition(blockHeader)
|
vmState.com.hardForkTransition(blockHeader)
|
||||||
|
|
||||||
let dbTx = vmState.com.db.newTransaction()
|
let dbTx = vmState.com.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
|
|
||||||
# Execute the block of transactions and collect the keys of the touched account state
|
# Execute the block of transactions and collect the keys of the touched account state
|
||||||
|
|
|
@ -30,15 +30,15 @@ logScope:
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
template get(sk: SkeletonRef, key: untyped): untyped =
|
template get(sk: SkeletonRef, key: untyped): untyped =
|
||||||
sk.db.newKvt().get(key.toOpenArray).valueOr: EmptyBlob
|
sk.db.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob
|
||||||
|
|
||||||
template put(sk: SkeletonRef, key, val: untyped): untyped =
|
template put(sk: SkeletonRef, key, val: untyped): untyped =
|
||||||
let rc = sk.db.newKvt().put(key.toOpenArray, val)
|
let rc = sk.db.ctx.getKvt().put(key.toOpenArray, val)
|
||||||
if rc.isErr:
|
if rc.isErr:
|
||||||
raiseAssert "put() failed: " & $$rc.error
|
raiseAssert "put() failed: " & $$rc.error
|
||||||
|
|
||||||
template del(sk: SkeletonRef, key: untyped): untyped =
|
template del(sk: SkeletonRef, key: untyped): untyped =
|
||||||
discard sk.db.newKvt().del(key.toOpenArray)
|
discard sk.db.ctx.getKvt().del(key.toOpenArray)
|
||||||
|
|
||||||
proc append(w: var RlpWriter, s: Segment) =
|
proc append(w: var RlpWriter, s: Segment) =
|
||||||
w.startList(3)
|
w.startList(3)
|
||||||
|
|
|
@ -35,7 +35,7 @@ proc rpcCallEvm*(args: TransactionArgs,
|
||||||
let vmState = ? BaseVMState.new(topHeader, com)
|
let vmState = ? BaseVMState.new(topHeader, com)
|
||||||
let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)
|
let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)
|
||||||
|
|
||||||
var dbTx = com.db.newTransaction()
|
var dbTx = com.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose() # always dispose state changes
|
defer: dbTx.dispose() # always dispose state changes
|
||||||
|
|
||||||
ok(runComputation(params))
|
ok(runComputation(params))
|
||||||
|
@ -47,7 +47,7 @@ proc rpcCallEvm*(args: TransactionArgs,
|
||||||
const globalGasCap = 0 # TODO: globalGasCap should configurable by user
|
const globalGasCap = 0 # TODO: globalGasCap should configurable by user
|
||||||
let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)
|
let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)
|
||||||
|
|
||||||
var dbTx = com.db.newTransaction()
|
var dbTx = com.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose() # always dispose state changes
|
defer: dbTx.dispose() # always dispose state changes
|
||||||
|
|
||||||
ok(runComputation(params))
|
ok(runComputation(params))
|
||||||
|
@ -72,7 +72,7 @@ proc rpcEstimateGas*(args: TransactionArgs,
|
||||||
hi : GasInt = GasInt args.gas.get(0.Quantity)
|
hi : GasInt = GasInt args.gas.get(0.Quantity)
|
||||||
cap: GasInt
|
cap: GasInt
|
||||||
|
|
||||||
var dbTx = com.db.newTransaction()
|
var dbTx = com.db.ctx.newTransaction()
|
||||||
defer: dbTx.dispose() # always dispose state changes
|
defer: dbTx.dispose() # always dispose state changes
|
||||||
|
|
||||||
# Determine the highest gas limit can be used during the estimation.
|
# Determine the highest gas limit can be used during the estimation.
|
||||||
|
|
|
@ -86,7 +86,7 @@ proc calculateTransactionData(
|
||||||
## - root of transactions trie
|
## - root of transactions trie
|
||||||
## - list of transactions hashes
|
## - list of transactions hashes
|
||||||
## - total size of transactions in block
|
## - total size of transactions in block
|
||||||
var tr = newCoreDbRef(DefaultDbMemory).ctx.getColumn(CtGeneric)
|
var tr = newCoreDbRef(DefaultDbMemory).ctx.getGeneric()
|
||||||
var txHashes: seq[TxOrHash]
|
var txHashes: seq[TxOrHash]
|
||||||
var txSize: uint64
|
var txSize: uint64
|
||||||
for i, t in items:
|
for i, t in items:
|
||||||
|
|
|
@ -21,7 +21,7 @@ import
|
||||||
|
|
||||||
proc prepareBlockEnv(node: JsonNode, memoryDB: CoreDbRef) =
|
proc prepareBlockEnv(node: JsonNode, memoryDB: CoreDbRef) =
|
||||||
let state = node["state"]
|
let state = node["state"]
|
||||||
let kvt = memoryDB.newKvt()
|
let kvt = memoryDB.ctx.getKvt()
|
||||||
for k, v in state:
|
for k, v in state:
|
||||||
let key = hexToSeqByte(k)
|
let key = hexToSeqByte(k)
|
||||||
let value = hexToSeqByte(v.getStr())
|
let value = hexToSeqByte(v.getStr())
|
||||||
|
@ -34,7 +34,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: BlockNum
|
||||||
com = CommonRef.new(memoryDB)
|
com = CommonRef.new(memoryDB)
|
||||||
parent = com.db.getBlockHeader(parentNumber)
|
parent = com.db.getBlockHeader(parentNumber)
|
||||||
blk = com.db.getEthBlock(blockNumber)
|
blk = com.db.getEthBlock(blockNumber)
|
||||||
let transaction = memoryDB.newTransaction()
|
let transaction = memoryDB.ctx.newTransaction()
|
||||||
defer: transaction.dispose()
|
defer: transaction.dispose()
|
||||||
|
|
||||||
let
|
let
|
||||||
|
|
|
@ -28,7 +28,7 @@ proc dumpDebug(com: CommonRef, blockNumber: BlockNumber) =
|
||||||
capture = com.db.newCapture.value
|
capture = com.db.newCapture.value
|
||||||
captureCom = com.clone(capture.recorder)
|
captureCom = com.clone(capture.recorder)
|
||||||
|
|
||||||
let transaction = capture.recorder.newTransaction()
|
let transaction = capture.recorder.ctx.newTransaction()
|
||||||
defer: transaction.dispose()
|
defer: transaction.dispose()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -66,7 +66,7 @@ proc main() {.used.} =
|
||||||
var parentBlock = requestBlock(conf.head, { DownloadAndValidate })
|
var parentBlock = requestBlock(conf.head, { DownloadAndValidate })
|
||||||
discard com.db.setHead(parentBlock.header)
|
discard com.db.setHead(parentBlock.header)
|
||||||
|
|
||||||
let kvt = com.db.newKvt()
|
let kvt = com.db.ctx.getKvt()
|
||||||
if canonicalHeadHashKey().toOpenArray notin kvt:
|
if canonicalHeadHashKey().toOpenArray notin kvt:
|
||||||
persistToDb(com.db):
|
persistToDb(com.db):
|
||||||
com.initializeEmptyDb()
|
com.initializeEmptyDb()
|
||||||
|
|
|
@ -21,7 +21,7 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: BlockNumber, parent:
|
||||||
state = nimbus["state"]
|
state = nimbus["state"]
|
||||||
headerHash = rlpHash(header)
|
headerHash = rlpHash(header)
|
||||||
chainDB = newCoreDbRef(DefaultDbMemory)
|
chainDB = newCoreDbRef(DefaultDbMemory)
|
||||||
kvt = chainDB.newKvt()
|
kvt = chainDB.ctx.getKvt()
|
||||||
|
|
||||||
discard chainDB.setHead(parent, true)
|
discard chainDB.setHead(parent, true)
|
||||||
chainDB.persistTransactions(blockNumber, header.txRoot, blk.transactions)
|
chainDB.persistTransactions(blockNumber, header.txRoot, blk.transactions)
|
||||||
|
|
|
@ -29,7 +29,7 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
|
||||||
for i in 0 ..< numBlocks:
|
for i in 0 ..< numBlocks:
|
||||||
blocks[i] = com.db.getEthBlock(blockNumber + i.BlockNumber)
|
blocks[i] = com.db.getEthBlock(blockNumber + i.BlockNumber)
|
||||||
|
|
||||||
let transaction = com.db.newTransaction()
|
let transaction = com.db.ctx.newTransaction()
|
||||||
defer: transaction.dispose()
|
defer: transaction.dispose()
|
||||||
|
|
||||||
for i in 0 ..< numBlocks:
|
for i in 0 ..< numBlocks:
|
||||||
|
|
|
@ -117,7 +117,7 @@ proc forkedChainMain*() =
|
||||||
blk2 = cc.makeBlk(2, blk1)
|
blk2 = cc.makeBlk(2, blk1)
|
||||||
blk3 = cc.makeBlk(3, blk2)
|
blk3 = cc.makeBlk(3, blk2)
|
||||||
|
|
||||||
dbTx = cc.db.newTransaction()
|
dbTx = cc.db.ctx.newTransaction()
|
||||||
blk4 = cc.makeBlk(4, blk3)
|
blk4 = cc.makeBlk(4, blk3)
|
||||||
blk5 = cc.makeBlk(5, blk4)
|
blk5 = cc.makeBlk(5, blk4)
|
||||||
blk6 = cc.makeBlk(6, blk5)
|
blk6 = cc.makeBlk(6, blk5)
|
||||||
|
|
|
@ -213,7 +213,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
|
||||||
let eAddr = env.txs[inx].getRecipient
|
let eAddr = env.txs[inx].getRecipient
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let accTx = ledger.beginSavepoint
|
let accTx = ledger.beginSavepoint
|
||||||
|
@ -229,7 +229,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
|
||||||
dbTx.rollback()
|
dbTx.rollback()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let accTx = ledger.beginSavepoint
|
let accTx = ledger.beginSavepoint
|
||||||
|
@ -248,7 +248,7 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
|
||||||
let eAddr = env.txs[inx].getRecipient
|
let eAddr = env.txs[inx].getRecipient
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let accTx = ledger.beginSavepoint
|
let accTx = ledger.beginSavepoint
|
||||||
|
@ -278,7 +278,7 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
|
||||||
dbTx.commit()
|
dbTx.commit()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
|
|
||||||
block:
|
block:
|
||||||
let accTx = ledger.beginSavepoint
|
let accTx = ledger.beginSavepoint
|
||||||
|
@ -355,7 +355,7 @@ proc runLedgerTransactionTests(noisy = true) =
|
||||||
test &"Run {env.txi.len} two-step trials with rollback":
|
test &"Run {env.txi.len} two-step trials with rollback":
|
||||||
let head = env.xdb.getCanonicalHead()
|
let head = env.xdb.getCanonicalHead()
|
||||||
for n in env.txi:
|
for n in env.txi:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
let ledger = env.com.getLedger(head)
|
let ledger = env.com.getLedger(head)
|
||||||
env.runTrial2ok(ledger, n)
|
env.runTrial2ok(ledger, n)
|
||||||
|
@ -363,7 +363,7 @@ proc runLedgerTransactionTests(noisy = true) =
|
||||||
test &"Run {env.txi.len} three-step trials with rollback":
|
test &"Run {env.txi.len} three-step trials with rollback":
|
||||||
let head = env.xdb.getCanonicalHead()
|
let head = env.xdb.getCanonicalHead()
|
||||||
for n in env.txi:
|
for n in env.txi:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
let ledger = env.com.getLedger(head)
|
let ledger = env.com.getLedger(head)
|
||||||
env.runTrial3(ledger, n, rollback = true)
|
env.runTrial3(ledger, n, rollback = true)
|
||||||
|
@ -372,7 +372,7 @@ proc runLedgerTransactionTests(noisy = true) =
|
||||||
" throwing Exceptions":
|
" throwing Exceptions":
|
||||||
let head = env.xdb.getCanonicalHead()
|
let head = env.xdb.getCanonicalHead()
|
||||||
for n in env.txi:
|
for n in env.txi:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
let ledger = env.com.getLedger(head)
|
let ledger = env.com.getLedger(head)
|
||||||
env.runTrial3Survive(ledger, n, noisy)
|
env.runTrial3Survive(ledger, n, noisy)
|
||||||
|
@ -380,7 +380,7 @@ proc runLedgerTransactionTests(noisy = true) =
|
||||||
test &"Run {env.txi.len} tree-step trials without rollback":
|
test &"Run {env.txi.len} tree-step trials without rollback":
|
||||||
let head = env.xdb.getCanonicalHead()
|
let head = env.xdb.getCanonicalHead()
|
||||||
for n in env.txi:
|
for n in env.txi:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
let ledger = env.com.getLedger(head)
|
let ledger = env.com.getLedger(head)
|
||||||
env.runTrial3(ledger, n, rollback = false)
|
env.runTrial3(ledger, n, rollback = false)
|
||||||
|
@ -388,7 +388,7 @@ proc runLedgerTransactionTests(noisy = true) =
|
||||||
test &"Run {env.txi.len} four-step trials with rollback and db frames":
|
test &"Run {env.txi.len} four-step trials with rollback and db frames":
|
||||||
let head = env.xdb.getCanonicalHead()
|
let head = env.xdb.getCanonicalHead()
|
||||||
for n in env.txi:
|
for n in env.txi:
|
||||||
let dbTx = env.xdb.newTransaction()
|
let dbTx = env.xdb.ctx.newTransaction()
|
||||||
defer: dbTx.dispose()
|
defer: dbTx.dispose()
|
||||||
let ledger = env.com.getLedger(head)
|
let ledger = env.com.getLedger(head)
|
||||||
env.runTrial4(ledger, n, rollback = true)
|
env.runTrial4(ledger, n, rollback = true)
|
||||||
|
@ -524,7 +524,7 @@ proc runLedgerBasicOperationsTests() =
|
||||||
check ac.getCode(addr2) == code
|
check ac.getCode(addr2) == code
|
||||||
let
|
let
|
||||||
key = contractHashKey(keccakHash(code))
|
key = contractHashKey(keccakHash(code))
|
||||||
val = memDB.newKvt().get(key.toOpenArray).valueOr: EmptyBlob
|
val = memDB.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob
|
||||||
check val == code
|
check val == code
|
||||||
|
|
||||||
test "accessList operations":
|
test "accessList operations":
|
||||||
|
|
|
@ -85,7 +85,7 @@ proc verifySlotProof(trustedStorageRoot: Web3Hash, slot: StorageProof): MptProof
|
||||||
proc persistFixtureBlock(chainDB: CoreDbRef) =
|
proc persistFixtureBlock(chainDB: CoreDbRef) =
|
||||||
let header = getBlockHeader4514995()
|
let header = getBlockHeader4514995()
|
||||||
# Manually inserting header to avoid any parent checks
|
# Manually inserting header to avoid any parent checks
|
||||||
discard chainDB.newKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
|
discard chainDB.ctx.getKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
|
||||||
chainDB.addBlockNumberToHashLookup(header)
|
chainDB.addBlockNumberToHashLookup(header)
|
||||||
chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions)
|
chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions)
|
||||||
chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995())
|
chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995())
|
||||||
|
@ -167,6 +167,10 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
|
||||||
|
|
||||||
com.db.persistReceipts(vmState.receipts)
|
com.db.persistReceipts(vmState.receipts)
|
||||||
let
|
let
|
||||||
|
# TODO: `getColumn(CtReceipts)` does not exists anymore. There s only the
|
||||||
|
# generic `MPT` left that can be retrieved with `getGeneric()`,
|
||||||
|
# optionally with argument `clearData=true`
|
||||||
|
#
|
||||||
receiptRoot = com.db.ctx.getColumn(CtReceipts).state(updateOk=true).valueOr(EMPTY_ROOT_HASH)
|
receiptRoot = com.db.ctx.getColumn(CtReceipts).state(updateOk=true).valueOr(EMPTY_ROOT_HASH)
|
||||||
date = dateTime(2017, mMar, 30)
|
date = dateTime(2017, mMar, 30)
|
||||||
timeStamp = date.toTime.toUnix.EthTime
|
timeStamp = date.toTime.toUnix.EthTime
|
||||||
|
|
|
@ -34,8 +34,8 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
||||||
txRoot: Hash256 # header with block number `num`
|
txRoot: Hash256 # header with block number `num`
|
||||||
rcptRoot: Hash256 # ditto
|
rcptRoot: Hash256 # ditto
|
||||||
let
|
let
|
||||||
adb = cdb.ctx.getColumn(CtGeneric).backend.toAristo
|
adb = cdb.mpt
|
||||||
kdb = cdb.newKvt.backend.toAristo
|
kdb = cdb.kvt
|
||||||
|
|
||||||
# Fill KVT and collect `proof` data
|
# Fill KVT and collect `proof` data
|
||||||
for (k,v) in jKvp.pairs:
|
for (k,v) in jKvp.pairs:
|
||||||
|
@ -60,6 +60,10 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
||||||
discard
|
discard
|
||||||
check kdb.put(key, val).isOk
|
check kdb.put(key, val).isOk
|
||||||
|
|
||||||
|
# TODO: `getColumn(CtXyy)` does not exists anymore. There is only the generic
|
||||||
|
# `MPT` left that can be retrieved with `getGeneric()`, optionally with
|
||||||
|
# argument `clearData=true`
|
||||||
|
|
||||||
# Install sub-trie roots onto production db
|
# Install sub-trie roots onto production db
|
||||||
if txRoot.isValid:
|
if txRoot.isValid:
|
||||||
doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk
|
doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk
|
||||||
|
|
Loading…
Reference in New Issue