Core db remove legacy phrases (#2468)
* Rename `newKvt()` -> `ctx.getKvt()`
  why: Cleans up a legacy shortcut. Note that the `KVT` returned is not newly
  instantiated; it refers to the shared `KVT` residing in a context, which is
  a generalisation of an in-memory database fork. The function `ctx` retrieves
  the default context.

* Rename `newTransaction()` -> `ctx.newTransaction()`
  why: Cleans up a legacy shortcut. The transaction is applied to a context,
  a generalisation of an in-memory database fork. The function `ctx` retrieves
  the default context.

* Rename `getColumn(CtGeneric)` -> `getGeneric()`
  why: A list of well-known sub-tries is no longer needed; a single one is
  enough. In fact, `getColumn()` by now only supported a single sub-trie
  anyway.

* Reduce TODO list
parent 9fc5495d49 · commit 800fd77333
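The renames in one place, as a hedged before/after sketch in Nim: `db` stands
for any `CoreDbRef`, and the commented-out lines show the removed legacy
shortcuts next to their replacements (the old forms no longer compile after
this commit).

  # `db.ctx` retrieves the default context (a generalisation of an
  # in-memory database fork).

  # let kvt  = db.newKvt()                                 # old shortcut
  let kvt  = db.ctx.getKvt()                               # shared context KVT

  # let dbTx = db.newTransaction()                         # old shortcut
  let dbTx = db.ctx.newTransaction()                       # tx on the context

  # let mpt  = db.ctx.getColumn(CtGeneric, clearData=true) # old selector
  let mpt  = db.ctx.getGeneric(clearData=true)             # single generic MPT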
@@ -42,7 +42,7 @@ proc processBlock(
   ## implementations (but can be savely removed, as well.)
   ## variant of `processBlock()` where the `header` argument is explicitely set.
   template header: BlockHeader = blk.header
-  var dbTx = vmState.com.db.newTransaction()
+  var dbTx = vmState.com.db.ctx.newTransaction()
   defer: dbTx.dispose()

   let com = vmState.com

@@ -92,7 +92,7 @@ proc getVmState(c: ChainRef, header: BlockHeader):
 # intended to accepts invalid block
 proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
   template header: BlockHeader = blk.header
-  let dbTx = c.db.newTransaction()
+  let dbTx = c.db.ctx.newTransaction()
   defer: dbTx.dispose()

   c.com.hardForkTransition(header)

@@ -38,11 +38,11 @@ type
 # ------------------------------------------------------------------------------

 proc writeStatus(db: CoreDbRef, status: TransitionStatus) =
-  db.newKvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status)).isOkOr:
+  db.ctx.getKvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status)).isOkOr:
     raiseAssert "writeStatus(): put() failed " & $$error

 proc readStatus(db: CoreDbRef): TransitionStatus =
-  var bytes = db.newKvt.get(transitionStatusKey().toOpenArray()).valueOr:
+  var bytes = db.ctx.getKvt.get(transitionStatusKey().toOpenArray()).valueOr:
     EmptyBlob
   if bytes.len > 0:
     try:
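For readers unfamiliar with the `results`-style idioms appearing throughout
this diff: a minimal sketch of the write/read pattern from
`writeStatus`/`readStatus` above; the proc names and `key`/`value` parameters
are illustrative, not part of the commit.

  proc writeExample(db: CoreDbRef, key, value: openArray[byte]) =
    # `isOkOr:` runs the block on error; `error` is injected into its scope.
    db.ctx.getKvt.put(key, value).isOkOr:
      raiseAssert "put() failed " & $$error

  proc readExample(db: CoreDbRef, key: openArray[byte]): seq[byte] =
    # `valueOr:` unwraps the value, or evaluates the block as a default.
    db.ctx.getKvt.get(key).valueOr:
      EmptyBlob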
@@ -355,7 +355,7 @@ proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType =
   return com.config.consensusType

 proc initializeEmptyDb*(com: CommonRef) =
-  let kvt = com.db.newKvt()
+  let kvt = com.db.ctx.getKvt()
   proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
     kvt.hasKey(key).expect "valid bool"
   if canonicalHeadHashKey().toOpenArray notin kvt:

@@ -145,7 +145,7 @@ proc validateBlock(c: ForkedChainRef,
     parent: BlockHeader,
     blk: EthBlock,
     updateCursor: bool = true): Result[void, string] =
-  let dbTx = c.db.newTransaction()
+  let dbTx = c.db.ctx.newTransaction()
   defer:
     dbTx.dispose()

@@ -172,7 +172,7 @@ proc replaySegment(c: ForkedChainRef, target: Hash256) =
     prevHash = chain[^1].header.parentHash

   c.stagingTx.rollback()
-  c.stagingTx = c.db.newTransaction()
+  c.stagingTx = c.db.ctx.newTransaction()
   c.cursorHeader = c.baseHeader
   for i in countdown(chain.high, chain.low):
     c.validateBlock(c.cursorHeader, chain[i],

@@ -397,7 +397,7 @@ proc importBlock*(c: ForkedChainRef, blk: EthBlock): Result[void, string] =
   # Try to import block to canonical or side chain.
   # return error if the block is invalid
   if c.stagingTx.isNil:
-    c.stagingTx = c.db.newTransaction()
+    c.stagingTx = c.db.ctx.newTransaction()

   template header(): BlockHeader =
     blk.header

@@ -407,7 +407,7 @@ proc importBlock*(c: ForkedChainRef, blk: EthBlock): Result[void, string] =

   if header.parentHash == c.baseHash:
     c.stagingTx.rollback()
-    c.stagingTx = c.db.newTransaction()
+    c.stagingTx = c.db.ctx.newTransaction()
     return c.validateBlock(c.baseHeader, blk)

   if header.parentHash notin c.blocks:

@@ -442,7 +442,7 @@ proc forkChoice*(c: ForkedChainRef,
   if c.cursorHash != head.cursorHash:
     if not c.stagingTx.isNil:
       c.stagingTx.rollback()
-    c.stagingTx = c.db.newTransaction()
+    c.stagingTx = c.db.ctx.newTransaction()
     c.replaySegment(headHash)

   c.trimCanonicalChain(head, headHash)

@@ -452,7 +452,7 @@ proc forkChoice*(c: ForkedChainRef,

     if c.stagingTx.isNil:
       # setHead below don't go straight to db
-      c.stagingTx = c.db.newTransaction()
+      c.stagingTx = c.db.ctx.newTransaction()

     c.setHead(headHash, head.header.number)
     return ok()

@@ -489,7 +489,7 @@ proc forkChoice*(c: ForkedChainRef,

   # Write segment from base+1 to newBase into database
   c.stagingTx.rollback()
-  c.stagingTx = c.db.newTransaction()
+  c.stagingTx = c.db.ctx.newTransaction()

   if newBase.header.number > c.baseHeader.number:
     c.replaySegment(newBase.hash)

@@ -504,7 +504,7 @@ proc forkChoice*(c: ForkedChainRef,
   if c.stagingTx.isNil:
     # replaySegment or setHead below don't
     # go straight to db
-    c.stagingTx = c.db.newTransaction()
+    c.stagingTx = c.db.ctx.newTransaction()

   # Move chain state forward to current head
   if newBase.header.number < head.header.number:
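The `stagingTx` handling in the `forkChoice`/`importBlock` hunks above always
follows a rollback-then-restart pattern. A condensed sketch of that pattern,
with the method names as in the diff and the surrounding proc purely
illustrative:

  proc restageSketch(c: ForkedChainRef) =
    if not c.stagingTx.isNil:
      c.stagingTx.rollback()                 # drop uncommitted staged changes
    c.stagingTx = c.db.ctx.newTransaction()  # fresh frame on the default context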
@@ -78,7 +78,7 @@ proc purgeOlderBlocksFromHistory(db: CoreDbRef, bn: BlockNumber) =
 proc persistBlocksImpl(
     c: ChainRef, blocks: openArray[EthBlock], flags: PersistBlockFlags = {}
 ): Result[PersistStats, string] =
-  let dbTx = c.db.newTransaction()
+  let dbTx = c.db.ctx.newTransaction()
   defer:
     dbTx.dispose()

@@ -165,7 +165,7 @@ proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string]

   let packer = TxPackerStateRef( # return value
     xp: xp,
-    tr: AristoDbMemory.newCoreDbRef().ctx.getColumn(CtGeneric, clearData=true),
+    tr: AristoDbMemory.newCoreDbRef().ctx.getGeneric(clearData=true),
     balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient),
     numBlobPerBlock: 0,
   )

@@ -274,7 +274,7 @@ proc packerVmExec*(xp: TxPoolRef): Result[void, string] {.gcsafe,raises: [Catcha
   ## Rebuild `packed` bucket by selection items from the `staged` bucket
   ## after executing them in the VM.
   let db = xp.chain.com.db
-  let dbTx = db.newTransaction
+  let dbTx = db.ctx.newTransaction()
   defer: dbTx.dispose()

   var pst = xp.vmExecInit.valueOr:
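By contrast with the long-lived staging transaction, the hunks above use the
scoped pairing of `newTransaction()` with a deferred `dispose()`. A sketch of
that idiom, under the assumption (consistent with the diff) that `dispose()`
rolls a frame back unless it was committed; the proc name is illustrative:

  proc scopedTxSketch(db: CoreDbRef) =
    let dbTx = db.ctx.newTransaction()
    defer: dbTx.dispose()   # safety net: discards the frame if not committed
    # ... mutate state here; call dbTx.commit() to keep the changes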
@@ -6,13 +6,6 @@
   remember the follow up vertices which can travel through the tx-layers
   to be picked up by the backend store.

-* Consider changing fetch/merge/delete prototypes for account and storage. At
-  the moment they all use `openArray[]` for strictly 32 byte arrays (which is
-  only implicitely enforced at run time -- i.e. it would fail otherwise.)
-
 * Mental note: For *proof-mode* with pre-allocated locked vertices and Merkle
-  keys, verification of a patyion tree must be done by computing sub-tree keys
+  keys, verification of a partial tree must be done by computing sub-tree keys
   at the relative roots and comparing them with the pre-allocated Merkle keys.
-
-* Remove legacy state format import from `deblobifyTo()` after a while (last
-  updated 28/06/24).
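The (now dropped) TODO item about `openArray[]` keys contrasts run-time with
compile-time length enforcement. A generic Nim illustration of the point, not
code from this repository:

  # Run-time check only: any length slips through the signature.
  proc fetchLoose(key: openArray[byte]) =
    doAssert key.len == 32        # fails at run time for wrong sizes

  # Compile-time guarantee: the type itself fixes the length.
  proc fetchStrict(key: array[32, byte]) =
    discard                       # no length check needed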
@@ -41,9 +41,6 @@ template call(kvt: CoreDbKvtRef; fn: untyped; args: varArgs[untyped]): untyped =
 template mpt(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef =
   dsc.distinctBase.mpt

-template rootID(mpt: CoreDbMptRef): VertexID =
-  VertexID(CtGeneric)
-
 template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =
   when CoreDbEnableApiJumpTable:
     api.fn(args)

@@ -67,10 +64,11 @@ iterator aristoReplicate[T](
   ##
   let p = mpt.call(forkTx, mpt.mpt, 0).valueOrApiError "aristoReplicate()"
   defer: discard mpt.call(forget, p)
-  for (rvid,key,vtx,node) in T.replicate(p):
+  for (rVid,key,vtx,node) in T.replicate(p):
     if key.len == 32:
       yield (@(key.data), node.encode)
-    elif rvid.vid == mpt.rootID:
+    elif rVid.vid == CoreDbVidGeneric:
+      # FIXME: Would an assert rather be appropriate here?
       yield (@(key.to(Hash256).data), node.encode)

 # End

@@ -24,8 +24,6 @@ import
   ./aristo_db,
   ../../opts

-include ./aristo_replicate
-
 const
   # Expectation messages
   aristoFail = "Aristo/RocksDB init() failed"

@@ -44,7 +44,6 @@ export
   CoreDbApiError,
   #CoreDbCaptFlags,
   #CoreDbCaptRef,
-  CoreDbColType,
   CoreDbCtxRef,
   CoreDbErrorCode,
   CoreDbErrorRef,

@@ -170,9 +169,6 @@ template mpt(tx: CoreDbTxRef): AristoDbRef =
 template ctx(acc: CoreDbAccRef): CoreDbCtxRef =
   acc.distinctBase

-template rootID(mpt: CoreDbMptRef): VertexID =
-  VertexID(CtGeneric)
-
 # ---------------

 template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped =

@@ -344,8 +340,8 @@ proc stateBlockNumber*(db: CoreDbRef): BlockNumber =
 # ------------------------------------------------------------------------------

 proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef =
-  ## This function subscribes to the common base object shared with other
-  ## KVT descriptors. Any changes are immediately visible to subscribers.
+  ## This function retrieves the common base object shared with other KVT
+  ## descriptors. Any changes are immediately visible to subscribers.
   ## On destruction (when the constructed object gets out of scope), changes
   ## are not saved to the backend database but are still cached and available.
   ##

@@ -444,7 +440,7 @@ proc getGeneric*(
   ctx.setTrackNewApi CtxGetGenericFn
   result = CoreDbMptRef(ctx)
   if clearData:
-    result.call(deleteGenericTree, ctx.mpt, result.rootID).isOkOr:
+    result.call(deleteGenericTree, ctx.mpt, CoreDbVidGeneric).isOkOr:
       raiseAssert $api & ": " & $error
   ctx.ifTrackNewApi: debug newApiTxt, api, clearData, elapsed

@@ -456,7 +452,7 @@ proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
   ##
   mpt.setTrackNewApi MptFetchFn
   result = block:
-    let rc = mpt.call(fetchGenericData, mpt.mpt, mpt.rootID, key)
+    let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key)
     if rc.isOk:
      ok(rc.value)
    elif rc.error == FetchPathNotFound:

@@ -471,7 +467,7 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
   ##
   mpt.setTrackNewApi MptFetchOrEmptyFn
   result = block:
-    let rc = mpt.call(fetchGenericData, mpt.mpt, mpt.rootID, key)
+    let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key)
     if rc.isOk:
       ok(rc.value)
     elif rc.error == FetchPathNotFound:

@@ -483,7 +479,7 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
 proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
   mpt.setTrackNewApi MptDeleteFn
   result = block:
-    let rc = mpt.call(deleteGenericData, mpt.mpt, mpt.rootID, key)
+    let rc = mpt.call(deleteGenericData, mpt.mpt,CoreDbVidGeneric, key)
     if rc.isOk:
       ok()
     elif rc.error == DelPathNotFound:

@@ -499,7 +495,7 @@ proc merge*(
   ): CoreDbRc[void] =
   mpt.setTrackNewApi MptMergeFn
   result = block:
-    let rc = mpt.call(mergeGenericData, mpt.mpt, mpt.rootID, key, val)
+    let rc = mpt.call(mergeGenericData, mpt.mpt,CoreDbVidGeneric, key, val)
     if rc.isOk:
       ok()
     else:

@@ -513,7 +509,7 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
   ##
   mpt.setTrackNewApi MptHasPathFn
   result = block:
-    let rc = mpt.call(hasPathGeneric, mpt.mpt, mpt.rootID, key)
+    let rc = mpt.call(hasPathGeneric, mpt.mpt, CoreDbVidGeneric, key)
     if rc.isOk:
       ok(rc.value)
     else:

@@ -529,7 +525,7 @@ proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
   ##
   mpt.setTrackNewApi MptStateFn
   result = block:
-    let rc = mpt.call(fetchGenericState, mpt.mpt, mpt.rootID, updateOk)
+    let rc = mpt.call(fetchGenericState, mpt.mpt, CoreDbVidGeneric, updateOk)
     if rc.isOk:
       ok(rc.value)
     else:

@@ -874,28 +870,6 @@ proc dispose*(tx: CoreDbTxRef) =
     raiseAssert $api & ": " & $error
   tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel

-# ------------------------------------------------------------------------------
-# Legacy and convenience methods
-# ------------------------------------------------------------------------------
-
-proc newKvt*(db: CoreDbRef): CoreDbKvtRef =
-  ## Variant of `getKvt()` retrieving the KVT from the default context
-  db.ctx.getKvt
-
-proc newTransaction*(db: CoreDbRef): CoreDbTxRef =
-  ## Variant of `newTransaction()` starting the transaction on the default
-  ## context
-  db.ctx.newTransaction
-
-proc getColumn*(
-    ctx: CoreDbCtxRef;
-    colType: CoreDbColType;
-    clearData = false;
-  ): CoreDbMptRef =
-  ## Variant of `getGenteric()` forcing `colType` to be `CtGeneric`
-  doAssert colType == CtGeneric
-  ctx.getGeneric clearData
-
 # ------------------------------------------------------------------------------
 # Public tracer methods
 # ------------------------------------------------------------------------------
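Since the forwarders above are gone, downstream code must now spell out the
context. Should a codebase need a transition period, a local shim mirroring
the removed forwarders is trivial to write; this is a hedged sketch, not part
of the commit:

  # Optional local compatibility templates during migration only.
  template newKvt(db: CoreDbRef): CoreDbKvtRef =
    db.ctx.getKvt()

  template newTransaction(db: CoreDbRef): CoreDbTxRef =
    db.ctx.newTransaction()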
@@ -29,6 +29,10 @@ type

 const
   CoreDbPersistentTypes* = {AristoDbRocks}
     ## List of persistent DB types (currently only a single one)
+
+  CoreDbVidGeneric* = VertexID(2)
+    ## Generic `MPT` root vertex ID for calculating Merkle hashes

 type
   CoreDbProfListRef* = AristoDbProfListRef

@@ -59,9 +63,6 @@ type
     StoNotFound
     TxPending

-  CoreDbColType* = enum # Keep that legacy type for a while ..
-    CtGeneric = 2 # Actually only this constant is needed
-
   CoreDbCaptFlags* {.pure.} = enum
     PersistPut
     PersistDel

@@ -69,7 +69,7 @@ iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
   mpt.setTrackNewApi MptPairsIt
   case mpt.dbType:
   of AristoDbMemory, AristoDbRocks, AristoDbVoid:
-    for (path,data) in mpt.mpt.rightPairsGeneric mpt.rootID:
+    for (path,data) in mpt.mpt.rightPairsGeneric CoreDbVidGeneric:
       yield (mpt.call(pathAsBlob, path), data)
   of Ooops:
     raiseAssert: "Unsupported database type: " & $mpt.dbType

@@ -119,7 +119,7 @@ iterator getBlockTransactionData*(
     if txRoot == EMPTY_ROOT_HASH:
       break body

-    let kvt = db.newKvt()
+    let kvt = db.ctx.getKvt()
     for idx in 0'u16..<uint16.high:
       let key = hashIndexKey(txRoot, idx)
       let txData = kvt.getOrEmpty(key).valueOr:

@@ -157,7 +157,7 @@ iterator getWithdrawals*(
     if withdrawalsRoot == EMPTY_ROOT_HASH:
       break body

-    let kvt = db.newKvt()
+    let kvt = db.ctx.getKvt()
     for idx in 0'u16..<uint16.high:
       let key = hashIndexKey(withdrawalsRoot, idx)
       let data = kvt.getOrEmpty(key).valueOr:

@@ -177,7 +177,7 @@ iterator getReceipts*(
     if receiptsRoot == EMPTY_ROOT_HASH:
       break body

-    let kvt = db.newKvt()
+    let kvt = db.ctx.getKvt()
     for idx in 0'u16..<uint16.high:
       let key = hashIndexKey(receiptsRoot, idx)
       let data = kvt.getOrEmpty(key).valueOr:

@@ -198,7 +198,7 @@ proc removeTransactionFromCanonicalChain(
   ) =
   ## Removes the transaction specified by the given hash from the canonical
   ## chain.
-  db.newKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
+  db.ctx.getKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
     warn logTxt "removeTransactionFromCanonicalChain()",
       transactionHash, action="del()", error=($$error)

@@ -233,7 +233,7 @@ proc setAsCanonicalChainHead(
   db.addBlockNumberToHashLookup(h.number, h.blockHash)

   let canonicalHeadHash = canonicalHeadHashKey()
-  db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
+  db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
     warn logTxt "setAsCanonicalChainHead()",
       canonicalHeadHash, action="put()", error=($$error)

@@ -251,7 +251,7 @@ proc markCanonicalChain(

   # mark current header as canonical
   let
-    kvt = db.newKvt()
+    kvt = db.ctx.getKvt()
     key = blockNumberToHashKey(currHeader.number)
   kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
     warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)

@@ -297,7 +297,7 @@ proc markCanonicalChain(
 # ------------------------------------------------------------------------------

 proc exists*(db: CoreDbRef, hash: Hash256): bool =
-  db.newKvt().hasKey(hash.data).valueOr:
+  db.ctx.getKvt().hasKey(hash.data).valueOr:
     warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
     return false

@@ -315,7 +315,7 @@ proc getBlockHeader*(
     output: var BlockHeader;
   ): bool =
   const info = "getBlockHeader()"
-  let data = db.newKvt().get(genericHashKey(blockHash).toOpenArray).valueOr:
+  let data = db.ctx.getKvt().get(genericHashKey(blockHash).toOpenArray).valueOr:
     if error.error != KvtNotFound:
       warn logTxt info, blockHash, action="get()", error=($$error)
     return false

@@ -339,7 +339,7 @@ proc getHash(
     db: CoreDbRef;
     key: DbKey;
   ): Opt[Hash256] =
-  let data = db.newKvt().get(key.toOpenArray).valueOr:
+  let data = db.ctx.getKvt().get(key.toOpenArray).valueOr:
     if error.error != KvtNotFound:
       warn logTxt "getHash()", key, action="get()", error=($$error)
     return Opt.none(Hash256)

@@ -435,7 +435,7 @@ proc getScore*(
     db: CoreDbRef;
     blockHash: Hash256;
   ): Opt[UInt256] =
-  let data = db.newKvt()
+  let data = db.ctx.getKvt()
     .get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
       if error.error != KvtNotFound:
         warn logTxt "getScore()", blockHash, action="get()", error=($$error)

@@ -449,7 +449,7 @@ proc getScore*(
 proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
   ## for testing purpose
   let scoreKey = blockHashToScoreKey blockHash
-  db.newKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
+  db.ctx.getKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
     warn logTxt "setScore()", scoreKey, action="put()", error=($$error)
     return

@@ -484,7 +484,7 @@ proc getAncestorsHashes*(
 proc addBlockNumberToHashLookup*(
     db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash256) =
   let blockNumberKey = blockNumberToHashKey(blockNumber)
-  db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
+  db.ctx.getKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
     warn logTxt "addBlockNumberToHashLookup()",
       blockNumberKey, action="put()", error=($$error)

@@ -500,7 +500,7 @@ proc persistTransactions*(
   if transactions.len == 0:
     return

-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   for idx, tx in transactions:
     let
       encodedTx = rlp.encode(tx)

@@ -523,7 +523,7 @@ proc forgetHistory*(
   ## returns `true`, if some history was available and deleted.
   var blockHash: Hash256
   if db.getBlockHash(blockNum, blockHash):
-    let kvt = db.newKvt()
+    let kvt = db.ctx.getKvt()
     # delete blockNum->blockHash
     discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray)
     result = true

@@ -542,7 +542,7 @@ proc getTransactionByIndex*(
   const
     info = "getTransaction()"

-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   let key = hashIndexKey(txRoot, txIndex)
   let txData = kvt.getOrEmpty(key).valueOr:
     warn logTxt "getTransaction()",

@@ -566,7 +566,7 @@ proc getTransactionCount*(
   const
     info = "getTransactionCount()"

-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   var txCount = 0'u16
   while true:
     let key = hashIndexKey(txRoot, txCount)

@@ -590,7 +590,7 @@ proc getUnclesCount*(
   if ommersHash != EMPTY_UNCLE_HASH:
     let encodedUncles = block:
       let key = genericHashKey(ommersHash)
-      db.newKvt().get(key.toOpenArray).valueOr:
+      db.ctx.getKvt().get(key.toOpenArray).valueOr:
         if error.error == KvtNotFound:
           warn logTxt info, ommersHash, action="get()", `error`=($$error)
         return 0

@@ -605,7 +605,7 @@ proc getUncles*(
   if ommersHash != EMPTY_UNCLE_HASH:
     let encodedUncles = block:
       let key = genericHashKey(ommersHash)
-      db.newKvt().get(key.toOpenArray).valueOr:
+      db.ctx.getKvt().get(key.toOpenArray).valueOr:
         if error.error == KvtNotFound:
           warn logTxt info, ommersHash, action="get()", `error`=($$error)
         return @[]

@@ -619,7 +619,7 @@ proc persistWithdrawals*(
   const info = "persistWithdrawals()"
   if withdrawals.len == 0:
     return
-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   for idx, wd in withdrawals:
     let key = hashIndexKey(withdrawalsRoot, idx.uint16)
     kvt.put(key, rlp.encode(wd)).isOkOr:

@@ -718,7 +718,7 @@ proc getUncleHashes*(
   if header.ommersHash != EMPTY_UNCLE_HASH:
     let
       key = genericHashKey(header.ommersHash)
-      encodedUncles = db.newKvt().get(key.toOpenArray).valueOr:
+      encodedUncles = db.ctx.getKvt().get(key.toOpenArray).valueOr:
         if error.error == KvtNotFound:
           warn logTxt "getUncleHashes()",
             ommersHash=header.ommersHash, action="get()", `error`=($$error)

@@ -732,7 +732,7 @@ proc getTransactionKey*(
     {.gcsafe, raises: [RlpError].} =
   let
     txKey = transactionHashToBlockKey(transactionHash)
-    tx = db.newKvt().get(txKey.toOpenArray).valueOr:
+    tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr:
       if error.error == KvtNotFound:
         warn logTxt "getTransactionKey()",
           transactionHash, action="get()", `error`=($$error)

@@ -742,7 +742,7 @@ proc getTransactionKey*(

 proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
   ## Returns True if the header with the given block hash is in our DB.
-  db.newKvt().hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
+  db.ctx.getKvt().hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
     warn logTxt "headerExists()", blockHash, action="get()", `error`=($$error)
     return false

@@ -759,7 +759,7 @@ proc setHead*(
     return false

   let canonicalHeadHash = canonicalHeadHashKey()
-  db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
+  db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
     warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
   return true

@@ -770,7 +770,7 @@ proc setHead*(
   ): bool
     {.gcsafe, raises: [RlpError].} =
   var headerHash = rlpHash(header)
-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   if writeHeader:
     kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
       warn logTxt "setHead()", headerHash, action="put()", error=($$error)

@@ -792,7 +792,7 @@ proc persistReceipts*(
   if receipts.len == 0:
     return

-  let kvt = db.newKvt()
+  let kvt = db.ctx.getKvt()
   for idx, rec in receipts:
     let key = hashIndexKey(receiptsRoot, idx.uint16)
     kvt.put(key, rlp.encode(rec)).isOkOr:

@@ -814,7 +814,7 @@ proc persistScore*(
     score: UInt256
   ): bool =
   let
-    kvt = db.newKvt()
+    kvt = db.ctx.getKvt()
     scoreKey = blockHashToScoreKey(blockHash)
   kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
     warn logTxt "persistHeader()",

@@ -829,7 +829,7 @@ proc persistHeader*(
     startOfHistory = GENESIS_PARENT_HASH;
   ): bool =
   let
-    kvt = db.newKvt()
+    kvt = db.ctx.getKvt()
     isStartOfHistory = header.parentHash == startOfHistory

   if not isStartOfHistory and not db.headerExists(header.parentHash):

@@ -902,7 +902,7 @@ proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
   ## Returns the uncles hash.
   let enc = rlp.encode(uncles)
   result = keccakHash(enc)
-  db.newKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
+  db.ctx.getKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
     warn logTxt "persistUncles()",
       unclesHash=result, action="put()", `error`=($$error)
     return EMPTY_ROOT_HASH

@@ -913,7 +913,7 @@ proc safeHeaderHash*(db: CoreDbRef): Hash256 =

 proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
   let safeHashKey = safeHashKey()
-  db.newKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
+  db.ctx.getKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
     warn logTxt "safeHeaderHash()",
       safeHashKey, action="put()", `error`=($$error)
     return

@@ -925,7 +925,7 @@ proc finalizedHeaderHash*(

 proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
   let finalizedHashKey = finalizedHashKey()
-  db.newKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
+  db.ctx.getKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
     warn logTxt "finalizedHeaderHash()",
       finalizedHashKey, action="put()", `error`=($$error)
     return
@@ -154,7 +154,7 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
     root: KeccakHash): AccountsLedgerRef =
   new result
   result.ledger = db.ctx.getAccounts()
-  result.kvt = db.newKvt() # save manually in `persist()`
+  result.kvt = db.ctx.getKvt()
   result.witnessCache = Table[EthAddress, WitnessData]()
   discard result.beginSavepoint

@@ -94,7 +94,7 @@ proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: strin

 proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
   var n = newJObject()
-  for k, v in db.newKvt():
+  for k, v in db.ctx.getKvt():
     n[k.toHex(false)] = %v
   node["state"] = n

@@ -49,7 +49,7 @@ proc getMultiKeys*(
   vmState.collectWitnessData = true # Enable saving witness data
   vmState.com.hardForkTransition(blockHeader)

-  let dbTx = vmState.com.db.newTransaction()
+  let dbTx = vmState.com.db.ctx.newTransaction()
   defer: dbTx.dispose()

   # Execute the block of transactions and collect the keys of the touched account state

@@ -30,15 +30,15 @@ logScope:
 # ------------------------------------------------------------------------------

 template get(sk: SkeletonRef, key: untyped): untyped =
-  sk.db.newKvt().get(key.toOpenArray).valueOr: EmptyBlob
+  sk.db.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob

 template put(sk: SkeletonRef, key, val: untyped): untyped =
-  let rc = sk.db.newKvt().put(key.toOpenArray, val)
+  let rc = sk.db.ctx.getKvt().put(key.toOpenArray, val)
   if rc.isErr:
     raiseAssert "put() failed: " & $$rc.error

 template del(sk: SkeletonRef, key: untyped): untyped =
-  discard sk.db.newKvt().del(key.toOpenArray)
+  discard sk.db.ctx.getKvt().del(key.toOpenArray)

 proc append(w: var RlpWriter, s: Segment) =
   w.startList(3)

@@ -35,7 +35,7 @@ proc rpcCallEvm*(args: TransactionArgs,
   let vmState = ? BaseVMState.new(topHeader, com)
   let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)

-  var dbTx = com.db.newTransaction()
+  var dbTx = com.db.ctx.newTransaction()
   defer: dbTx.dispose() # always dispose state changes

   ok(runComputation(params))

@@ -47,7 +47,7 @@ proc rpcCallEvm*(args: TransactionArgs,
   const globalGasCap = 0 # TODO: globalGasCap should configurable by user
   let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas)

-  var dbTx = com.db.newTransaction()
+  var dbTx = com.db.ctx.newTransaction()
   defer: dbTx.dispose() # always dispose state changes

   ok(runComputation(params))

@@ -72,7 +72,7 @@ proc rpcEstimateGas*(args: TransactionArgs,
     hi : GasInt = GasInt args.gas.get(0.Quantity)
     cap: GasInt

-  var dbTx = com.db.newTransaction()
+  var dbTx = com.db.ctx.newTransaction()
   defer: dbTx.dispose() # always dispose state changes

   # Determine the highest gas limit can be used during the estimation.

@@ -86,7 +86,7 @@ proc calculateTransactionData(
   ## - root of transactions trie
   ## - list of transactions hashes
   ## - total size of transactions in block
-  var tr = newCoreDbRef(DefaultDbMemory).ctx.getColumn(CtGeneric)
+  var tr = newCoreDbRef(DefaultDbMemory).ctx.getGeneric()
   var txHashes: seq[TxOrHash]
   var txSize: uint64
   for i, t in items:

@@ -21,7 +21,7 @@ import

 proc prepareBlockEnv(node: JsonNode, memoryDB: CoreDbRef) =
   let state = node["state"]
-  let kvt = memoryDB.newKvt()
+  let kvt = memoryDB.ctx.getKvt()
   for k, v in state:
     let key = hexToSeqByte(k)
     let value = hexToSeqByte(v.getStr())

@@ -34,7 +34,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: BlockNum
     com = CommonRef.new(memoryDB)
     parent = com.db.getBlockHeader(parentNumber)
     blk = com.db.getEthBlock(blockNumber)
-  let transaction = memoryDB.newTransaction()
+  let transaction = memoryDB.ctx.newTransaction()
   defer: transaction.dispose()

   let

@@ -28,7 +28,7 @@ proc dumpDebug(com: CommonRef, blockNumber: BlockNumber) =
     capture = com.db.newCapture.value
     captureCom = com.clone(capture.recorder)

-  let transaction = capture.recorder.newTransaction()
+  let transaction = capture.recorder.ctx.newTransaction()
   defer: transaction.dispose()

@@ -66,7 +66,7 @@ proc main() {.used.} =
   var parentBlock = requestBlock(conf.head, { DownloadAndValidate })
   discard com.db.setHead(parentBlock.header)

-  let kvt = com.db.newKvt()
+  let kvt = com.db.ctx.getKvt()
   if canonicalHeadHashKey().toOpenArray notin kvt:
     persistToDb(com.db):
       com.initializeEmptyDb()

@@ -21,7 +21,7 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: BlockNumber, parent:
     state = nimbus["state"]
     headerHash = rlpHash(header)
     chainDB = newCoreDbRef(DefaultDbMemory)
-    kvt = chainDB.newKvt()
+    kvt = chainDB.ctx.getKvt()

   discard chainDB.setHead(parent, true)
   chainDB.persistTransactions(blockNumber, header.txRoot, blk.transactions)

@@ -29,7 +29,7 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
   for i in 0 ..< numBlocks:
     blocks[i] = com.db.getEthBlock(blockNumber + i.BlockNumber)

-  let transaction = com.db.newTransaction()
+  let transaction = com.db.ctx.newTransaction()
   defer: transaction.dispose()

   for i in 0 ..< numBlocks:

@@ -117,7 +117,7 @@ proc forkedChainMain*() =
     blk2 = cc.makeBlk(2, blk1)
     blk3 = cc.makeBlk(3, blk2)

-    dbTx = cc.db.newTransaction()
+    dbTx = cc.db.ctx.newTransaction()
     blk4 = cc.makeBlk(4, blk3)
     blk5 = cc.makeBlk(5, blk4)
     blk6 = cc.makeBlk(6, blk5)

@@ -213,7 +213,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
   let eAddr = env.txs[inx].getRecipient

   block:
-    let dbTx = env.xdb.newTransaction()
+    let dbTx = env.xdb.ctx.newTransaction()

     block:
       let accTx = ledger.beginSavepoint

@@ -229,7 +229,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false)
       dbTx.rollback()

   block:
-    let dbTx = env.xdb.newTransaction()
+    let dbTx = env.xdb.ctx.newTransaction()

     block:
       let accTx = ledger.beginSavepoint

@@ -248,7 +248,7 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
   let eAddr = env.txs[inx].getRecipient

   block:
-    let dbTx = env.xdb.newTransaction()
+    let dbTx = env.xdb.ctx.newTransaction()

     block:
       let accTx = ledger.beginSavepoint

@@ -278,7 +278,7 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
       dbTx.commit()

   block:
-    let dbTx = env.xdb.newTransaction()
+    let dbTx = env.xdb.ctx.newTransaction()

     block:
       let accTx = ledger.beginSavepoint

@@ -355,7 +355,7 @@ proc runLedgerTransactionTests(noisy = true) =
     test &"Run {env.txi.len} two-step trials with rollback":
       let head = env.xdb.getCanonicalHead()
      for n in env.txi:
-        let dbTx = env.xdb.newTransaction()
+        let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
         let ledger = env.com.getLedger(head)
         env.runTrial2ok(ledger, n)

@@ -363,7 +363,7 @@ proc runLedgerTransactionTests(noisy = true) =
     test &"Run {env.txi.len} three-step trials with rollback":
       let head = env.xdb.getCanonicalHead()
       for n in env.txi:
-        let dbTx = env.xdb.newTransaction()
+        let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
         let ledger = env.com.getLedger(head)
         env.runTrial3(ledger, n, rollback = true)

@@ -372,7 +372,7 @@ proc runLedgerTransactionTests(noisy = true) =
         " throwing Exceptions":
       let head = env.xdb.getCanonicalHead()
       for n in env.txi:
-        let dbTx = env.xdb.newTransaction()
+        let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
         let ledger = env.com.getLedger(head)
         env.runTrial3Survive(ledger, n, noisy)

@@ -380,7 +380,7 @@ proc runLedgerTransactionTests(noisy = true) =
     test &"Run {env.txi.len} tree-step trials without rollback":
       let head = env.xdb.getCanonicalHead()
       for n in env.txi:
-        let dbTx = env.xdb.newTransaction()
+        let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
         let ledger = env.com.getLedger(head)
         env.runTrial3(ledger, n, rollback = false)

@@ -388,7 +388,7 @@ proc runLedgerTransactionTests(noisy = true) =
     test &"Run {env.txi.len} four-step trials with rollback and db frames":
       let head = env.xdb.getCanonicalHead()
       for n in env.txi:
-        let dbTx = env.xdb.newTransaction()
+        let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
         let ledger = env.com.getLedger(head)
         env.runTrial4(ledger, n, rollback = true)

@@ -524,7 +524,7 @@ proc runLedgerBasicOperationsTests() =
       check ac.getCode(addr2) == code
       let
         key = contractHashKey(keccakHash(code))
-        val = memDB.newKvt().get(key.toOpenArray).valueOr: EmptyBlob
+        val = memDB.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob
       check val == code

     test "accessList operations":

@@ -85,7 +85,7 @@ proc verifySlotProof(trustedStorageRoot: Web3Hash, slot: StorageProof): MptProof
 proc persistFixtureBlock(chainDB: CoreDbRef) =
   let header = getBlockHeader4514995()
   # Manually inserting header to avoid any parent checks
-  discard chainDB.newKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
+  discard chainDB.ctx.getKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
   chainDB.addBlockNumberToHashLookup(header)
   chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions)
   chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995())

@@ -167,6 +167,10 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv

   com.db.persistReceipts(vmState.receipts)
   let
+    # TODO: `getColumn(CtReceipts)` does not exists anymore. There s only the
+    #       generic `MPT` left that can be retrieved with `getGeneric()`,
+    #       optionally with argument `clearData=true`
+    #
     receiptRoot = com.db.ctx.getColumn(CtReceipts).state(updateOk=true).valueOr(EMPTY_ROOT_HASH)
     date = dateTime(2017, mMar, 30)
     timeStamp = date.toTime.toUnix.EthTime
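The TODO above implies that well-known roots now have to be rebuilt through
the single generic MPT. A hedged sketch of computing such a root with
`getGeneric()`, using the `merge()`/`state()` calls shown earlier in this
diff; the proc name and the `items` parameter are illustrative:

  # Derive a trie root by merging RLP-indexed items into the generic MPT,
  # then fetching its (updated) state.
  proc genericRootSketch(db: CoreDbRef, items: openArray[seq[byte]]): Hash256 =
    let mpt = db.ctx.getGeneric(clearData=true)
    for i, blob in items:
      mpt.merge(rlp.encode(i.uint), blob).isOkOr:
        raiseAssert "merge() failed: " & $$error
    mpt.state(updateOk=true).valueOr(EMPTY_ROOT_HASH)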
@@ -34,8 +34,8 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
     txRoot: Hash256 # header with block number `num`
     rcptRoot: Hash256 # ditto
   let
-    adb = cdb.ctx.getColumn(CtGeneric).backend.toAristo
-    kdb = cdb.newKvt.backend.toAristo
+    adb = cdb.mpt
+    kdb = cdb.kvt

   # Fill KVT and collect `proof` data
   for (k,v) in jKvp.pairs:

@@ -60,6 +60,10 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
       discard
     check kdb.put(key, val).isOk

+  # TODO: `getColumn(CtXyy)` does not exists anymore. There is only the generic
+  #       `MPT` left that can be retrieved with `getGeneric()`, optionally with
+  #       argument `clearData=true`
+
   # Install sub-trie roots onto production db
   if txRoot.isValid:
     doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk