Avoid recomputing hashes when persisting data (#2350)

Jacek Sieka 2024-06-14 07:10:00 +02:00 committed by GitHub
parent 5a5cc6295e
commit 189a20bbae
19 changed files with 254 additions and 281 deletions
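
The pattern repeated throughout the diffs below: database getters such as getScore and getHash now return Opt[...] (from the results package) instead of raising RlpError, persistence procs return bool or nothing instead of a recomputed root hash, and callers pick a fallback with valueOr. A minimal sketch of that getter style, with a plain Table standing in for the CoreDbRef key-value store (the getScore below is illustrative only, not the nimbus API):

# Not nimbus code: a minimal sketch of the Opt-returning getter style this
# commit adopts, with a Table standing in for CoreDbRef/KVT storage.
import std/tables
import results

proc getScore(db: Table[string, uint64], blockHash: string): Opt[uint64] =
  # A missing (or, in the real code, undecodable) entry becomes Opt.none
  # instead of a raised RlpError.
  if blockHash in db:
    Opt.some(db[blockHash])
  else:
    Opt.none(uint64)

when isMainModule:
  var db = initTable[string, uint64]()
  db["0xabc"] = 100'u64

  # Callers choose the fallback at the call site, as the diff does with
  # getScore(...).valueOr(0.u256) and the block form of valueOr:
  let td = db.getScore("0xabc").valueOr(0'u64)
  let missing = db.getScore("0xdef").valueOr:
    0'u64
  echo td, " ", missing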

View File

@ -104,14 +104,16 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
stateRootChpt = vmState.parent.stateRoot # Check point
? vmState.processBlock(blk)
if not c.db.persistHeader(
header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory):
return err("Could not persist header")
try:
c.db.persistHeaderToDb(
header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory)
discard c.db.persistTransactions(header.blockNumber, blk.transactions)
discard c.db.persistReceipts(vmState.receipts)
c.db.persistTransactions(header.blockNumber, blk.transactions)
c.db.persistReceipts(vmState.receipts)
if blk.withdrawals.isSome:
discard c.db.persistWithdrawals(blk.withdrawals.get)
c.db.persistWithdrawals(blk.withdrawals.get)
except CatchableError as exc:
return err(exc.msg)
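
setBlock above also previews the error-handling shift used in persist_blocks below: persistHeader returns bool and the transaction/receipt/withdrawal writers return nothing, so the try/except CatchableError wrapper disappears and failures flow through the proc's Result. A stripped-down sketch of that control-flow change, with a stand-in persistHeader rather than the real database call:

# Sketch only: persistHeader here is a stand-in, not the nimbus database API.
import results

proc persistHeader(succeed: bool): bool =
  succeed

proc setBlockOld(succeed: bool): Result[void, string] =
  # old shape: raising persistence call guarded by try/except
  try:
    if not persistHeader(succeed):
      raise newException(ValueError, "persist failed")
    return ok()
  except CatchableError as exc:
    return err(exc.msg)

proc setBlockNew(succeed: bool): Result[void, string] =
  # new shape: bool result checked directly, error propagated via Result
  if not persistHeader(succeed):
    return err("Could not persist header")
  ok()

when isMainModule:
  doAssert setBlockOld(false).isErr
  doAssert setBlockNew(false).error == "Could not persist header"
  doAssert setBlockNew(true).isOk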

View File

@ -61,7 +61,7 @@ proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
doAssert stateDB.rootHash == genesisHeader.stateRoot
t.com.db.persistHeaderToDb(genesisHeader,
doAssert t.com.db.persistHeader(genesisHeader,
t.com.consensus == ConsensusType.POS)
doAssert(t.com.db.getCanonicalHead().blockHash == genesisHeader.blockHash)

View File

@ -156,7 +156,8 @@ proc newPayload*(ben: BeaconEngineRef,
let ttd = com.ttd.get(high(common.BlockNumber))
if version == Version.V1:
let td = db.getScore(header.parentHash)
let td = db.getScore(header.parentHash).valueOr:
0.u256
if (not com.forkGTE(MergeFork)) and td < ttd:
warn "Ignoring pre-merge payload",
number = header.blockNumber, hash = blockHash, td, ttd

View File

@ -174,11 +174,10 @@ proc tooLargeRequest*(msg: string): ref InvalidRequest =
proc latestValidHash*(db: CoreDbRef,
parent: common.BlockHeader,
ttd: DifficultyInt): common.Hash256
{.gcsafe, raises: [RlpError].} =
ttd: DifficultyInt): common.Hash256 =
if parent.isGenesis:
return common.Hash256()
let ptd = db.getScore(parent.parentHash)
let ptd = db.getScore(parent.parentHash).valueOr(0.u256)
if ptd >= ttd:
parent.blockHash
else:

View File

@ -338,14 +338,14 @@ func forkId*(com: CommonRef, head: BlockNumber, time: EthTime): ForkID {.gcsafe.
func isEIP155*(com: CommonRef, number: BlockNumber): bool =
com.config.eip155Block.isSome and number >= com.config.eip155Block.get
proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool
{.gcsafe, raises: [CatchableError].} =
proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool =
if com.config.terminalTotalDifficulty.isNone:
return false
let
ttd = com.config.terminalTotalDifficulty.get()
ptd = com.db.getScore(header.parentHash)
ptd = com.db.getScore(header.parentHash).valueOr:
return false
td = ptd + header.difficulty
ptd >= ttd and td >= ttd
@ -358,15 +358,13 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool =
func isPragueOrLater*(com: CommonRef, t: EthTime): bool =
com.config.pragueTime.isSome and t >= com.config.pragueTime.get
proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType
{.gcsafe, raises: [CatchableError].} =
proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType =
if com.isBlockAfterTtd(header):
return ConsensusType.POS
return com.config.consensusType
proc initializeEmptyDb*(com: CommonRef)
{.gcsafe, raises: [CatchableError].} =
proc initializeEmptyDb*(com: CommonRef) =
let kvt = com.db.newKvt()
proc contains(kvt: CoreDxKvtRef; key: openArray[byte]): bool =
kvt.hasKey(key).expect "valid bool"
@ -374,8 +372,10 @@ proc initializeEmptyDb*(com: CommonRef)
info "Writing genesis to DB"
doAssert(com.genesisHeader.blockNumber.isZero,
"can't commit genesis block with number > 0")
com.db.persistHeaderToDb(com.genesisHeader,
com.consensusType == ConsensusType.POS)
doAssert(com.db.persistHeader(com.genesisHeader,
com.consensusType == ConsensusType.POS,
startOfHistory=com.genesisHeader.parentHash),
"can persist genesis header")
doAssert(canonicalHeadHashKey().toOpenArray in kvt)
proc syncReqNewHead*(com: CommonRef; header: BlockHeader)
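
With getScore returning Opt, the raises: [CatchableError] annotations on isBlockAfterTtd, consensus and initializeEmptyDb can be dropped; a missing parent score becomes an early "return false" via valueOr. A small self-contained sketch of that shape (stand-in getScore, scores simplified to uint64), which also compiles under push raises: []:

# Sketch with a stand-in getScore; mirrors the early-exit shape used in
# isBlockAfterTtd above.
{.push raises: [].}

import results

proc getScore(haveParent: bool): Opt[uint64] =
  if haveParent: Opt.some(123'u64) else: Opt.none(uint64)

proc isBlockAfterTtd(haveParent: bool, difficulty, ttd: uint64): bool =
  # A missing parent score simply means "not after TTD"; nothing raises.
  let ptd = getScore(haveParent).valueOr:
    return false
  let td = ptd + difficulty
  ptd >= ttd and td >= ttd

{.pop.}

when isMainModule:
  doAssert isBlockAfterTtd(true, difficulty = 0, ttd = 100)
  doAssert not isBlockAfterTtd(false, difficulty = 0, ttd = 100)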

View File

@ -25,10 +25,6 @@ type
## common block chain configuration
## used throughout entire app
validateBlock: bool ##\
## If turn off, `persistBlocks` will always return
## ValidationResult.OK and disable extraValidation too.
extraValidation: bool ##\
## Trigger extra validation, currently within `persistBlocks()`
## function only.
@ -53,7 +49,6 @@ proc newChain*(com: CommonRef,
## chain validation if set `true`.
ChainRef(
com: com,
validateBlock: true,
extraValidation: extraValidation,
vmState: vmState
)
@ -64,7 +59,6 @@ func newChain*(com: CommonRef): ChainRef =
let extraValidation = com.consensus == ConsensusType.POS
ChainRef(
com: com,
validateBlock: true,
extraValidation: extraValidation,
)
@ -87,10 +81,6 @@ proc com*(c: ChainRef): CommonRef =
## Getter
c.com
proc validateBlock*(c: ChainRef): bool =
## Getter
c.validateBlock
proc extraValidation*(c: ChainRef): bool =
## Getter
c.extraValidation
@ -109,10 +99,6 @@ proc currentBlock*(c: ChainRef): BlockHeader
# ------------------------------------------------------------------------------
# Public `Chain` setters
# ------------------------------------------------------------------------------
proc `validateBlock=`*(c: ChainRef; validateBlock: bool) =
## Setter. If set `true`, the assignment value `validateBlock` enables
## block execution, else it will always return ValidationResult.OK
c.validateBlock = validateBlock
proc `extraValidation=`*(c: ChainRef; extraValidation: bool) =
## Setter. If set `true`, the assignment value `extraValidation` enables

View File

@ -86,7 +86,9 @@ proc persistBlocksImpl(
toBlock = blocks[blocks.high()].header.blockNumber
trace "Persisting blocks", fromBlock, toBlock
var txs = 0
var
txs = 0
gas = GasInt(0)
for blk in blocks:
template header(): BlockHeader =
blk.header
@ -97,12 +99,11 @@ proc persistBlocksImpl(
debug "Cannot update VmState", blockNumber = header.blockNumber
return err("Cannot update VmState to block " & $header.blockNumber)
if c.validateBlock and c.extraValidation and c.verifyFrom <= header.blockNumber:
if c.extraValidation and c.verifyFrom <= header.blockNumber:
# TODO: how to checkseal from here
?c.com.validateHeaderAndKinship(blk, checkSealOK = false)
if c.validateBlock:
?vmState.processBlock(blk)
?vmState.processBlock(blk)
# when defined(nimbusDumpDebuggingMetaData):
# if validationResult == ValidationResult.Error and
@ -110,32 +111,28 @@ proc persistBlocksImpl(
# vmState.dumpDebuggingMetaData(header, body)
# warn "Validation error. Debugging metadata dumped."
try:
if NoPersistHeader notin flags:
c.db.persistHeaderToDb(
header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory
)
if NoPersistHeader notin flags:
if not c.db.persistHeader(
header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory
):
return err("Could not persist header")
if NoSaveTxs notin flags:
discard c.db.persistTransactions(header.blockNumber, blk.transactions)
if NoSaveTxs notin flags:
c.db.persistTransactions(header.blockNumber, blk.transactions)
if NoSaveReceipts notin flags:
discard c.db.persistReceipts(vmState.receipts)
if NoSaveReceipts notin flags:
c.db.persistReceipts(vmState.receipts)
if NoSaveWithdrawals notin flags and blk.withdrawals.isSome:
discard c.db.persistWithdrawals(blk.withdrawals.get)
except CatchableError as exc:
return err(exc.msg)
if NoSaveWithdrawals notin flags and blk.withdrawals.isSome:
c.db.persistWithdrawals(blk.withdrawals.get)
# update currentBlock *after* we persist it
# so the rpc returns consistent results
# between eth_blockNumber and eth_syncing
c.com.syncCurrent = header.blockNumber
# Done with this block
# lapTx.commit()
txs += blk.transactions.len
gas += blk.header.gasUsed
dbTx.commit()
@ -154,7 +151,7 @@ proc persistBlocksImpl(
except CatchableError as exc:
warn "Could not clean up old blocks from history", err = exc.msg
ok((blocks.len, txs, vmState.cumulativeGasUsed))
ok((blocks.len, txs, gas))
# ------------------------------------------------------------------------------
# Public `ChainDB` methods
@ -163,11 +160,10 @@ proc persistBlocksImpl(
proc insertBlockWithoutSetHead*(c: ChainRef, blk: EthBlock): Result[void, string] =
discard ?c.persistBlocksImpl([blk], {NoPersistHeader, NoSaveReceipts})
try:
c.db.persistHeaderToDbWithoutSetHead(blk.header, c.com.startOfHistory)
ok()
except RlpError as exc:
err(exc.msg)
if not c.db.persistHeader(blk.header.blockHash, blk.header, c.com.startOfHistory):
return err("Could not persist header")
ok()
proc setCanonical*(c: ChainRef, header: BlockHeader): Result[void, string] =
if header.parentHash == Hash256():

View File

@ -117,7 +117,7 @@ func miningHash(header: BlockHeader): Hash256 =
timestamp: header.timestamp,
extraData: header.extraData)
rlp.encode(miningHeader).keccakHash
rlpHash(miningHeader)
# ---------------

View File

@ -45,7 +45,7 @@ proc getBlockHeader*(
n: BlockNumber;
output: var BlockHeader;
): bool
{.gcsafe, raises: [RlpError].}
{.gcsafe.}
proc getBlockHeader*(
db: CoreDbRef,
@ -58,11 +58,12 @@ proc getBlockHash*(
n: BlockNumber;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].}
{.gcsafe.}
proc addBlockNumberToHashLookup*(
db: CoreDbRef;
header: BlockHeader;
blockNumber: BlockNumber;
blockHash: Hash256;
) {.gcsafe.}
proc getBlockHeader*(
@ -72,9 +73,7 @@ proc getBlockHeader*(
): bool
{.gcsafe.}
# Copied from `utils/utils` which cannot be imported here in order to
# avoid circular imports.
func hash(b: BlockHeader): Hash256
proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash256] {.gcsafe.}
# ------------------------------------------------------------------------------
# Private helpers
@ -96,14 +95,13 @@ template discardRlpException(info: static[string]; code: untyped) =
iterator findNewAncestors(
db: CoreDbRef;
header: BlockHeader;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
): BlockHeader =
## Returns the chain leading up from the given header until the first
## ancestor it has in common with our canonical chain.
var h = header
var orig: BlockHeader
while true:
if db.getBlockHeader(h.blockNumber, orig) and orig.hash == h.hash:
if db.getBlockHeader(h.blockNumber, orig) and orig.rlpHash == h.rlpHash:
break
yield h
@ -111,7 +109,9 @@ iterator findNewAncestors(
if h.parentHash == GENESIS_PARENT_HASH:
break
else:
h = db.getBlockHeader(h.parentHash)
if not db.getBlockHeader(h.parentHash, h):
warn "Could not find parent while iterating", hash = h.parentHash
break
# ------------------------------------------------------------------------------
# Public iterators
@ -150,23 +150,21 @@ iterator getBlockTransactionData*(
iterator getBlockTransactions*(
db: CoreDbRef;
header: BlockHeader;
): Transaction
{.gcsafe, raises: [RlpError].} =
): Transaction =
for encodedTx in db.getBlockTransactionData(header.txRoot):
yield rlp.decode(encodedTx, Transaction)
try:
yield rlp.decode(encodedTx, Transaction)
except RlpError as exc:
warn "Cannot decode database transaction", data = toHex(encodedTx), error = exc.msg
iterator getBlockTransactionHashes*(
db: CoreDbRef;
blockHeader: BlockHeader;
): Hash256
{.gcsafe, raises: [RlpError].} =
): Hash256 =
## Returns an iterable of the transaction hashes from the block specified
## by the given block header.
for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
let tx = rlp.decode(encodedTx, Transaction)
yield rlpHash(tx) # beware EIP-4844
yield keccakHash(encodedTx)
iterator getWithdrawalsData*(
db: CoreDbRef;
@ -232,9 +230,6 @@ iterator getReceipts*(
# Private helpers
# ------------------------------------------------------------------------------
func hash(b: BlockHeader): Hash256 =
rlpHash(b)
proc removeTransactionFromCanonicalChain(
db: CoreDbRef;
transactionHash: Hash256;
@ -248,24 +243,32 @@ proc removeTransactionFromCanonicalChain(
proc setAsCanonicalChainHead(
db: CoreDbRef;
headerHash: Hash256;
) {.gcsafe, raises: [RlpError,BlockNotFound].} =
header: BlockHeader;
) =
## Sets the header as the canonical chain HEAD.
let header = db.getBlockHeader(headerHash)
var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
reverse(newCanonicalHeaders)
for h in newCanonicalHeaders:
var oldHash: Hash256
if not db.getBlockHash(h.blockNumber, oldHash):
break
# TODO This code handles reorgs - this should be moved elsewhere because we'll
# be handling reorgs mainly in-memory
if header.blockNumber == 0 or
db.getCanonicalHeaderHash().valueOr(Hash256()) != header.parentHash:
var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
reverse(newCanonicalHeaders)
for h in newCanonicalHeaders:
var oldHash: Hash256
if not db.getBlockHash(h.blockNumber, oldHash):
break
let oldHeader = db.getBlockHeader(oldHash)
for txHash in db.getBlockTransactionHashes(oldHeader):
db.removeTransactionFromCanonicalChain(txHash)
# TODO re-add txn to internal pending pool (only if local sender)
try:
let oldHeader = db.getBlockHeader(oldHash)
for txHash in db.getBlockTransactionHashes(oldHeader):
db.removeTransactionFromCanonicalChain(txHash)
# TODO re-add txn to internal pending pool (only if local sender)
except BlockNotFound:
warn "Could not load old header", oldHash
for h in newCanonicalHeaders:
db.addBlockNumberToHashLookup(h)
for h in newCanonicalHeaders:
# TODO don't recompute block hash
db.addBlockNumberToHashLookup(h.blockNumber, h.blockHash)
let canonicalHeadHash = canonicalHeadHashKey()
db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
@ -339,8 +342,7 @@ proc exists*(db: CoreDbRef, hash: Hash256): bool =
proc getSavedStateBlockNumber*(
db: CoreDbRef;
relax = false;
): BlockNumber
{.gcsafe, raises: [RlpError].} =
): BlockNumber =
## Returns the block number registered when the database was last
## updated, or `BlockNumber(0)` if no update was found.
##
@ -389,24 +391,29 @@ proc getBlockHeader*(
proc getHash(
db: CoreDbRef;
key: DbKey;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
): Opt[Hash256] =
let data = db.newKvt().get(key.toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt "getHash()", key, action="get()", error=($$error)
return false
output = rlp.decode(data, Hash256)
true
return Opt.none(Hash256)
try:
Opt.some(rlp.decode(data, Hash256))
except RlpError as exc:
warn logTxt "getHash()", key, action="rlp.decode()", error=exc.msg
Opt.none(Hash256)
proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash256] =
db.getHash(canonicalHeadHashKey())
proc getCanonicalHead*(
db: CoreDbRef;
output: var BlockHeader;
): bool =
let headHash = db.getCanonicalHeaderHash().valueOr:
return false
discardRlpException "getCanonicalHead()":
var headHash: Hash256
if db.getHash(canonicalHeadHashKey(), headHash) and
db.getBlockHeader(headHash, output):
if db.getBlockHeader(headHash, output):
return true
proc getCanonicalHead*(
@ -417,35 +424,27 @@ proc getCanonicalHead*(
raise newException(
CanonicalHeadNotFound, "No canonical head set for this chain")
proc getCanonicalHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(canonicalHeadHashKey(), result)
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
output: var Hash256;
): bool =
## Return the block hash for the given block number.
db.getHash(blockNumberToHashKey(n), output)
output = db.getHash(blockNumberToHashKey(n)).valueOr:
return false
true
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
): Hash256
{.gcsafe, raises: [RlpError,BlockNotFound].} =
{.gcsafe, raises: [BlockNotFound].} =
## Return the block hash for the given block number.
if not db.getHash(blockNumberToHashKey(n), result):
if not db.getBlockHash(n, result):
raise newException(BlockNotFound, "No block hash for number " & $n)
proc getHeadBlockHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
if not db.getHash(canonicalHeadHashKey(), result):
result = Hash256()
proc getHeadBlockHash*(db: CoreDbRef): Hash256 =
db.getHash(canonicalHeadHashKey()).valueOr(Hash256())
proc getBlockHeader*(
db: CoreDbRef;
@ -460,8 +459,7 @@ proc getBlockHeader*(
proc getBlockHeaderWithHash*(
db: CoreDbRef;
n: BlockNumber;
): Option[(BlockHeader, Hash256)]
{.gcsafe, raises: [RlpError].} =
): Option[(BlockHeader, Hash256)] =
## Returns the block header and its hash, with the given number in the
## canonical chain. Hash is returned to avoid recomputing it
var hash: Hash256
@ -481,7 +479,7 @@ proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
{.raises: [BlockNotFound].} =
## Returns the block header with the given number in the canonical chain.
## Raises BlockNotFound error if the block is not in the DB.
db.getBlockHeader(db.getBlockHash(n))
@ -489,14 +487,17 @@ proc getBlockHeader*(
proc getScore*(
db: CoreDbRef;
blockHash: Hash256;
): UInt256
{.gcsafe, raises: [RlpError].} =
): Opt[UInt256] =
let data = db.newKvt()
.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt "getScore()", blockHash, action="get()", error=($$error)
return
rlp.decode(data, UInt256)
return Opt.none(UInt256)
try:
Opt.some(rlp.decode(data, UInt256))
except RlpError as exc:
warn logTxt "getScore()", data = data.toHex(), error=exc.msg
Opt.none(UInt256)
proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
## for testing purpose
@ -506,36 +507,17 @@ proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
return
proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
const info = "getTd()"
let bytes = db.newKvt()
.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt info, blockHash, action="get()", error=($$error)
td = db.getScore(blockHash).valueOr:
return false
discardRlpException info:
td = rlp.decode(bytes, UInt256)
return true
true
proc headTotalDifficulty*(
db: CoreDbRef;
): UInt256
{.gcsafe, raises: [RlpError].} =
# this is actually a combination of `getHash` and `getScore`
const
info = "headTotalDifficulty()"
key = canonicalHeadHashKey()
let
kvt = db.newKvt()
data = kvt.get(key.toOpenArray).valueOr:
if error.error != KvtNotFound:
warn logTxt info, key, action="get()", error=($$error)
return 0.u256
blockHash = rlp.decode(data, Hash256)
numData = kvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
warn logTxt info, blockHash, action="get()", error=($$error)
return 0.u256
): UInt256 =
let blockHash = db.getCanonicalHeaderHash().valueOr:
return 0.u256
rlp.decode(numData, UInt256)
db.getScore(blockHash).valueOr(0.u256)
proc getAncestorsHashes*(
db: CoreDbRef;
@ -549,12 +531,13 @@ proc getAncestorsHashes*(
result = newSeq[Hash256](ancestorCount)
while ancestorCount > 0:
h = db.getBlockHeader(h.parentHash)
result[ancestorCount - 1] = h.hash
result[ancestorCount - 1] = h.rlpHash
dec ancestorCount
proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
let blockNumberKey = blockNumberToHashKey(header.blockNumber)
db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
proc addBlockNumberToHashLookup*(
db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash256) =
let blockNumberKey = blockNumberToHashKey(blockNumber)
db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
warn logTxt "addBlockNumberToHashLookup()",
blockNumberKey, action="put()", error=($$error)
@ -562,12 +545,12 @@ proc persistTransactions*(
db: CoreDbRef;
blockNumber: BlockNumber;
transactions: openArray[Transaction];
): Hash256 =
) =
const
info = "persistTransactions()"
if transactions.len == 0:
return EMPTY_ROOT_HASH
return
let
mpt = db.ctx.getMpt(CtTxs)
@ -577,25 +560,20 @@ proc persistTransactions*(
let
encodedKey = rlp.encode(idx)
encodedTx = rlp.encode(tx)
txHash = rlpHash(tx)
txHash = keccakHash(encodedTx)
blockKey = transactionHashToBlockKey(txHash)
txKey: TransactionKey = (blockNumber, idx)
mpt.merge(encodedKey, encodedTx).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
return EMPTY_ROOT_HASH
return
kvt.put(blockKey.toOpenArray, rlp.encode(txKey)).isOkOr:
trace logTxt info, blockKey, action="put()", error=($$error)
return EMPTY_ROOT_HASH
mpt.getColumn.state.valueOr:
when extraTraceMessages:
warn logTxt info, action="state()"
return EMPTY_ROOT_HASH
return
proc forgetHistory*(
db: CoreDbRef;
blockNum: BlockNumber;
): bool
{.gcsafe, raises: [RlpError].} =
): bool =
## Remove all data related to the block number argument `num`. This function
## returns `true` if some history was available and deleted.
var blockHash: Hash256
@ -615,8 +593,7 @@ proc getTransaction*(
txRoot: Hash256;
txIndex: int;
res: var Transaction;
): bool
{.gcsafe, raises: [RlpError].} =
): bool =
const
info = "getTransaction()"
let
@ -632,7 +609,12 @@ proc getTransaction*(
if error.error != MptNotFound:
warn logTxt info, txIndex, action="fetch()", error=($$error)
return false
res = rlp.decode(txData, Transaction)
try:
res = rlp.decode(txData, Transaction)
except RlpError as exc:
warn logTxt info,
txRoot, action="rlp.decode()", col=($$col), error=exc.msg
return false
true
proc getTransactionCount*(
@ -695,19 +677,16 @@ proc getUncles*(
proc persistWithdrawals*(
db: CoreDbRef;
withdrawals: openArray[Withdrawal];
): Hash256 =
) =
const info = "persistWithdrawals()"
if withdrawals.len == 0:
return EMPTY_ROOT_HASH
return
let mpt = db.ctx.getMpt(CtWithdrawals)
for idx, wd in withdrawals:
mpt.merge(rlp.encode(idx), rlp.encode(wd)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
return EMPTY_ROOT_HASH
mpt.getColumn.state.valueOr:
warn logTxt info, action="state()"
return EMPTY_ROOT_HASH
return
proc getWithdrawals*(
db: CoreDbRef;
@ -791,7 +770,7 @@ proc getUncleHashes*(
): seq[Hash256]
{.gcsafe, raises: [RlpError,ValueError].} =
for blockHash in blockHashes:
result &= db.getBlockBody(blockHash).uncles.mapIt(it.hash)
result &= db.getBlockBody(blockHash).uncles.mapIt(it.rlpHash)
proc getUncleHashes*(
db: CoreDbRef;
@ -806,7 +785,7 @@ proc getUncleHashes*(
warn logTxt "getUncleHashes()",
ommersHash=header.ommersHash, action="get()", `error`=($$error)
return @[]
return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.hash)
return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.rlpHash)
proc getTransactionKey*(
db: CoreDbRef;
@ -869,19 +848,15 @@ proc setHead*(
proc persistReceipts*(
db: CoreDbRef;
receipts: openArray[Receipt];
): Hash256 =
) =
const info = "persistReceipts()"
if receipts.len == 0:
return EMPTY_ROOT_HASH
return
let mpt = db.ctx.getMpt(CtReceipts)
for idx, rec in receipts:
mpt.merge(rlp.encode(idx), rlp.encode(rec)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
mpt.getColumn.state.valueOr:
when extraTraceMessages:
trace logTxt info, action="state()"
return EMPTY_ROOT_HASH
proc getReceipts*(
db: CoreDbRef;
@ -893,62 +868,86 @@ proc getReceipts*(
receipts.add(r)
return receipts
proc persistHeaderToDb*(
proc persistScore*(
db: CoreDbRef;
blockHash: Hash256;
score: UInt256
): bool =
let
kvt = db.newKvt()
scoreKey = blockHashToScoreKey(blockHash)
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "persistHeader()",
scoreKey, action="put()", `error`=($$error)
return
true
proc persistHeader*(
db: CoreDbRef;
blockHash: Hash256;
header: BlockHeader;
startOfHistory = GENESIS_PARENT_HASH;
): bool =
let
kvt = db.newKvt()
isStartOfHistory = header.parentHash == startOfHistory
if not isStartOfHistory and not db.headerExists(header.parentHash):
warn logTxt "persistHeaderWithoutSetHead()",
blockHash, action="headerExists(parent)"
return false
kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderWithoutSetHead()",
blockHash, action="put()", `error`=($$error)
return false
let
parentScore = if isStartOfHistory:
0.u256
else:
db.getScore(header.parentHash).valueOr:
# TODO it's slightly wrong to fail here and leave the block in the db,
# but this code is going away soon enough
return false
score = parentScore + header.difficulty
# After EIP-3675, difficulty is set to 0 but we still save the score for
# each block to simplify totalDifficulty reporting
# TODO get rid of this and store a single value
if not db.persistScore(blockHash, score):
return false
db.addBlockNumberToHashLookup(header.blockNumber, blockHash)
true
proc persistHeader*(
db: CoreDbRef;
header: BlockHeader;
forceCanonical: bool;
startOfHistory = GENESIS_PARENT_HASH;
) {.gcsafe, raises: [RlpError,EVMError].} =
let isStartOfHistory = header.parentHash == startOfHistory
let headerHash = header.blockHash
if not isStartOfHistory and not db.headerExists(header.parentHash):
raise newException(ParentNotFound, "Cannot persist block header " &
$headerHash & " with unknown parent " & $header.parentHash)
let kvt = db.newKvt()
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderToDb()",
headerHash, action="put()", `error`=($$error)
return
let score = if isStartOfHistory: header.difficulty
else: db.getScore(header.parentHash) + header.difficulty
let scoreKey = blockHashToScoreKey(headerHash)
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "persistHeaderToDb()",
scoreKey, action="put()", `error`=($$error)
return
db.addBlockNumberToHashLookup(header)
if not forceCanonical:
var canonHeader: BlockHeader
if db.getCanonicalHead canonHeader:
let headScore = db.getScore(canonHeader.hash)
if score <= headScore:
return
db.setAsCanonicalChainHead(headerHash)
proc persistHeaderToDbWithoutSetHead*(
db: CoreDbRef;
header: BlockHeader;
startOfHistory = GENESIS_PARENT_HASH;
) {.gcsafe, raises: [RlpError].} =
let isStartOfHistory = header.parentHash == startOfHistory
let headerHash = header.blockHash
let score = if isStartOfHistory: header.difficulty
else: db.getScore(header.parentHash) + header.difficulty
): bool =
let
kvt = db.newKvt()
scoreKey = blockHashToScoreKey(headerHash)
kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
warn logTxt "persistHeaderToDbWithoutSetHead()",
scoreKey, action="put()", `error`=($$error)
return
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
warn logTxt "persistHeaderToDbWithoutSetHead()",
headerHash, action="put()", `error`=($$error)
return
blockHash = header.blockHash
if not db.persistHeader(blockHash, header, startOfHistory):
return false
if not forceCanonical and header.parentHash != startOfHistory:
let
canonicalHash = db.getCanonicalHeaderHash().valueOr:
return false
canonScore = db.getScore(canonicalHash).valueOr:
return false
# TODO no need to load score from database _really_, but this code is
# hopefully going away soon
score = db.getScore(blockHash).valueOr:
return false
if score <= canonScore:
return true
db.setAsCanonicalChainHead(blockHash, header)
true
proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
## Persists the list of uncles to the database.
@ -961,11 +960,8 @@ proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
return EMPTY_ROOT_HASH
proc safeHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(safeHashKey(), result)
proc safeHeaderHash*(db: CoreDbRef): Hash256 =
db.getHash(safeHashKey()).valueOr(Hash256())
proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
let safeHashKey = safeHashKey()
@ -976,9 +972,8 @@ proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
proc finalizedHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(finalizedHashKey(), result)
): Hash256 =
db.getHash(finalizedHashKey()).valueOr(Hash256())
proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
let finalizedHashKey = finalizedHashKey()
@ -996,7 +991,7 @@ proc safeHeader*(
proc finalizedHeader*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
{.gcsafe, raises: [BlockNotFound].} =
db.getBlockHeader(db.finalizedHeaderHash)
proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
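
The commit title shows up most directly in persistTransactions and getBlockTransactionHashes above: the transaction hash is now keccakHash(encodedTx), i.e. keccak over the RLP bytes already in hand, rather than rlpHash(tx), which would re-encode the transaction just to hash it. A toy illustration of that encode-once, hash-the-same-bytes idea, using nimcrypto's keccak256 as a stand-in for keccakHash and a hand-rolled encoder in place of rlp.encode:

# Toy illustration only; encodeTx stands in for rlp.encode and keccak256
# for keccakHash.
import nimcrypto

proc encodeTx(nonce: uint64): seq[byte] =
  # pretend this is the (relatively costly) RLP encoding step
  for i in 0 ..< 8:
    result.add byte((nonce shr (8 * i)) and 0xff)

when isMainModule:
  let encodedTx = encodeTx(42)
  # Before: rlpHash(tx) re-encoded the transaction just to hash it.
  # After: hash the encoding that is about to be written to the database,
  # then reuse that same hash for the blockKey lookup entry as well.
  let txHash = keccak256.digest(encodedTx)
  echo txHash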

View File

@ -238,10 +238,10 @@ proc resp(data: openArray[byte]): RespResult =
ok(resp("0x" & data.toHex))
proc getTotalDifficulty(ctx: GraphqlContextRef, blockHash: common.Hash256): RespResult =
try:
bigIntNode(getScore(ctx.chainDB, blockHash))
except CatchableError as e:
err("can't get total difficulty: " & e.msg)
let score = getScore(ctx.chainDB, blockHash).valueOr:
return err("can't get total difficulty")
bigIntNode(score)
proc getOmmerCount(ctx: GraphqlContextRef, ommersHash: common.Hash256): RespResult =
try:

View File

@ -59,18 +59,12 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
setControlCHook(controlCHandler)
let
start =
try:
com.db.getSavedStateBlockNumber().truncate(uint64) + 1
except RlpError as exc:
error "Could not read block number", err = exc.msg
quit(QuitFailure)
start = com.db.getSavedStateBlockNumber().truncate(uint64) + 1
chain = com.newChain()
var
imported = 0'u64
gas = 0.u256
gas = GasInt(0)
txs = 0
time0 = Moment.now()
csv =
@ -121,7 +115,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
quit(QuitFailure)
txs += statsRes[].txs
gas += uint64 statsRes[].gas
gas += statsRes[].gas
let
time2 = Moment.now()
diff1 = (time2 - time1).nanoseconds().float / 1000000000
@ -131,13 +125,13 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
blockNumber,
blocks = imported,
txs,
gas,
mgas = f(gas.float / 1000000),
bps = f(blocks.len.float / diff1),
tps = f(statsRes[].txs.float / diff1),
gps = f(statsRes[].gas.float / diff1),
mgps = f(statsRes[].gas.float / 1000000 / diff1),
avgBps = f(imported.float / diff0),
avgTps = f(txs.float / diff0),
avgGps = f(gas.truncate(uint64).float / diff0), # TODO fix truncate
avgMGps = f(gas.float / 1000000 / diff0),
elapsed = shortLog(time2 - time0, 3)
if csv != nil:

View File

@ -492,7 +492,7 @@ proc setupEthRpc*(
return nil
result = populateBlockObject(uncles[index], chainDB, false, true)
result.totalDifficulty = chainDB.getScore(header.hash)
result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256)
server.rpc("eth_getUncleByBlockNumberAndIndex") do(quantityTag: BlockTag, quantity: Web3Quantity) -> BlockObject:
# Returns information about an uncle of a block by number and uncle index position.
@ -509,7 +509,7 @@ proc setupEthRpc*(
return nil
result = populateBlockObject(uncles[index], chainDB, false, true)
result.totalDifficulty = chainDB.getScore(header.hash)
result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256)
proc getLogsForBlock(
chain: CoreDbRef,

View File

@ -139,7 +139,7 @@ proc populateTransactionObject*(tx: Transaction,
result.`type` = some w3Qty(tx.txType.ord)
if optionalHeader.isSome:
let header = optionalHeader.get
result.blockHash = some(w3Hash header.hash)
result.blockHash = some(w3Hash header.blockHash)
result.blockNumber = some(w3BlockNumber(header.blockNumber))
result.`from` = w3Addr tx.getSender()
@ -197,7 +197,7 @@ proc populateBlockObject*(header: BlockHeader, chain: CoreDbRef, fullTx: bool, i
else:
none(UInt256)
if not isUncle:
result.totalDifficulty = chain.getScore(blockHash)
result.totalDifficulty = chain.getScore(blockHash).valueOr(0.u256)
result.uncles = w3Hashes chain.getUncleHashes(header)
if fullTx:
@ -228,7 +228,7 @@ proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction,
result = ReceiptObject()
result.transactionHash = w3Hash tx.rlpHash
result.transactionIndex = w3Qty(txIndex)
result.blockHash = w3Hash header.hash
result.blockHash = w3Hash header.blockHash
result.blockNumber = w3BlockNumber(header.blockNumber)
result.`from` = w3Addr tx.getSender()
result.to = some(w3Addr tx.destination)

View File

@ -84,9 +84,6 @@ func generateSafeAddress*(address: EthAddress, salt: ContractSalt,
result[0..19] = hashResult.data.toOpenArray(12, 31)
func hash*(b: BlockHeader): Hash256 {.inline.} =
rlpHash(b)
proc crc32*(crc: uint32, buf: openArray[byte]): uint32 =
const kcrc32 = [ 0'u32, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190,
0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320'u32, 0xf00f9344'u32, 0xd6d6a3e8'u32,

View File

@ -23,12 +23,12 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent: Blo
kvt = chainDB.newKvt()
discard chainDB.setHead(parent, true)
discard chainDB.persistTransactions(blockNumber, blk.transactions)
chainDB.persistTransactions(blockNumber, blk.transactions)
discard chainDB.persistUncles(blk.uncles)
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
raiseAssert "generatePrestate(): put() failed " & $$error
chainDB.addBlockNumberToHashLookup(header)
chainDB.addBlockNumberToHashLookup(header.blockNumber, headerHash)
for k, v in state:
let key = hexToSeqByte(k)
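
generatePrestate now passes the headerHash it already computed, together with the block number, to addBlockNumberToHashLookup, matching the new signature introduced in core_apps above so the helper no longer re-derives the hash from the header. A minimal sketch of that signature change, with a Table standing in for the number-to-hash column (types simplified, not the CoreDbRef API):

# Simplified stand-ins for BlockNumber/Hash256 and the lookup column.
import std/tables

type
  BlockNumber = uint64
  Hash256 = array[32, byte]

# old shape (for comparison):
#   proc addBlockNumberToHashLookup(db, header: BlockHeader) =
#     db[header.blockNumber] = header.blockHash   # recomputes keccak(rlp(header))

proc addBlockNumberToHashLookup(db: var Table[BlockNumber, Hash256],
                                blockNumber: BlockNumber, blockHash: Hash256) =
  # new shape: the caller already knows both values, so just store them
  db[blockNumber] = blockHash

when isMainModule:
  var lookup = initTable[BlockNumber, Hash256]()
  var headerHash: Hash256          # would be header.blockHash, computed once
  headerHash[0] = 0xab'u8
  lookup.addBlockNumberToHashLookup(4514995'u64, headerHash)
  doAssert lookup[4514995'u64] == headerHash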

View File

@ -125,7 +125,7 @@ print(
.to_string(
formatters=dict(
dict.fromkeys(["bpsd", "tpsd", "timed"], "{:,.2%}".format),
**dict.fromkeys(["bps_x", "bps_y", "tps_x"], "{:,.2f}".format),
**dict.fromkeys(["bps_x", "bps_y", "tps_x", "tps_y"], "{:,.2f}".format),
)
)
)

View File

@ -105,7 +105,7 @@ proc parseHeader(blockHeader: JsonNode, testStatusIMPL: var TestStatus): BlockHe
result = normalizeBlockHeader(blockHeader).parseBlockHeader
var blockHash: Hash256
blockHeader.fromJson "hash", blockHash
check blockHash == hash(result)
check blockHash == rlpHash(result)
proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] =
case withdrawals.kind
@ -256,7 +256,7 @@ proc collectDebugData(ctx: var TestCtx) =
}
proc runTestCtx(ctx: var TestCtx, com: CommonRef, testStatusIMPL: var TestStatus) =
com.db.persistHeaderToDb(ctx.genesisHeader,
doAssert com.db.persistHeader(ctx.genesisHeader,
com.consensus == ConsensusType.POS)
check com.db.getCanonicalHead().blockHash == ctx.genesisHeader.blockHash
let checkSeal = ctx.shouldCheckSeal

View File

@ -87,8 +87,8 @@ proc persistFixtureBlock(chainDB: CoreDbRef) =
# Manually inserting header to avoid any parent checks
chainDB.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
chainDB.addBlockNumberToHashLookup(header)
discard chainDB.persistTransactions(header.blockNumber, getBlockBody4514995().transactions)
discard chainDB.persistReceipts(getReceipts4514995())
chainDB.persistTransactions(header.blockNumber, getBlockBody4514995().transactions)
chainDB.persistReceipts(getReceipts4514995())
proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv =
var
@ -153,7 +153,9 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
signedTx1 = signTransaction(unsignedTx1, acc.privateKey, com.chainId, eip155)
signedTx2 = signTransaction(unsignedTx2, acc.privateKey, com.chainId, eip155)
txs = [signedTx1, signedTx2]
txRoot = com.db.persistTransactions(blockNumber, txs)
com.db.persistTransactions(blockNumber, txs)
let txRoot = com.db.ctx.getMpt(CtTxs).getColumn().state().valueOr(EMPTY_ROOT_HASH)
vmState.receipts = newSeq[Receipt](txs.len)
vmState.cumulativeGasUsed = 0
@ -163,8 +165,9 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
doAssert(rc.isOk, "Invalid transaction: " & rc.error)
vmState.receipts[txIndex] = makeReceipt(vmState, tx.txType)
com.db.persistReceipts(vmState.receipts)
let
receiptRoot = com.db.persistReceipts(vmState.receipts)
receiptRoot = com.db.ctx.getMpt(CtReceipts).getColumn().state().valueOr(EMPTY_ROOT_HASH)
date = dateTime(2017, mMar, 30)
timeStamp = date.toTime.toUnix.EthTime
difficulty = com.calcDifficulty(timeStamp, parent)
@ -192,7 +195,7 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
let uncles = [header]
header.ommersHash = com.db.persistUncles(uncles)
com.db.persistHeaderToDb(header,
doAssert com.db.persistHeader(header,
com.consensus == ConsensusType.POS)
com.db.persistFixtureBlock()
result = TestEnv(

View File

@ -863,7 +863,7 @@ proc runTxPackerTests(noisy = true) =
check bdy == blockBody
else:
# The canonical head will be set to hdr if it scores high enough
# (see implementation of db_chain.persistHeaderToDb()).
# (see implementation of db_chain.persistHeader()).
let
canonScore = xq.chain.com.db.getScore(canonicalHead.blockHash)
headerScore = xq.chain.com.db.getScore(hdr.blockHash)