Avoid recomputing hashes when persisting data (#2350)
This commit is contained in:
parent 5a5cc6295e
commit 189a20bbae
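The patch below touches many call sites, but it boils down to two API changes: lookups such as getScore now return an Opt value (consumed with valueOr) instead of raising RlpError, and the raising persistHeaderToDb/persistHeaderToDbWithoutSetHead pair is replaced by persistHeader/persistScore procs that report failure through a bool result. The persistTransactions, persistReceipts and persistWithdrawals procs likewise drop their Hash256 return value so root hashes are no longer recomputed on every write. Below is a minimal, self-contained sketch of how callers consume that style — the table-backed scores, getScore and persistScore here are invented for illustration and are not the CoreDbRef implementations from this commit:

# A minimal sketch (not part of this commit): `scores`, `getScore` and
# `persistScore` are hypothetical stand-ins for the CoreDbRef procs in the diff.
import std/tables
import results  # nim-results, which provides Opt[T] and valueOr

var scores = {"0xaaaa": 1000'u64}.toTable  # block hash -> total difficulty

proc getScore(db: Table[string, uint64]; blockHash: string): Opt[uint64] =
  ## A missing entry is Opt.none instead of a raised RlpError.
  if blockHash in db:
    Opt.some(db[blockHash])
  else:
    Opt.none(uint64)

proc persistScore(db: var Table[string, uint64];
                  blockHash: string; score: uint64): bool =
  ## Failure is reported through the bool result instead of an exception.
  db[blockHash] = score
  true

# Callers pick an explicit fallback (or bail out) via valueOr:
let known   = scores.getScore("0xaaaa").valueOr(0'u64)  # 1000
let unknown = scores.getScore("0xbbbb").valueOr(0'u64)  # 0

if not scores.persistScore("0xbbbb", known + unknown):
  echo "could not persist score"

In the real diff the same pattern appears with CoreDbRef, Hash256 and UInt256; only the calling convention, not these toy types, is what the commit standardises.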
@@ -104,14 +104,16 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
     stateRootChpt = vmState.parent.stateRoot # Check point
   ? vmState.processBlock(blk)
 
+  if not c.db.persistHeader(
+      header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory):
+    return err("Could not persist header")
+
   try:
-    c.db.persistHeaderToDb(
-      header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory)
-    discard c.db.persistTransactions(header.blockNumber, blk.transactions)
-    discard c.db.persistReceipts(vmState.receipts)
+    c.db.persistTransactions(header.blockNumber, blk.transactions)
+    c.db.persistReceipts(vmState.receipts)
 
     if blk.withdrawals.isSome:
-      discard c.db.persistWithdrawals(blk.withdrawals.get)
+      c.db.persistWithdrawals(blk.withdrawals.get)
   except CatchableError as exc:
     return err(exc.msg)
 

@@ -61,7 +61,7 @@ proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
 
   doAssert stateDB.rootHash == genesisHeader.stateRoot
 
-  t.com.db.persistHeaderToDb(genesisHeader,
+  doAssert t.com.db.persistHeader(genesisHeader,
     t.com.consensus == ConsensusType.POS)
   doAssert(t.com.db.getCanonicalHead().blockHash == genesisHeader.blockHash)
 
@@ -156,7 +156,8 @@ proc newPayload*(ben: BeaconEngineRef,
     let ttd = com.ttd.get(high(common.BlockNumber))
 
     if version == Version.V1:
-      let td = db.getScore(header.parentHash)
+      let td = db.getScore(header.parentHash).valueOr:
+        0.u256
       if (not com.forkGTE(MergeFork)) and td < ttd:
         warn "Ignoring pre-merge payload",
           number = header.blockNumber, hash = blockHash, td, ttd

@@ -174,11 +174,10 @@ proc tooLargeRequest*(msg: string): ref InvalidRequest =
 
 proc latestValidHash*(db: CoreDbRef,
                       parent: common.BlockHeader,
-                      ttd: DifficultyInt): common.Hash256
-                      {.gcsafe, raises: [RlpError].} =
+                      ttd: DifficultyInt): common.Hash256 =
   if parent.isGenesis:
     return common.Hash256()
-  let ptd = db.getScore(parent.parentHash)
+  let ptd = db.getScore(parent.parentHash).valueOr(0.u256)
   if ptd >= ttd:
     parent.blockHash
   else:
@@ -338,14 +338,14 @@ func forkId*(com: CommonRef, head: BlockNumber, time: EthTime): ForkID {.gcsafe.
 func isEIP155*(com: CommonRef, number: BlockNumber): bool =
   com.config.eip155Block.isSome and number >= com.config.eip155Block.get
 
-proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool
-    {.gcsafe, raises: [CatchableError].} =
+proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool =
   if com.config.terminalTotalDifficulty.isNone:
     return false
 
   let
     ttd = com.config.terminalTotalDifficulty.get()
-    ptd = com.db.getScore(header.parentHash)
+    ptd = com.db.getScore(header.parentHash).valueOr:
+      return false
     td = ptd + header.difficulty
   ptd >= ttd and td >= ttd
 

@@ -358,15 +358,13 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool =
 func isPragueOrLater*(com: CommonRef, t: EthTime): bool =
   com.config.pragueTime.isSome and t >= com.config.pragueTime.get
 
-proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType
-    {.gcsafe, raises: [CatchableError].} =
+proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType =
   if com.isBlockAfterTtd(header):
     return ConsensusType.POS
 
   return com.config.consensusType
 
-proc initializeEmptyDb*(com: CommonRef)
-    {.gcsafe, raises: [CatchableError].} =
+proc initializeEmptyDb*(com: CommonRef) =
   let kvt = com.db.newKvt()
   proc contains(kvt: CoreDxKvtRef; key: openArray[byte]): bool =
     kvt.hasKey(key).expect "valid bool"

@@ -374,8 +372,10 @@ proc initializeEmptyDb*(com: CommonRef)
     info "Writing genesis to DB"
     doAssert(com.genesisHeader.blockNumber.isZero,
       "can't commit genesis block with number > 0")
-    com.db.persistHeaderToDb(com.genesisHeader,
-      com.consensusType == ConsensusType.POS)
+    doAssert(com.db.persistHeader(com.genesisHeader,
+      com.consensusType == ConsensusType.POS,
+      startOfHistory=com.genesisHeader.parentHash),
+      "can persist genesis header")
     doAssert(canonicalHeadHashKey().toOpenArray in kvt)
 
 proc syncReqNewHead*(com: CommonRef; header: BlockHeader)
@@ -25,10 +25,6 @@ type
     ## common block chain configuration
     ## used throughout entire app
 
-    validateBlock: bool ##\
-      ## If turn off, `persistBlocks` will always return
-      ## ValidationResult.OK and disable extraValidation too.
-
     extraValidation: bool ##\
       ## Trigger extra validation, currently within `persistBlocks()`
       ## function only.

@@ -53,7 +49,6 @@ proc newChain*(com: CommonRef,
   ## chain validation if set `true`.
   ChainRef(
     com: com,
-    validateBlock: true,
     extraValidation: extraValidation,
     vmState: vmState
   )

@@ -64,7 +59,6 @@ func newChain*(com: CommonRef): ChainRef =
   let extraValidation = com.consensus == ConsensusType.POS
   ChainRef(
     com: com,
-    validateBlock: true,
     extraValidation: extraValidation,
   )
 

@@ -87,10 +81,6 @@ proc com*(c: ChainRef): CommonRef =
   ## Getter
   c.com
 
-proc validateBlock*(c: ChainRef): bool =
-  ## Getter
-  c.validateBlock
-
 proc extraValidation*(c: ChainRef): bool =
   ## Getter
   c.extraValidation

@@ -109,10 +99,6 @@ proc currentBlock*(c: ChainRef): BlockHeader
 # ------------------------------------------------------------------------------
 # Public `Chain` setters
 # ------------------------------------------------------------------------------
-proc `validateBlock=`*(c: ChainRef; validateBlock: bool) =
-  ## Setter. If set `true`, the assignment value `validateBlock` enables
-  ## block execution, else it will always return ValidationResult.OK
-  c.validateBlock = validateBlock
 
 proc `extraValidation=`*(c: ChainRef; extraValidation: bool) =
   ## Setter. If set `true`, the assignment value `extraValidation` enables
@@ -86,7 +86,9 @@ proc persistBlocksImpl(
     toBlock = blocks[blocks.high()].header.blockNumber
   trace "Persisting blocks", fromBlock, toBlock
 
-  var txs = 0
+  var
+    txs = 0
+    gas = GasInt(0)
   for blk in blocks:
     template header(): BlockHeader =
       blk.header

@@ -97,11 +99,10 @@ proc persistBlocksImpl(
       debug "Cannot update VmState", blockNumber = header.blockNumber
       return err("Cannot update VmState to block " & $header.blockNumber)
 
-    if c.validateBlock and c.extraValidation and c.verifyFrom <= header.blockNumber:
+    if c.extraValidation and c.verifyFrom <= header.blockNumber:
       # TODO: how to checkseal from here
       ?c.com.validateHeaderAndKinship(blk, checkSealOK = false)
 
-    if c.validateBlock:
-      ?vmState.processBlock(blk)
+    ?vmState.processBlock(blk)
 
     # when defined(nimbusDumpDebuggingMetaData):

@@ -110,32 +111,28 @@ proc persistBlocksImpl(
     #   vmState.dumpDebuggingMetaData(header, body)
     #   warn "Validation error. Debugging metadata dumped."
 
-    try:
     if NoPersistHeader notin flags:
-      c.db.persistHeaderToDb(
+      if not c.db.persistHeader(
         header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory
-      )
+      ):
+        return err("Could not persist header")
 
     if NoSaveTxs notin flags:
-      discard c.db.persistTransactions(header.blockNumber, blk.transactions)
+      c.db.persistTransactions(header.blockNumber, blk.transactions)
 
     if NoSaveReceipts notin flags:
-      discard c.db.persistReceipts(vmState.receipts)
+      c.db.persistReceipts(vmState.receipts)
 
     if NoSaveWithdrawals notin flags and blk.withdrawals.isSome:
-      discard c.db.persistWithdrawals(blk.withdrawals.get)
-    except CatchableError as exc:
-      return err(exc.msg)
+      c.db.persistWithdrawals(blk.withdrawals.get)
 
     # update currentBlock *after* we persist it
     # so the rpc return consistent result
     # between eth_blockNumber and eth_syncing
     c.com.syncCurrent = header.blockNumber
 
-    # Done with this block
-    # lapTx.commit()
-
     txs += blk.transactions.len
+    gas += blk.header.gasUsed
 
   dbTx.commit()
 

@@ -154,7 +151,7 @@ proc persistBlocksImpl(
     except CatchableError as exc:
       warn "Could not clean up old blocks from history", err = exc.msg
 
-  ok((blocks.len, txs, vmState.cumulativeGasUsed))
+  ok((blocks.len, txs, gas))
 
 # ------------------------------------------------------------------------------
 # Public `ChainDB` methods

@@ -163,11 +160,10 @@ proc persistBlocksImpl(
 proc insertBlockWithoutSetHead*(c: ChainRef, blk: EthBlock): Result[void, string] =
   discard ?c.persistBlocksImpl([blk], {NoPersistHeader, NoSaveReceipts})
 
-  try:
-    c.db.persistHeaderToDbWithoutSetHead(blk.header, c.com.startOfHistory)
+  if not c.db.persistHeader(blk.header.blockHash, blk.header, c.com.startOfHistory):
+    return err("Could not persist header")
+
   ok()
-  except RlpError as exc:
-    err(exc.msg)
 
 proc setCanonical*(c: ChainRef, header: BlockHeader): Result[void, string] =
   if header.parentHash == Hash256():
@@ -117,7 +117,7 @@ func miningHash(header: BlockHeader): Hash256 =
     timestamp: header.timestamp,
     extraData: header.extraData)
 
-  rlp.encode(miningHeader).keccakHash
+  rlpHash(miningHeader)
 
 # ---------------
 

@@ -45,7 +45,7 @@ proc getBlockHeader*(
     n: BlockNumber;
     output: var BlockHeader;
       ): bool
-      {.gcsafe, raises: [RlpError].}
+      {.gcsafe.}
 
 proc getBlockHeader*(
     db: CoreDbRef,

@@ -58,11 +58,12 @@ proc getBlockHash*(
     n: BlockNumber;
     output: var Hash256;
       ): bool
-      {.gcsafe, raises: [RlpError].}
+      {.gcsafe.}
 
 proc addBlockNumberToHashLookup*(
     db: CoreDbRef;
-    header: BlockHeader;
+    blockNumber: BlockNumber;
+    blockHash: Hash256;
       ) {.gcsafe.}
 
 proc getBlockHeader*(

@@ -72,9 +73,7 @@ proc getBlockHeader*(
       ): bool
       {.gcsafe.}
 
-# Copied from `utils/utils` which cannot be imported here in order to
-# avoid circular imports.
-func hash(b: BlockHeader): Hash256
+proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash256] {.gcsafe.}
 
 # ------------------------------------------------------------------------------
 # Private helpers
@@ -96,14 +95,13 @@ template discardRlpException(info: static[string]; code: untyped) =
 iterator findNewAncestors(
     db: CoreDbRef;
     header: BlockHeader;
-      ): BlockHeader
-      {.gcsafe, raises: [RlpError,BlockNotFound].} =
+      ): BlockHeader =
   ## Returns the chain leading up from the given header until the first
   ## ancestor it has in common with our canonical chain.
   var h = header
   var orig: BlockHeader
   while true:
-    if db.getBlockHeader(h.blockNumber, orig) and orig.hash == h.hash:
+    if db.getBlockHeader(h.blockNumber, orig) and orig.rlpHash == h.rlpHash:
       break
 
     yield h

@@ -111,7 +109,9 @@ iterator findNewAncestors(
     if h.parentHash == GENESIS_PARENT_HASH:
       break
     else:
-      h = db.getBlockHeader(h.parentHash)
+      if not db.getBlockHeader(h.parentHash, h):
+        warn "Could not find parent while iterating", hash = h.parentHash
+        break
 
 # ------------------------------------------------------------------------------
 # Public iterators

@@ -150,23 +150,21 @@ iterator getBlockTransactionData*(
 iterator getBlockTransactions*(
     db: CoreDbRef;
     header: BlockHeader;
-      ): Transaction
-      {.gcsafe, raises: [RlpError].} =
+      ): Transaction =
   for encodedTx in db.getBlockTransactionData(header.txRoot):
+    try:
       yield rlp.decode(encodedTx, Transaction)
+    except RlpError as exc:
+      warn "Cannot decode database transaction", data = toHex(encodedTx), error = exc.msg
 
 iterator getBlockTransactionHashes*(
     db: CoreDbRef;
     blockHeader: BlockHeader;
-      ): Hash256
-      {.gcsafe, raises: [RlpError].} =
+      ): Hash256 =
   ## Returns an iterable of the transaction hashes from th block specified
   ## by the given block header.
   for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
-    let tx = rlp.decode(encodedTx, Transaction)
-    yield rlpHash(tx) # beware EIP-4844
+    yield keccakHash(encodedTx)
 
 
 iterator getWithdrawalsData*(
     db: CoreDbRef;
@@ -232,9 +230,6 @@ iterator getReceipts*(
 # Private helpers
 # ------------------------------------------------------------------------------
 
-func hash(b: BlockHeader): Hash256 =
-  rlpHash(b)
-
 proc removeTransactionFromCanonicalChain(
     db: CoreDbRef;
     transactionHash: Hash256;

@@ -248,10 +243,14 @@ proc removeTransactionFromCanonicalChain(
 proc setAsCanonicalChainHead(
     db: CoreDbRef;
     headerHash: Hash256;
-      ) {.gcsafe, raises: [RlpError,BlockNotFound].} =
+    header: BlockHeader;
+      ) =
   ## Sets the header as the canonical chain HEAD.
-  let header = db.getBlockHeader(headerHash)
 
+  # TODO This code handles reorgs - this should be moved elsewhere because we'll
+  # be handling reorgs mainly in-memory
+  if header.blockNumber == 0 or
+      db.getCanonicalHeaderHash().valueOr(Hash256()) != header.parentHash:
     var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
     reverse(newCanonicalHeaders)
     for h in newCanonicalHeaders:

@@ -259,13 +258,17 @@ proc setAsCanonicalChainHead(
       if not db.getBlockHash(h.blockNumber, oldHash):
         break
 
+      try:
         let oldHeader = db.getBlockHeader(oldHash)
         for txHash in db.getBlockTransactionHashes(oldHeader):
           db.removeTransactionFromCanonicalChain(txHash)
           # TODO re-add txn to internal pending pool (only if local sender)
+      except BlockNotFound:
+        warn "Could not load old header", oldHash
 
     for h in newCanonicalHeaders:
-      db.addBlockNumberToHashLookup(h)
+      # TODO don't recompute block hash
+      db.addBlockNumberToHashLookup(h.blockNumber, h.blockHash)
 
   let canonicalHeadHash = canonicalHeadHashKey()
   db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
@@ -339,8 +342,7 @@ proc exists*(db: CoreDbRef, hash: Hash256): bool =
 proc getSavedStateBlockNumber*(
     db: CoreDbRef;
     relax = false;
-      ): BlockNumber
-      {.gcsafe, raises: [RlpError].} =
+      ): BlockNumber =
   ## Returns the block number registered when the database was last time
   ## updated, or `BlockNumber(0)` if there was no updata found.
   ##

@@ -389,24 +391,29 @@ proc getBlockHeader*(
 proc getHash(
     db: CoreDbRef;
     key: DbKey;
-    output: var Hash256;
-      ): bool
-      {.gcsafe, raises: [RlpError].} =
+      ): Opt[Hash256] =
   let data = db.newKvt().get(key.toOpenArray).valueOr:
     if error.error != KvtNotFound:
       warn logTxt "getHash()", key, action="get()", error=($$error)
-    return false
-  output = rlp.decode(data, Hash256)
-  true
+    return Opt.none(Hash256)
+  try:
+    Opt.some(rlp.decode(data, Hash256))
+  except RlpError as exc:
+    warn logTxt "getHash()", key, action="rlp.decode()", error=exc.msg
+    Opt.none(Hash256)
+
+proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash256] =
+  db.getHash(canonicalHeadHashKey())
 
 proc getCanonicalHead*(
     db: CoreDbRef;
     output: var BlockHeader;
       ): bool =
+  let headHash = db.getCanonicalHeaderHash().valueOr:
+    return false
   discardRlpException "getCanonicalHead()":
-    var headHash: Hash256
-    if db.getHash(canonicalHeadHashKey(), headHash) and
-       db.getBlockHeader(headHash, output):
+    if db.getBlockHeader(headHash, output):
       return true
 
 proc getCanonicalHead*(

@@ -417,35 +424,27 @@ proc getCanonicalHead*(
     raise newException(
       CanonicalHeadNotFound, "No canonical head set for this chain")
 
-proc getCanonicalHeaderHash*(
-    db: CoreDbRef;
-      ): Hash256
-      {.gcsafe, raises: [RlpError].} =
-  discard db.getHash(canonicalHeadHashKey(), result)
-
 proc getBlockHash*(
     db: CoreDbRef;
     n: BlockNumber;
     output: var Hash256;
       ): bool =
   ## Return the block hash for the given block number.
-  db.getHash(blockNumberToHashKey(n), output)
+  output = db.getHash(blockNumberToHashKey(n)).valueOr:
+    return false
+  true
 
 proc getBlockHash*(
     db: CoreDbRef;
     n: BlockNumber;
       ): Hash256
-      {.gcsafe, raises: [RlpError,BlockNotFound].} =
+      {.gcsafe, raises: [BlockNotFound].} =
   ## Return the block hash for the given block number.
-  if not db.getHash(blockNumberToHashKey(n), result):
+  if not db.getBlockHash(n, result):
     raise newException(BlockNotFound, "No block hash for number " & $n)
 
-proc getHeadBlockHash*(
-    db: CoreDbRef;
-      ): Hash256
-      {.gcsafe, raises: [RlpError].} =
-  if not db.getHash(canonicalHeadHashKey(), result):
-    result = Hash256()
+proc getHeadBlockHash*(db: CoreDbRef): Hash256 =
+  db.getHash(canonicalHeadHashKey()).valueOr(Hash256())
 
 proc getBlockHeader*(
     db: CoreDbRef;

@@ -460,8 +459,7 @@ proc getBlockHeader*(
 proc getBlockHeaderWithHash*(
     db: CoreDbRef;
     n: BlockNumber;
-      ): Option[(BlockHeader, Hash256)]
-      {.gcsafe, raises: [RlpError].} =
+      ): Option[(BlockHeader, Hash256)] =
   ## Returns the block header and its hash, with the given number in the
   ## canonical chain. Hash is returned to avoid recomputing it
   var hash: Hash256

@@ -481,7 +479,7 @@ proc getBlockHeader*(
     db: CoreDbRef;
     n: BlockNumber;
       ): BlockHeader
-      {.gcsafe, raises: [RlpError,BlockNotFound].} =
+      {.raises: [BlockNotFound].} =
   ## Returns the block header with the given number in the canonical chain.
   ## Raises BlockNotFound error if the block is not in the DB.
   db.getBlockHeader(db.getBlockHash(n))
@@ -489,14 +487,17 @@ proc getBlockHeader*(
 proc getScore*(
     db: CoreDbRef;
     blockHash: Hash256;
-      ): UInt256
-      {.gcsafe, raises: [RlpError].} =
+      ): Opt[UInt256] =
   let data = db.newKvt()
         .get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
     if error.error != KvtNotFound:
       warn logTxt "getScore()", blockHash, action="get()", error=($$error)
-    return
-  rlp.decode(data, UInt256)
+    return Opt.none(UInt256)
+  try:
+    Opt.some(rlp.decode(data, UInt256))
+  except RlpError as exc:
+    warn logTxt "getScore()", data = data.toHex(), error=exc.msg
+    Opt.none(UInt256)
 
 proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
   ## for testing purpose

@@ -506,36 +507,17 @@ proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
     return
 
 proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
-  const info = "getTd()"
-  let bytes = db.newKvt()
-        .get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
-    if error.error != KvtNotFound:
-      warn logTxt info, blockHash, action="get()", error=($$error)
+  td = db.getScore(blockHash).valueOr:
     return false
-  discardRlpException info:
-    td = rlp.decode(bytes, UInt256)
-    return true
+  true
 
 proc headTotalDifficulty*(
     db: CoreDbRef;
-      ): UInt256
-      {.gcsafe, raises: [RlpError].} =
-  # this is actually a combination of `getHash` and `getScore`
-  const
-    info = "headTotalDifficulty()"
-    key = canonicalHeadHashKey()
-  let
-    kvt = db.newKvt()
-    data = kvt.get(key.toOpenArray).valueOr:
-      if error.error != KvtNotFound:
-        warn logTxt info, key, action="get()", error=($$error)
-      return 0.u256
-    blockHash = rlp.decode(data, Hash256)
-    numData = kvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
-      warn logTxt info, blockHash, action="get()", error=($$error)
+      ): UInt256 =
+  let blockHash = db.getCanonicalHeaderHash().valueOr:
     return 0.u256
 
-  rlp.decode(numData, UInt256)
+  db.getScore(blockHash).valueOr(0.u256)
 
 proc getAncestorsHashes*(
     db: CoreDbRef;
@@ -549,12 +531,13 @@ proc getAncestorsHashes*(
   result = newSeq[Hash256](ancestorCount)
   while ancestorCount > 0:
     h = db.getBlockHeader(h.parentHash)
-    result[ancestorCount - 1] = h.hash
+    result[ancestorCount - 1] = h.rlpHash
     dec ancestorCount
 
-proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
-  let blockNumberKey = blockNumberToHashKey(header.blockNumber)
-  db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
+proc addBlockNumberToHashLookup*(
+    db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash256) =
+  let blockNumberKey = blockNumberToHashKey(blockNumber)
+  db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(blockHash)).isOkOr:
     warn logTxt "addBlockNumberToHashLookup()",
       blockNumberKey, action="put()", error=($$error)
 

@@ -562,12 +545,12 @@ proc persistTransactions*(
     db: CoreDbRef;
     blockNumber: BlockNumber;
     transactions: openArray[Transaction];
-      ): Hash256 =
+      ) =
   const
     info = "persistTransactions()"
 
   if transactions.len == 0:
-    return EMPTY_ROOT_HASH
+    return
 
   let
     mpt = db.ctx.getMpt(CtTxs)

@@ -577,25 +560,20 @@ proc persistTransactions*(
     let
       encodedKey = rlp.encode(idx)
       encodedTx = rlp.encode(tx)
-      txHash = rlpHash(tx)
+      txHash = keccakHash(encodedTx)
       blockKey = transactionHashToBlockKey(txHash)
       txKey: TransactionKey = (blockNumber, idx)
     mpt.merge(encodedKey, encodedTx).isOkOr:
       warn logTxt info, idx, action="merge()", error=($$error)
-      return EMPTY_ROOT_HASH
+      return
     kvt.put(blockKey.toOpenArray, rlp.encode(txKey)).isOkOr:
       trace logTxt info, blockKey, action="put()", error=($$error)
-      return EMPTY_ROOT_HASH
-  mpt.getColumn.state.valueOr:
-    when extraTraceMessages:
-      warn logTxt info, action="state()"
-    return EMPTY_ROOT_HASH
+      return
 
 proc forgetHistory*(
     db: CoreDbRef;
     blockNum: BlockNumber;
-      ): bool
-      {.gcsafe, raises: [RlpError].} =
+      ): bool =
   ## Remove all data related to the block number argument `num`. This function
   ## returns `true`, if some history was available and deleted.
   var blockHash: Hash256
@@ -615,8 +593,7 @@ proc getTransaction*(
     txRoot: Hash256;
     txIndex: int;
     res: var Transaction;
-      ): bool
-      {.gcsafe, raises: [RlpError].} =
+      ): bool =
   const
     info = "getTransaction()"
   let

@@ -632,7 +609,12 @@ proc getTransaction*(
     if error.error != MptNotFound:
       warn logTxt info, txIndex, action="fetch()", error=($$error)
     return false
+  try:
     res = rlp.decode(txData, Transaction)
+  except RlpError as exc:
+    warn logTxt info,
+      txRoot, action="rlp.decode()", col=($$col), error=exc.msg
+    return false
   true
 
 proc getTransactionCount*(

@@ -695,19 +677,16 @@ proc getUncles*(
 proc persistWithdrawals*(
     db: CoreDbRef;
     withdrawals: openArray[Withdrawal];
-      ): Hash256 =
+      ) =
   const info = "persistWithdrawals()"
   if withdrawals.len == 0:
-    return EMPTY_ROOT_HASH
+    return
 
   let mpt = db.ctx.getMpt(CtWithdrawals)
   for idx, wd in withdrawals:
     mpt.merge(rlp.encode(idx), rlp.encode(wd)).isOkOr:
       warn logTxt info, idx, action="merge()", error=($$error)
-      return EMPTY_ROOT_HASH
-  mpt.getColumn.state.valueOr:
-    warn logTxt info, action="state()"
-    return EMPTY_ROOT_HASH
+      return
 
 proc getWithdrawals*(
     db: CoreDbRef;

@@ -791,7 +770,7 @@ proc getUncleHashes*(
       ): seq[Hash256]
       {.gcsafe, raises: [RlpError,ValueError].} =
   for blockHash in blockHashes:
-    result &= db.getBlockBody(blockHash).uncles.mapIt(it.hash)
+    result &= db.getBlockBody(blockHash).uncles.mapIt(it.rlpHash)
 
 proc getUncleHashes*(
     db: CoreDbRef;

@@ -806,7 +785,7 @@ proc getUncleHashes*(
       warn logTxt "getUncleHashes()",
         ommersHash=header.ommersHash, action="get()", `error`=($$error)
       return @[]
-    return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.hash)
+    return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.rlpHash)
 
 proc getTransactionKey*(
     db: CoreDbRef;
@@ -869,19 +848,15 @@ proc setHead*(
 proc persistReceipts*(
     db: CoreDbRef;
     receipts: openArray[Receipt];
-      ): Hash256 =
+      ) =
   const info = "persistReceipts()"
   if receipts.len == 0:
-    return EMPTY_ROOT_HASH
+    return
 
   let mpt = db.ctx.getMpt(CtReceipts)
   for idx, rec in receipts:
     mpt.merge(rlp.encode(idx), rlp.encode(rec)).isOkOr:
       warn logTxt info, idx, action="merge()", error=($$error)
-  mpt.getColumn.state.valueOr:
-    when extraTraceMessages:
-      trace logTxt info, action="state()"
-    return EMPTY_ROOT_HASH
 
 proc getReceipts*(
     db: CoreDbRef;
@@ -893,62 +868,86 @@ proc getReceipts*(
     receipts.add(r)
   return receipts
 
-proc persistHeaderToDb*(
+proc persistScore*(
+    db: CoreDbRef;
+    blockHash: Hash256;
+    score: UInt256
+      ): bool =
+  let
+    kvt = db.newKvt()
+    scoreKey = blockHashToScoreKey(blockHash)
+  kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
+    warn logTxt "persistHeader()",
+      scoreKey, action="put()", `error`=($$error)
+    return
+  true
+
+proc persistHeader*(
+    db: CoreDbRef;
+    blockHash: Hash256;
+    header: BlockHeader;
+    startOfHistory = GENESIS_PARENT_HASH;
+      ): bool =
+  let
+    kvt = db.newKvt()
+    isStartOfHistory = header.parentHash == startOfHistory
+
+  if not isStartOfHistory and not db.headerExists(header.parentHash):
+    warn logTxt "persistHeaderWithoutSetHead()",
+      blockHash, action="headerExists(parent)"
+    return false
+
+  kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr:
+    warn logTxt "persistHeaderWithoutSetHead()",
+      blockHash, action="put()", `error`=($$error)
+    return false
+
+  let
+    parentScore = if isStartOfHistory:
+      0.u256
+    else:
+      db.getScore(header.parentHash).valueOr:
+        # TODO it's slightly wrong to fail here and leave the block in the db,
+        # but this code is going away soon enough
+        return false
+
+    score = parentScore + header.difficulty
+  # After EIP-3675, difficulty is set to 0 but we still save the score for
+  # each block to simplify totalDifficulty reporting
+  # TODO get rid of this and store a single value
+  if not db.persistScore(blockHash, score):
+    return false
+
+  db.addBlockNumberToHashLookup(header.blockNumber, blockHash)
+  true
+
+proc persistHeader*(
     db: CoreDbRef;
     header: BlockHeader;
     forceCanonical: bool;
     startOfHistory = GENESIS_PARENT_HASH;
-      ) {.gcsafe, raises: [RlpError,EVMError].} =
-  let isStartOfHistory = header.parentHash == startOfHistory
-  let headerHash = header.blockHash
-  if not isStartOfHistory and not db.headerExists(header.parentHash):
-    raise newException(ParentNotFound, "Cannot persist block header " &
-      $headerHash & " with unknown parent " & $header.parentHash)
-  let kvt = db.newKvt()
-  kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
-    warn logTxt "persistHeaderToDb()",
-      headerHash, action="put()", `error`=($$error)
-    return
-
-  let score = if isStartOfHistory: header.difficulty
-              else: db.getScore(header.parentHash) + header.difficulty
-  let scoreKey = blockHashToScoreKey(headerHash)
-  kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
-    warn logTxt "persistHeaderToDb()",
-      scoreKey, action="put()", `error`=($$error)
-    return
-
-  db.addBlockNumberToHashLookup(header)
-
-  if not forceCanonical:
-    var canonHeader: BlockHeader
-    if db.getCanonicalHead canonHeader:
-      let headScore = db.getScore(canonHeader.hash)
-      if score <= headScore:
-        return
-
-  db.setAsCanonicalChainHead(headerHash)
-
-proc persistHeaderToDbWithoutSetHead*(
-    db: CoreDbRef;
-    header: BlockHeader;
-    startOfHistory = GENESIS_PARENT_HASH;
-      ) {.gcsafe, raises: [RlpError].} =
-  let isStartOfHistory = header.parentHash == startOfHistory
-  let headerHash = header.blockHash
-  let score = if isStartOfHistory: header.difficulty
-              else: db.getScore(header.parentHash) + header.difficulty
+      ): bool =
   let
-    kvt = db.newKvt()
-    scoreKey = blockHashToScoreKey(headerHash)
-  kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
-    warn logTxt "persistHeaderToDbWithoutSetHead()",
-      scoreKey, action="put()", `error`=($$error)
-    return
-  kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
-    warn logTxt "persistHeaderToDbWithoutSetHead()",
-      headerHash, action="put()", `error`=($$error)
-    return
+    blockHash = header.blockHash
+
+  if not db.persistHeader(blockHash, header, startOfHistory):
+    return false
+
+  if not forceCanonical and header.parentHash != startOfHistory:
+    let
+      canonicalHash = db.getCanonicalHeaderHash().valueOr:
+        return false
+      canonScore = db.getScore(canonicalHash).valueOr:
+        return false
+      # TODO no need to load score from database _really_, but this code is
+      # hopefully going away soon
+      score = db.getScore(blockHash).valueOr:
+        return false
+    if score <= canonScore:
+      return true
+
+  db.setAsCanonicalChainHead(blockHash, header)
+  true
 
 proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
   ## Persists the list of uncles to the database.
@@ -961,11 +960,8 @@ proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
     return EMPTY_ROOT_HASH
 
 
-proc safeHeaderHash*(
-    db: CoreDbRef;
-      ): Hash256
-      {.gcsafe, raises: [RlpError].} =
-  discard db.getHash(safeHashKey(), result)
+proc safeHeaderHash*(db: CoreDbRef): Hash256 =
+  db.getHash(safeHashKey()).valueOr(Hash256())
 
 proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
   let safeHashKey = safeHashKey()

@@ -976,9 +972,8 @@ proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
 
 proc finalizedHeaderHash*(
     db: CoreDbRef;
-      ): Hash256
-      {.gcsafe, raises: [RlpError].} =
-  discard db.getHash(finalizedHashKey(), result)
+      ): Hash256 =
+  db.getHash(finalizedHashKey()).valueOr(Hash256())
 
 proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
   let finalizedHashKey = finalizedHashKey()

@@ -996,7 +991,7 @@ proc safeHeader*(
 proc finalizedHeader*(
     db: CoreDbRef;
       ): BlockHeader
-      {.gcsafe, raises: [RlpError,BlockNotFound].} =
+      {.gcsafe, raises: [BlockNotFound].} =
   db.getBlockHeader(db.finalizedHeaderHash)
 
 proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
@@ -238,10 +238,10 @@ proc resp(data: openArray[byte]): RespResult =
   ok(resp("0x" & data.toHex))
 
 proc getTotalDifficulty(ctx: GraphqlContextRef, blockHash: common.Hash256): RespResult =
-  try:
-    bigIntNode(getScore(ctx.chainDB, blockHash))
-  except CatchableError as e:
-    err("can't get total difficulty: " & e.msg)
+  let score = getScore(ctx.chainDB, blockHash).valueOr:
+    return err("can't get total difficulty")
+  bigIntNode(score)
 
 proc getOmmerCount(ctx: GraphqlContextRef, ommersHash: common.Hash256): RespResult =
   try:

@@ -59,18 +59,12 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
   setControlCHook(controlCHandler)
 
   let
-    start =
-      try:
-        com.db.getSavedStateBlockNumber().truncate(uint64) + 1
-      except RlpError as exc:
-        error "Could not read block number", err = exc.msg
-        quit(QuitFailure)
+    start = com.db.getSavedStateBlockNumber().truncate(uint64) + 1
 
     chain = com.newChain()
 
   var
     imported = 0'u64
-    gas = 0.u256
+    gas = GasInt(0)
     txs = 0
     time0 = Moment.now()
     csv =

@@ -121,7 +115,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
       quit(QuitFailure)
 
     txs += statsRes[].txs
-    gas += uint64 statsRes[].gas
+    gas += statsRes[].gas
     let
       time2 = Moment.now()
      diff1 = (time2 - time1).nanoseconds().float / 1000000000
@@ -131,13 +125,13 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
       blockNumber,
       blocks = imported,
       txs,
-      gas,
+      mgas = f(gas.float / 1000000),
       bps = f(blocks.len.float / diff1),
       tps = f(statsRes[].txs.float / diff1),
-      gps = f(statsRes[].gas.float / diff1),
+      mgps = f(statsRes[].gas.float / 1000000 / diff1),
       avgBps = f(imported.float / diff0),
       avgTps = f(txs.float / diff0),
-      avgGps = f(gas.truncate(uint64).float / diff0), # TODO fix truncate
+      avgMGps = f(gas.float / 1000000 / diff0),
       elapsed = shortLog(time2 - time0, 3)
 
     if csv != nil:

@@ -492,7 +492,7 @@ proc setupEthRpc*(
         return nil
 
       result = populateBlockObject(uncles[index], chainDB, false, true)
-      result.totalDifficulty = chainDB.getScore(header.hash)
+      result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256)
 
   server.rpc("eth_getUncleByBlockNumberAndIndex") do(quantityTag: BlockTag, quantity: Web3Quantity) -> BlockObject:
     # Returns information about a uncle of a block by number and uncle index position.

@@ -509,7 +509,7 @@ proc setupEthRpc*(
         return nil
 
      result = populateBlockObject(uncles[index], chainDB, false, true)
-      result.totalDifficulty = chainDB.getScore(header.hash)
+      result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256)
 
 proc getLogsForBlock(
     chain: CoreDbRef,
|
@ -139,7 +139,7 @@ proc populateTransactionObject*(tx: Transaction,
|
||||||
result.`type` = some w3Qty(tx.txType.ord)
|
result.`type` = some w3Qty(tx.txType.ord)
|
||||||
if optionalHeader.isSome:
|
if optionalHeader.isSome:
|
||||||
let header = optionalHeader.get
|
let header = optionalHeader.get
|
||||||
result.blockHash = some(w3Hash header.hash)
|
result.blockHash = some(w3Hash header.blockHash)
|
||||||
result.blockNumber = some(w3BlockNumber(header.blockNumber))
|
result.blockNumber = some(w3BlockNumber(header.blockNumber))
|
||||||
|
|
||||||
result.`from` = w3Addr tx.getSender()
|
result.`from` = w3Addr tx.getSender()
|
||||||
|
@ -197,7 +197,7 @@ proc populateBlockObject*(header: BlockHeader, chain: CoreDbRef, fullTx: bool, i
|
||||||
else:
|
else:
|
||||||
none(UInt256)
|
none(UInt256)
|
||||||
if not isUncle:
|
if not isUncle:
|
||||||
result.totalDifficulty = chain.getScore(blockHash)
|
result.totalDifficulty = chain.getScore(blockHash).valueOr(0.u256)
|
||||||
result.uncles = w3Hashes chain.getUncleHashes(header)
|
result.uncles = w3Hashes chain.getUncleHashes(header)
|
||||||
|
|
||||||
if fullTx:
|
if fullTx:
|
||||||
|
@ -228,7 +228,7 @@ proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction,
|
||||||
result = ReceiptObject()
|
result = ReceiptObject()
|
||||||
result.transactionHash = w3Hash tx.rlpHash
|
result.transactionHash = w3Hash tx.rlpHash
|
||||||
result.transactionIndex = w3Qty(txIndex)
|
result.transactionIndex = w3Qty(txIndex)
|
||||||
result.blockHash = w3Hash header.hash
|
result.blockHash = w3Hash header.blockHash
|
||||||
result.blockNumber = w3BlockNumber(header.blockNumber)
|
result.blockNumber = w3BlockNumber(header.blockNumber)
|
||||||
result.`from` = w3Addr tx.getSender()
|
result.`from` = w3Addr tx.getSender()
|
||||||
result.to = some(w3Addr tx.destination)
|
result.to = some(w3Addr tx.destination)
|
||||||
|
|
|
@ -84,9 +84,6 @@ func generateSafeAddress*(address: EthAddress, salt: ContractSalt,
|
||||||
|
|
||||||
result[0..19] = hashResult.data.toOpenArray(12, 31)
|
result[0..19] = hashResult.data.toOpenArray(12, 31)
|
||||||
|
|
||||||
func hash*(b: BlockHeader): Hash256 {.inline.} =
|
|
||||||
rlpHash(b)
|
|
||||||
|
|
||||||
proc crc32*(crc: uint32, buf: openArray[byte]): uint32 =
|
proc crc32*(crc: uint32, buf: openArray[byte]): uint32 =
|
||||||
const kcrc32 = [ 0'u32, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190,
|
const kcrc32 = [ 0'u32, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190,
|
||||||
0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320'u32, 0xf00f9344'u32, 0xd6d6a3e8'u32,
|
0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320'u32, 0xf00f9344'u32, 0xd6d6a3e8'u32,
|
||||||
|
|
|
@ -23,12 +23,12 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent: Blo
|
||||||
kvt = chainDB.newKvt()
|
kvt = chainDB.newKvt()
|
||||||
|
|
||||||
discard chainDB.setHead(parent, true)
|
discard chainDB.setHead(parent, true)
|
||||||
discard chainDB.persistTransactions(blockNumber, blk.transactions)
|
chainDB.persistTransactions(blockNumber, blk.transactions)
|
||||||
discard chainDB.persistUncles(blk.uncles)
|
discard chainDB.persistUncles(blk.uncles)
|
||||||
|
|
||||||
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
|
kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
|
||||||
raiseAssert "generatePrestate(): put() failed " & $$error
|
raiseAssert "generatePrestate(): put() failed " & $$error
|
||||||
chainDB.addBlockNumberToHashLookup(header)
|
chainDB.addBlockNumberToHashLookup(header.blockNumber, headerHash)
|
||||||
|
|
||||||
for k, v in state:
|
for k, v in state:
|
||||||
let key = hexToSeqByte(k)
|
let key = hexToSeqByte(k)
|
||||||
|
|
|
@ -125,7 +125,7 @@ print(
|
||||||
.to_string(
|
.to_string(
|
||||||
formatters=dict(
|
formatters=dict(
|
||||||
dict.fromkeys(["bpsd", "tpsd", "timed"], "{:,.2%}".format),
|
dict.fromkeys(["bpsd", "tpsd", "timed"], "{:,.2%}".format),
|
||||||
**dict.fromkeys(["bps_x", "bps_y", "tps_x"], "{:,.2f}".format),
|
**dict.fromkeys(["bps_x", "bps_y", "tps_x", "tps_y"], "{:,.2f}".format),
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
|
@ -105,7 +105,7 @@ proc parseHeader(blockHeader: JsonNode, testStatusIMPL: var TestStatus): BlockHe
|
||||||
result = normalizeBlockHeader(blockHeader).parseBlockHeader
|
result = normalizeBlockHeader(blockHeader).parseBlockHeader
|
||||||
var blockHash: Hash256
|
var blockHash: Hash256
|
||||||
blockHeader.fromJson "hash", blockHash
|
blockHeader.fromJson "hash", blockHash
|
||||||
check blockHash == hash(result)
|
check blockHash == rlpHash(result)
|
||||||
|
|
||||||
proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] =
|
proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] =
|
||||||
case withdrawals.kind
|
case withdrawals.kind
|
||||||
|
@ -256,7 +256,7 @@ proc collectDebugData(ctx: var TestCtx) =
|
||||||
}
|
}
|
||||||
|
|
||||||
proc runTestCtx(ctx: var TestCtx, com: CommonRef, testStatusIMPL: var TestStatus) =
|
proc runTestCtx(ctx: var TestCtx, com: CommonRef, testStatusIMPL: var TestStatus) =
|
||||||
com.db.persistHeaderToDb(ctx.genesisHeader,
|
doAssert com.db.persistHeader(ctx.genesisHeader,
|
||||||
com.consensus == ConsensusType.POS)
|
com.consensus == ConsensusType.POS)
|
||||||
check com.db.getCanonicalHead().blockHash == ctx.genesisHeader.blockHash
|
check com.db.getCanonicalHead().blockHash == ctx.genesisHeader.blockHash
|
||||||
let checkSeal = ctx.shouldCheckSeal
|
let checkSeal = ctx.shouldCheckSeal
|
||||||
|
|
|
@ -87,8 +87,8 @@ proc persistFixtureBlock(chainDB: CoreDbRef) =
|
||||||
# Manually inserting header to avoid any parent checks
|
# Manually inserting header to avoid any parent checks
|
||||||
chainDB.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
|
chainDB.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
|
||||||
chainDB.addBlockNumberToHashLookup(header)
|
chainDB.addBlockNumberToHashLookup(header)
|
||||||
discard chainDB.persistTransactions(header.blockNumber, getBlockBody4514995().transactions)
|
chainDB.persistTransactions(header.blockNumber, getBlockBody4514995().transactions)
|
||||||
discard chainDB.persistReceipts(getReceipts4514995())
|
chainDB.persistReceipts(getReceipts4514995())
|
||||||
|
|
||||||
proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv =
|
proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv =
|
||||||
var
|
var
|
||||||
|
@ -153,7 +153,9 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
|
||||||
signedTx1 = signTransaction(unsignedTx1, acc.privateKey, com.chainId, eip155)
|
signedTx1 = signTransaction(unsignedTx1, acc.privateKey, com.chainId, eip155)
|
||||||
signedTx2 = signTransaction(unsignedTx2, acc.privateKey, com.chainId, eip155)
|
signedTx2 = signTransaction(unsignedTx2, acc.privateKey, com.chainId, eip155)
|
||||||
txs = [signedTx1, signedTx2]
|
txs = [signedTx1, signedTx2]
|
||||||
txRoot = com.db.persistTransactions(blockNumber, txs)
|
com.db.persistTransactions(blockNumber, txs)
|
||||||
|
|
||||||
|
let txRoot = com.db.ctx.getMpt(CtTxs).getColumn().state().valueOr(EMPTY_ROOT_HASH)
|
||||||
|
|
||||||
vmState.receipts = newSeq[Receipt](txs.len)
|
vmState.receipts = newSeq[Receipt](txs.len)
|
||||||
vmState.cumulativeGasUsed = 0
|
vmState.cumulativeGasUsed = 0
|
||||||
|
@@ -163,8 +165,9 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
     doAssert(rc.isOk, "Invalid transaction: " & rc.error)
     vmState.receipts[txIndex] = makeReceipt(vmState, tx.txType)
 
+  com.db.persistReceipts(vmState.receipts)
   let
-    receiptRoot = com.db.persistReceipts(vmState.receipts)
+    receiptRoot = com.db.ctx.getMpt(CtReceipts).getColumn().state().valueOr(EMPTY_ROOT_HASH)
     date = dateTime(2017, mMar, 30)
     timeStamp = date.toTime.toUnix.EthTime
     difficulty = com.calcDifficulty(timeStamp, parent)

@@ -192,7 +195,7 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
   let uncles = [header]
   header.ommersHash = com.db.persistUncles(uncles)
 
-  com.db.persistHeaderToDb(header,
+  doAssert com.db.persistHeader(header,
     com.consensus == ConsensusType.POS)
   com.db.persistFixtureBlock()
   result = TestEnv(

@@ -863,7 +863,7 @@ proc runTxPackerTests(noisy = true) =
         check bdy == blockBody
       else:
        # The canonical head will be set to hdr if it scores high enough
-        # (see implementation of db_chain.persistHeaderToDb()).
+        # (see implementation of db_chain.persistHeader()).
        let
          canonScore = xq.chain.com.db.getScore(canonicalHead.blockHash)
          headerScore = xq.chain.com.db.getScore(hdr.blockHash)