move block validation from execution payload generator to engine api

parent 709d8ef255
commit 2b4baff8ec
@@ -80,13 +80,14 @@ proc waitForTTD*(cl: CLMocker): Future[bool] {.async.} =
   let res = cl.client.forkchoiceUpdatedV1(cl.latestForkchoice)
   if res.isErr:
-    error "forkchoiceUpdated error", msg=res.error
+    error "waitForTTD: forkchoiceUpdated error", msg=res.error
     return false

   let s = res.get()
   if s.payloadStatus.status != PayloadExecutionStatus.valid:
-    error "forkchoiceUpdated response",
-      status=s.payloadStatus.status
+    error "waitForTTD: forkchoiceUpdated response unexpected",
+      expect = PayloadExecutionStatus.valid,
+      get = s.payloadStatus.status
     return false

   return true
@@ -417,15 +417,11 @@ template invalidPayloadAttributesGen(procname: untyped, syncingCond: bool) =
       when syncingCond:
         # If we are SYNCING, the outcome should be SYNCING regardless of the validity of the payload atttributes
         let r = client.forkchoiceUpdatedV1(fcu, some(attr))
-        let s = r.get()
-        if s.payloadStatus.status != PayloadExecutionStatus.syncing:
-          return false
-        if s.payloadId.isSome:
-          return false
+        testFCU(r, syncing)
       else:
         let r = client.forkchoiceUpdatedV1(fcu, some(attr))
-        if r.isOk:
-          return false
+        testCond r.isOk:
+          error "Unexpected error", msg = r.error

         # Check that the forkchoice was applied, regardless of the error
         testLatestHeader(client, BlockHash blockHash.data)
@@ -1326,8 +1322,7 @@ proc reorgBack(t: TestEnv): TestStatus =
   let clMock = t.clMock
   let client = t.rpcClient

-  let r1 = clMock.produceSingleBlock(BlockProcessCallbacks())
-  testCond r1
+  testCond clMock.produceSingleBlock(BlockProcessCallbacks())

   # We are going to reorg back to this previous hash several times
   let previousHash = clMock.latestForkchoice.headBlockHash
@@ -1344,9 +1339,8 @@ proc reorgBack(t: TestEnv): TestStatus =

       # It is only expected that the client does not produce an error and the CL Mocker is able to progress after the re-org
       let r = client.forkchoiceUpdatedV1(forkchoiceUpdatedBack)
-      if r.isErr:
+      testCond r.isOk:
         error "failed to reorg back", msg = r.error
-        return false
       return true
   ))
   testCond r2
@@ -1966,11 +1960,11 @@ const engineTestList* = [
   ),

   # Eth RPC Status on ForkchoiceUpdated Events
-  TestSpec( # TODO: fix/debug
+  TestSpec(
     name: "Latest Block after NewPayload",
     run: blockStatusExecPayload1,
   ),
-  TestSpec( # TODO: fix/debug
+  TestSpec(
     name: "Latest Block after NewPayload (Transition Block)",
     run: blockStatusExecPayload2,
     ttd: 5,
@@ -19,7 +19,6 @@ type
     networkId*: NetworkId
     config* : ChainConfig
     genesis* : Genesis
-    totalDifficulty*: DifficultyInt

     # startingBlock, currentBlock, and highestBlock
     # are progress indicator
@@ -31,16 +30,6 @@ type
     blockNumber: BlockNumber
     index: int

-proc getTotalDifficulty*(self: BaseChainDB): UInt256 =
-  # this is actually a combination of `getHash` and `getScore`
-  const key = canonicalHeadHashKey()
-  let data = self.db.get(key.toOpenArray)
-  if data.len == 0:
-    return 0.u256
-
-  let blockHash = rlp.decode(data, Hash256)
-  rlp.decode(self.db.get(blockHashToScoreKey(blockHash).toOpenArray), UInt256)
-
 proc newBaseChainDB*(
     db: TrieDatabaseRef,
     pruneTrie: bool = true,
@@ -53,7 +42,6 @@ proc newBaseChainDB*(
   result.networkId = id
   result.config = params.config
   result.genesis = params.genesis
-  result.totalDifficulty = result.getTotalDifficulty()

 proc `$`*(db: BaseChainDB): string =
   result = "BaseChainDB"
@@ -165,6 +153,16 @@ proc getTd*(self: BaseChainDB; blockHash: Hash256, td: var UInt256): bool =
     return false
   return true

+proc headTotalDifficulty*(self: BaseChainDB): UInt256 =
+  # this is actually a combination of `getHash` and `getScore`
+  const key = canonicalHeadHashKey()
+  let data = self.db.get(key.toOpenArray)
+  if data.len == 0:
+    return 0.u256
+
+  let blockHash = rlp.decode(data, Hash256)
+  rlp.decode(self.db.get(blockHashToScoreKey(blockHash).toOpenArray), UInt256)
+
 proc getAncestorsHashes*(self: BaseChainDB, limit: UInt256, header: BlockHeader): seq[Hash256] =
   var ancestorCount = min(header.blockNumber, limit).truncate(int)
   var h = header
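
Note: the relocated headTotalDifficulty above first reads the canonical head hash stored under canonicalHeadHashKey(), then looks up that hash's accumulated score. A minimal sketch of the same two-step lookup, assuming an in-memory table in place of the trie database (the keys and values below are made up for illustration):

import std/[tables, strutils]

# toy key/value store standing in for self.db (hypothetical keys and values)
var kv = initTable[string, string]()
kv["canonicalHeadHash"] = "0xabc"  # canonicalHeadHashKey() -> head block hash
kv["score:0xabc"] = "1000"         # blockHashToScoreKey(hash) -> total difficulty

proc headTotalDifficultySketch(): int =
  let headHash = kv.getOrDefault("canonicalHeadHash", "")
  if headHash.len == 0:
    return 0                       # no canonical head recorded yet
  parseInt(kv.getOrDefault("score:" & headHash, "0"))

doAssert headTotalDifficultySketch() == 1000
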
@@ -258,21 +256,24 @@ proc getUncles*(self: BaseChainDB, ommersHash: Hash256): seq[BlockHeader] =
   if encodedUncles.len != 0:
     result = rlp.decode(encodedUncles, seq[BlockHeader])

+proc getBlockBody*(self: BaseChainDB, header: BlockHeader, output: var BlockBody): bool =
+  result = true
+  output.transactions = @[]
+  output.uncles = @[]
+  for encodedTx in self.getBlockTransactionData(header.txRoot):
+    output.transactions.add(rlp.decode(encodedTx, Transaction))
+
+  if header.ommersHash != EMPTY_UNCLE_HASH:
+    let encodedUncles = self.db.get(genericHashKey(header.ommersHash).toOpenArray)
+    if encodedUncles.len != 0:
+      output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])
+    else:
+      result = false
+
 proc getBlockBody*(self: BaseChainDB, blockHash: Hash256, output: var BlockBody): bool =
   var header: BlockHeader
   if self.getBlockHeader(blockHash, header):
-    result = true
-    output.transactions = @[]
-    output.uncles = @[]
-    for encodedTx in self.getBlockTransactionData(header.txRoot):
-      output.transactions.add(rlp.decode(encodedTx, Transaction))
-
-    if header.ommersHash != EMPTY_UNCLE_HASH:
-      let encodedUncles = self.db.get(genericHashKey(header.ommersHash).toOpenArray)
-      if encodedUncles.len != 0:
-        output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])
-      else:
-        result = false
+    return self.getBlockBody(header, output)

 proc getBlockBody*(self: BaseChainDB, hash: Hash256): BlockBody =
   if not self.getBlockBody(hash, result):
@@ -465,7 +466,6 @@ proc persistHeaderToDb*(self: BaseChainDB; header: BlockHeader): seq[BlockHeader
     self.writeTerminalHash(headerHash)

   if score > headScore:
-    self.totalDifficulty = score
     result = self.setAsCanonicalChainHead(headerHash)

 proc persistHeaderToDbWithoutSetHead*(self: BaseChainDB; header: BlockHeader) =
@@ -113,7 +113,6 @@ proc initializeEmptyDb*(cdb: BaseChainDB)
   let header = cdb.toGenesisHeader(sdb)
   doAssert(header.blockNumber.isZero, "can't commit genesis block with number > 0")
   # faster lookup of curent total difficulty
-  cdb.totalDifficulty = header.difficulty
   discard cdb.persistHeaderToDb(header)

 # ------------------------------------------------------------------------------
@@ -247,7 +247,7 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf,
   # always create sealing engine instanca but not always run it
   # e.g. engine api need sealing engine without it running
   var initialState = EngineStopped
-  if chainDB.totalDifficulty > chainDB.ttd:
+  if chainDB.headTotalDifficulty() > chainDB.ttd:
     initialState = EnginePostMerge
   nimbus.sealingEngine = SealingEngineRef.new(
     nimbus.chainRef, nimbus.ctx, conf.engineSigner,
@@ -84,17 +84,19 @@ func toNextFork(n: Option[BlockNumber]): uint64 =
   else:
     0'u64

-func isBlockAfterTtd*(c: Chain, blockHeader: BlockHeader): bool =
+proc isBlockAfterTtd*(c: Chain, header: BlockHeader): bool
+    {.gcsafe, raises: [Defect,CatchableError].} =
   let
     ttd = c.db.ttd
-    totalDifficulty = c.db.totalDifficulty + blockHeader.difficulty
+    ptd = c.db.getScore(header.parentHash)
+    td  = ptd + header.difficulty

   # c.db.totalDifficulty is parent.totalDifficulty
   # TerminalBlock is defined as header.totalDifficulty >= TTD
   # and parent.totalDifficulty < TTD
   # So blockAfterTTD must be both header.totalDifficulty >= TTD
   # and parent.totalDifficulty >= TTD
-  c.db.totalDifficulty >= ttd and totalDifficulty >= ttd
+  ptd >= ttd and td >= ttd

 func getNextFork(c: ChainConfig, fork: ChainFork): uint64 =
   let next: array[ChainFork, uint64] = [
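
Note: a worked example of the check above, assuming made-up numbers and plain uint64 in place of UInt256. The terminal block is the first block whose total difficulty reaches TTD while its parent's has not; isBlockAfterTtd instead requires that the parent has already reached TTD as well:

let
  ttd = 100'u64            # terminal total difficulty (assumed value)
  ptd = 105'u64            # parent's total difficulty, as getScore would return it
  difficulty = 10'u64      # this block's own difficulty
  td  = ptd + difficulty   # this block's total difficulty

# terminal block:  td >= ttd and ptd < ttd   (false here, the parent already crossed TTD)
# block after TTD: ptd >= ttd and td >= ttd  (the condition isBlockAfterTtd now evaluates)
doAssert ptd >= ttd and td >= ttd
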
@@ -27,6 +27,14 @@ when not defined(release):
   ../../tracer,
   ../../utils

+type
+  PersistBlockFlag = enum
+    NoPersistHeader
+    NoSaveTxs
+    NoSaveReceipts
+
+  PersistBlockFlags = set[PersistBlockFlag]
+
 {.push raises: [Defect].}

 # ------------------------------------------------------------------------------
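
Note: a self-contained sketch of the set[enum] flag pattern introduced here; the flag names mirror the diff, but persistSketch is a hypothetical stand-in for persistBlocksImpl:

type
  PersistBlockFlag = enum
    NoPersistHeader
    NoSaveTxs
    NoSaveReceipts

  PersistBlockFlags = set[PersistBlockFlag]

proc persistSketch(flags: PersistBlockFlags = {}) =
  # each step runs unless its corresponding "No..." flag is present in the set
  if NoPersistHeader notin flags:
    echo "persist header"
  if NoSaveTxs notin flags:
    echo "persist transactions"
  if NoSaveReceipts notin flags:
    echo "persist receipts"

persistSketch()                                   # default: everything persisted
persistSketch({NoPersistHeader, NoSaveReceipts})  # as insertBlockWithoutSetHead now passes
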
@@ -34,7 +42,8 @@ when not defined(release):
 # ------------------------------------------------------------------------------

 proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
-                       bodies: openArray[BlockBody], setHead: bool = true): ValidationResult
+                       bodies: openArray[BlockBody],
+                       flags: PersistBlockFlags = {}): ValidationResult
                          # wildcard exception, wrapped below in public section
                          {.inline, raises: [Exception].} =
   c.db.highestBlock = headers[^1].blockNumber
@@ -98,13 +107,14 @@ proc persistBlocksImpl(c: Chain; headers: openArray[BlockHeader];
         msg = res.error
       return ValidationResult.Error

-    if setHead:
+    if NoPersistHeader notin flags:
       discard c.db.persistHeaderToDb(header)
-    else:
-      c.db.persistHeaderToDbWithoutSetHead(header)

-    discard c.db.persistTransactions(header.blockNumber, body.transactions)
-    discard c.db.persistReceipts(vmState.receipts)
+    if NoSaveTxs notin flags:
+      discard c.db.persistTransactions(header.blockNumber, body.transactions)
+
+    if NoSaveReceipts notin flags:
+      discard c.db.persistReceipts(vmState.receipts)

     # update currentBlock *after* we persist it
     # so the rpc return consistent result
@@ -122,7 +132,37 @@ proc insertBlockWithoutSetHead*(c: Chain, header: BlockHeader,
                                 {.gcsafe, raises: [Defect,CatchableError].} =

   safeP2PChain("persistBlocks"):
-    result = c.persistBlocksImpl([header], [body], setHead = false)
+    result = c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveReceipts})
+    if result == ValidationResult.OK:
+      c.db.persistHeaderToDbWithoutSetHead(header)
+
+proc setCanonical*(c: Chain, header: BlockHeader): ValidationResult
+    {.gcsafe, raises: [Defect,CatchableError].} =
+
+  if header.parentHash == Hash256():
+    discard c.db.setHead(header.blockHash)
+    return ValidationResult.OK
+
+  var body: BlockBody
+  if not c.db.getBlockBody(header, body):
+    debug "Failed to get BlockBody",
+      hash = header.blockHash
+    return ValidationResult.Error
+
+  safeP2PChain("persistBlocks"):
+    result = c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveTxs})
+    if result == ValidationResult.OK:
+      discard c.db.setHead(header.blockHash)
+
+proc setCanonical*(c: Chain, blockHash: Hash256): ValidationResult
+    {.gcsafe, raises: [Defect,CatchableError].} =
+  var header: BlockHeader
+  if not c.db.getBlockHeader(blockHash, header):
+    debug "Failed to get BlockHeader",
+      hash = blockHash
+    return ValidationResult.Error
+
+  setCanonical(c, header)

 # ------------------------------------------------------------------------------
 # Public `AbstractChainDB` overload method
@@ -16,7 +16,8 @@ import
   ".."/db/db_chain,
   ".."/p2p/chain/[chain_desc, persist_blocks],
   ".."/[sealer, constants],
-  ".."/merge/[mergetypes, mergeutils]
+  ".."/merge/[mergetypes, mergeutils],
+  ".."/utils/tx_pool

 proc latestValidHash(db: BaseChainDB, parent: EthBlockHeader, ttd: DifficultyInt): Hash256 =
   let ptd = db.getScore(parent.parentHash)
@@ -27,6 +28,14 @@ proc latestValidHash(db: BaseChainDB, parent: EthBlockHeader, ttd: DifficultyInt
     # latestValidHash MUST be set to ZERO
     Hash256()

+proc invalidFCU(db: BaseChainDB, header: EthBlockHeader): ForkchoiceUpdatedResponse =
+  var parent: EthBlockHeader
+  if not db.getBlockHeader(header.parentHash, parent):
+    return invalidFCU(Hash256())
+
+  let blockHash = latestValidHash(db, parent, db.ttd())
+  invalidFCU(blockHash)
+
 proc setupEngineAPI*(
     sealingEngine: SealingEngineRef,
     server: RpcServer) =
@@ -190,7 +199,8 @@ proc setupEngineAPI*(
       update: ForkchoiceStateV1,
       payloadAttributes: Option[PayloadAttributesV1]) -> ForkchoiceUpdatedResponse:
     let
-      db = sealingEngine.chain.db
+      chain = sealingEngine.chain
+      db = chain.db
       blockHash = update.headBlockHash.asEthHash

     if blockHash == Hash256():
@@ -258,10 +268,10 @@ proc setupEngineAPI*(
       # TODO should this be possible?
       # If we allow these types of reorgs, we will do lots and lots of reorgs during sync
       warn "Reorg to previous block"
-      if not db.setHead(blockHash):
-        return simpleFCU(PayloadExecutionStatus.invalid)
-    elif not db.setHead(blockHash):
-      return simpleFCU(PayloadExecutionStatus.invalid)
+      if chain.setCanonical(header) != ValidationResult.OK:
+        return invalidFCU(db, header)
+    elif chain.setCanonical(header) != ValidationResult.OK:
+      return invalidFCU(db, header)

     # If the beacon client also advertised a finalized block, mark the local
     # chain final and completely in PoS mode.
@@ -326,7 +336,9 @@ proc setupEngineAPI*(
       api.put(id, payload)

       info "Created payload for sealing",
-        id = id.toHex
+        id = id.toHex,
+        hash = payload.blockHash,
+        number = payload.blockNumber.uint64

       return validFCU(some(id), blockHash)
@@ -267,19 +267,10 @@ proc generateExecutionPayload*(engine: SealingEngineRef,
   blk.header.prevRandao = prevRandao
   blk.header.fee = some(blk.header.fee.get(UInt256.zero)) # force it with some(UInt256)

-  let res = engine.chain.persistBlocks([blk.header], [
-    BlockBody(transactions: blk.txs, uncles: blk.uncles)
-  ])
-
   let blockHash = rlpHash(blk.header)
-  if res != ValidationResult.OK:
-    return err("Error when validating generated block. hash=" & blockHash.data.toHex)
-
   if blk.header.extraData.len > 32:
     return err "extraData length should not exceed 32 bytes"

-  discard engine.txPool.smartHead(blk.header) # add transactions update jobs
-
   payloadRes.parentHash = Web3BlockHash blk.header.parentHash.data
   payloadRes.feeRecipient = Web3Address blk.header.coinbase
   payloadRes.stateRoot = Web3BlockHash blk.header.stateRoot.data
@@ -183,7 +183,7 @@ proc isOk(rc: ValidationResult): bool =

 proc ttdReached(db: BaseChainDB): bool =
   if db.config.terminalTotalDifficulty.isSome:
-    return db.config.terminalTotalDifficulty.get <= db.totalDifficulty
+    return db.config.terminalTotalDifficulty.get <= db.headTotalDifficulty()

 proc importBlocks(c: Chain; h: seq[BlockHeader]; b: seq[BlockBody];
                   noisy = false): bool =