Engine-API simulator: allow testee client to import invalid block
This commit is contained in:
parent
047f8ec4b2
commit
053fc79a8b
|
@ -11,6 +11,7 @@
|
|||
import
|
||||
std/strutils,
|
||||
chronicles,
|
||||
eth/common,
|
||||
eth/common/eth_types_rlp,
|
||||
./engine_spec,
|
||||
../cancun/customizer,
|
||||
|
@ -191,8 +192,11 @@ method getName(cs: InvalidMissingAncestorReOrgSyncTest): string =
|
|||
"Invalid Missing Ancestor Syncing ReOrg, $1, EmptyTxs=$2, CanonicalReOrg=$3, Invalid P$4" % [
|
||||
$cs.invalidField, $cs.emptyTransactions, $cs.reOrgFromCanonical, $cs.invalidIndex]
|
||||
|
||||
proc executableDataToBlock(ex: ExecutableData): EthBlock =
  ## Assemble a full `EthBlock` from the base payload carried by `ex`,
  ## forwarding the beacon root stored alongside it.
  result = ethBlock(ex.basePayload, beaconRoot = ex.beaconRoot)
|
||||
func blockHeader(ex: ExecutableData): common.BlockHeader =
  ## Derive the execution-layer block header for `ex`, using the
  ## payload plus its beacon root.
  result = blockHeader(ex.basePayload, ex.beaconRoot)
|
||||
|
||||
func blockBody(ex: ExecutableData): common.BlockBody =
  ## Derive the execution-layer block body from the base payload of `ex`.
  result = blockBody(ex.basePayload)
|
||||
|
||||
method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
||||
var sec = env.addEngine(true, cs.reOrgFromCanonical)
|
||||
|
@ -228,10 +232,6 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
|||
# Append the common ancestor
|
||||
shadow.payloads.add env.clMock.latestExecutableData
|
||||
|
||||
if not cs.reOrgFromCanonical:
|
||||
# Add back the original client before side chain production
|
||||
env.clMock.addEngine(env.engine)
|
||||
|
||||
# Produce blocks but at the same time create an side chain which contains an invalid payload at some point (INV_P)
|
||||
# CommonAncestor◄─▲── P1 ◄─ P2 ◄─ P3 ◄─ ... ◄─ Pn
|
||||
# │
|
||||
|
@ -285,6 +285,10 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
|||
))
|
||||
testCond pbRes
|
||||
|
||||
if not cs.reOrgFromCanonical:
|
||||
# Add back the original client before side chain production
|
||||
env.clMock.addEngine(env.engine)
|
||||
|
||||
info "Starting side chain production"
|
||||
pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks(
|
||||
# Note: We perform the test in the middle of payload creation by the CL Mock, in order to be able to
|
||||
|
@ -315,13 +319,16 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
|||
s.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.syncing])
|
||||
|
||||
else:
|
||||
let invalidBlock = executableDataToBlock(shadow.payloads[i])
|
||||
testCond sec.client.setBlock(invalidBlock, shadow.payloads[i-1].blockNumber, shadow.payloads[i-1].stateRoot):
|
||||
let
|
||||
invalidHeader = blockHeader(shadow.payloads[i])
|
||||
invalidBody = blockBody(shadow.payloads[i])
|
||||
|
||||
testCond sec.setBlock(invalidHeader, invalidBody):
|
||||
fatal "TEST ISSUE - Failed to set invalid block"
|
||||
info "Invalid block successfully set",
|
||||
idx=i,
|
||||
msg=payloadValidStr,
|
||||
hash=invalidBlock.header.blockHash.short
|
||||
hash=invalidHeader.blockHash.short
|
||||
|
||||
# Check that the second node has the correct head
|
||||
var res = sec.client.latestHeader()
|
||||
|
@ -352,6 +359,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
|||
number=head.blockNumber
|
||||
|
||||
# If we are syncing through p2p, we need to keep polling until the client syncs the missing payloads
|
||||
let period = chronos.milliseconds(500)
|
||||
while true:
|
||||
let version = env.engine.version(shadow.payloads[shadow.n].timestamp)
|
||||
let r = env.engine.client.newPayload(version, shadow.payloads[shadow.n])
|
||||
|
@ -395,6 +403,8 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
|
|||
fatal "Client returned VALID on an invalid chain", status=r.get.status
|
||||
return false
|
||||
|
||||
waitFor sleepAsync(period)
|
||||
|
||||
if not cs.reOrgFromCanonical:
|
||||
# We need to send the canonical chain to the main client here
|
||||
let start = env.clMock.firstPoSBlockNumber.get
|
||||
|
|
|
@ -676,6 +676,3 @@ template expectStorageEqual*(res: Result[FixedBytes[32], string], account: EthAd
|
|||
if res.get != expectedValue:
|
||||
return err("invalid wd storage at $1 is $2, expect $3" % [
|
||||
account.toHex, $res.get, $expectedValue])
|
||||
|
||||
proc setBlock*(client: RpcClient, blk: EthBlock, blockNumber: Web3Quantity, stateRoot: Web3Hash): bool =
  ## Stub that unconditionally reports success; all arguments are ignored.
  ## NOTE(review): presumably a placeholder for a debug RPC — confirm callers
  ## do not rely on the block actually being installed.
  true
|
||||
|
|
|
@ -34,6 +34,8 @@ import
|
|||
../../../tests/test_helpers,
|
||||
web3/execution_types
|
||||
|
||||
from ./node import setBlock
|
||||
|
||||
export
|
||||
results
|
||||
|
||||
|
@ -48,6 +50,7 @@ type
|
|||
client : RpcHttpClient
|
||||
sync : BeaconSyncRef
|
||||
txPool : TxPoolRef
|
||||
chain : ChainRef
|
||||
|
||||
const
|
||||
baseFolder = "hive_integration/nodocker/engine"
|
||||
|
@ -154,7 +157,8 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
|
|||
sealer : sealer,
|
||||
client : client,
|
||||
sync : sync,
|
||||
txPool : txPool
|
||||
txPool : txPool,
|
||||
chain : chain
|
||||
)
|
||||
|
||||
proc close*(env: EngineEnv) =
|
||||
|
@ -223,3 +227,6 @@ func version*(env: EngineEnv, time: Web3Quantity): Version =
|
|||
|
||||
func version*(env: EngineEnv, time: uint64): Version =
  ## Convenience overload taking a raw `uint64` timestamp; converts it
  ## and delegates to the `EthTime`-based `version`.
  env.version(EthTime(time))
|
||||
|
||||
proc setBlock*(env: EngineEnv, header: common.BlockHeader, body: common.BlockBody): bool =
  ## Feed `header`/`body` straight into the engine's chain object and
  ## report whether the chain accepted it (`ValidationResult.OK`).
  let rc = env.chain.setBlock(header, body)
  rc == ValidationResult.OK
|
||||
|
|
|
@ -0,0 +1,153 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
import
|
||||
../../../nimbus/[
|
||||
utils/utils,
|
||||
common/common,
|
||||
constants,
|
||||
db/ledger,
|
||||
transaction,
|
||||
vm_state,
|
||||
vm_types,
|
||||
core/clique,
|
||||
core/dao,
|
||||
core/validate,
|
||||
core/chain/chain_desc,
|
||||
core/executor/calculate_reward,
|
||||
core/executor/process_transaction,
|
||||
core/executor/process_block
|
||||
],
|
||||
chronicles,
|
||||
stint,
|
||||
results
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
proc processBlock(
    vmState: BaseVMState; ## Parent environment of header/body block
    header: BlockHeader;  ## Header/body block to add to the blockchain
    body: BlockBody): ValidationResult
    {.gcsafe, raises: [CatchableError].} =
  ## Execute `(header, body)` on top of `vmState` *without* rejecting the
  ## block on execution errors: failures from beacon-root and transaction
  ## processing are logged and deliberately ignored, so that an invalid
  ## block can still be imported (the purpose of this module).
  var dbTx = vmState.com.db.beginTransaction()
  defer: dbTx.dispose()

  if vmState.com.daoForkSupport and
     vmState.com.daoForkBlock.get == header.blockNumber:
    vmState.mutateStateDB:
      db.applyDAOHardFork()

  if header.parentBeaconBlockRoot.isSome:
    let r = vmState.processBeaconBlockRoot(header.parentBeaconBlockRoot.get)
    if r.isErr:
      # Logged only: invalid payloads must still be importable.
      error("error in processing beaconRoot", err=r.error)

  let r = processTransactions(vmState, header, body.transactions)
  if r.isErr:
    # Logged only: invalid payloads must still be importable.
    error("error in processing transactions", err=r.error)

  if vmState.determineFork >= FkShanghai:
    # Fix: guard the withdrawals list. An invalid block may carry a
    # Shanghai-era timestamp yet omit withdrawals; an unconditional
    # `.get` would raise and abort the import instead of accepting it.
    if body.withdrawals.isSome:
      for withdrawal in body.withdrawals.get:
        vmState.stateDB.addBalance(withdrawal.address, withdrawal.weiAmount)

  if header.ommersHash != EMPTY_UNCLE_HASH:
    discard vmState.com.db.persistUncles(body.uncles)

  # EIP-3675: no reward for miner in POA/POS
  if vmState.com.consensus == ConsensusType.POW:
    vmState.calculateReward(header, body)

  vmState.mutateStateDB:
    let clearEmptyAccount = vmState.determineFork >= FkSpurious
    db.persist(clearEmptyAccount, ClearCache in vmState.flags)

  # `applyDeletes = false`
  # If the trie pruning activated, each of the block will have its own state
  # trie keep intact, rather than destroyed by trie pruning. But the current
  # block will still get a pruned trie. If trie pruning deactivated,
  # `applyDeletes` have no effects.
  dbTx.commit(applyDeletes = false)

  ValidationResult.OK
|
||||
|
||||
proc getVmState(c: ChainRef, header: BlockHeader):
     Result[BaseVMState, void]
    {.gcsafe, raises: [CatchableError].} =
  ## Return the chain's cached VM state when one is set; otherwise
  ## initialise a fresh `BaseVMState` for `header`.
  if not c.vmState.isNil:
    return ok(c.vmState)

  let freshState = BaseVMState()
  if freshState.init(header, c.com):
    return ok(freshState)

  debug "Cannot initialise VmState",
    number = header.blockNumber
  err()
|
||||
|
||||
# A stripped down version of persistBlocks without validation
|
||||
# intended to accepts invalid block
|
||||
proc setBlock*(c: ChainRef; header: BlockHeader;
               body: BlockBody): ValidationResult
    {.inline, raises: [CatchableError].} =
  ## Persist a single `(header, body)` pair. This is a stripped-down
  ## variant of `persistBlocks` that skips block validation, so that
  ## deliberately invalid blocks can be imported.
  let topTx = c.db.beginTransaction()
  defer: topTx.dispose()

  # Snapshot and restore the clique state around the import.
  var savedCliqueState = c.clique.cliqueSave
  defer: c.clique.cliqueRestore(savedCliqueState)

  c.com.hardForkTransition(header)

  # Needed for figuring out whether KVT cleanup is due (see at the end)
  let
    vmState = c.getVmState(header).valueOr:
      return ValidationResult.Error
    stateRootChpt = vmState.parent.stateRoot # Check point
    execResult = vmState.processBlock(header, body)

  if execResult != ValidationResult.OK:
    return execResult

  discard c.db.persistHeaderToDb(
    header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory)
  discard c.db.persistTransactions(header.blockNumber, body.transactions)
  discard c.db.persistReceipts(vmState.receipts)

  if body.withdrawals.isSome:
    discard c.db.persistWithdrawals(body.withdrawals.get)

  # update currentBlock *after* we persist it
  # so the rpc return consistent result
  # between eth_blockNumber and eth_syncing
  c.com.syncCurrent = header.blockNumber

  topTx.commit()

  # The `c.db.persistent()` call is ignored by the legacy DB which
  # automatically saves persistently when reaching the zero level transaction.
  #
  # For the `Aristo` database, this code position is only reached if the
  # the parent state of the first block (as registered in `headers[0]`) was
  # the canonical state before updating. So this state will be saved with
  # `persistent()` together with the respective block number.
  c.db.persistent(header.blockNumber - 1)

  ValidationResult.OK
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
Loading…
Reference in New Issue