Mirror of https://github.com/status-im/nimbus-eth2.git
Commit 49729e1ef3
When a block is introduced to the system via both REST and gossip at the same time, `storeBlock` is called from two locations, leading to a duplicate-check race condition while we wait for the EL. This issue may manifest in particular when using an external block builder that itself publishes the block onto the gossip network.

* refactor enqueue flow
* simplify calling `addBlock`
* complete request manager verifier future for blobless blocks
* re-verify parent conditions before adding a block; among other things, the parent might have gone stale or been finalized between one call and the other
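A minimal sketch of that scenario, assuming the fixtures of the test below (`processor`, `b1`, `dag`), an async test body to run in, and that `MsgSource.api` exists alongside `MsgSource.gossip`; it is illustrative only and not part of the original file:

# Hypothetical sketch: the same block enqueued concurrently from the REST
# path and from gossip, e.g. when an external block builder republishes the
# block we just produced.
let
  viaRest = processor[].addBlock(
    MsgSource.api, ForkedSignedBeaconBlock.init(b1), Opt.none(BlobSidecars))
  viaGossip = processor[].addBlock(
    MsgSource.gossip, ForkedSignedBeaconBlock.init(b1), Opt.none(BlobSidecars))
let
  restRes = await viaRest
  gossipRes = await viaGossip
# With the reworked enqueue flow both callers complete with a consistent
# answer: at least one of them imports the block and the other sees it as
# already known, instead of racing on the duplicate check while the EL is
# consulted.
check:
  restRes.isOk or gossipRes.isOk
  dag.containsForkBlock(b1.root)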
# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  chronos,
  std/sequtils,
  unittest2,
  taskpools,
  ../beacon_chain/[conf, beacon_clock],
  ../beacon_chain/spec/[beaconstate, forks, helpers, state_transition],
  ../beacon_chain/spec/datatypes/deneb,
  ../beacon_chain/gossip_processing/block_processor,
  ../beacon_chain/consensus_object_pools/[
    attestation_pool, blockchain_dag, blob_quarantine, block_quarantine,
    block_clearance, consensus_manager],
  ../beacon_chain/el/el_manager,
  ./testutil, ./testdbutil, ./testblockutil

from chronos/unittest2/asynctests import asyncTest
from ../beacon_chain/spec/eth2_apis/dynamic_fee_recipients import
  DynamicFeeRecipientsStore, init
from ../beacon_chain/validators/action_tracker import ActionTracker
from ../beacon_chain/validators/keystore_management import KeymanagerHost

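# Test helper: prune the DAG's state caches when it reports that pruning is
# due after finalization.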
proc pruneAtFinalization(dag: ChainDAGRef) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()

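# The setup fixture builds a fresh test database, a ChainDAG holding two test
# blocks (b1 and its child b2, created on top of the head state) and a
# BlockProcessor whose queue processing loop runs for the duration of the
# test, so blocks can be fed in out of order.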
suite "Block processor" & preset():
|
|
setup:
|
|
let rng = HmacDrbgContext.new()
|
|
var
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
taskpool = Taskpool.new()
|
|
verifier = BatchVerifier.init(rng, taskpool)
|
|
quarantine = newClone(Quarantine.init())
|
|
blobQuarantine = newClone(BlobQuarantine())
|
|
attestationPool = newClone(AttestationPool.init(dag, quarantine))
|
|
elManager = new ELManager # TODO: initialise this properly
|
|
actionTracker: ActionTracker
|
|
keymanagerHost: ref KeymanagerHost
|
|
consensusManager = ConsensusManager.new(
|
|
dag, attestationPool, quarantine, elManager, actionTracker,
|
|
newClone(DynamicFeeRecipientsStore.init()), "",
|
|
Opt.some default(Eth1Address), defaultGasLimit)
|
|
state = newClone(dag.headState)
|
|
cache = StateCache()
|
|
b1 = addTestBlock(state[], cache).phase0Data
|
|
b2 = addTestBlock(state[], cache).phase0Data
|
|
getTimeFn = proc(): BeaconTime = b2.message.slot.start_beacon_time()
|
|
processor = BlockProcessor.new(
|
|
false, "", "", rng, taskpool, consensusManager,
|
|
validatorMonitor, blobQuarantine, getTimeFn)
|
|
processorFut = processor.runQueueProcessingLoop()
|
|
|
|
asyncTest "Reverse order block add & get" & preset():
|
|
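    # b2 is submitted before its parent b1 is known, so it cannot be imported
    # yet: addBlock should report MissingParent and b1 should be recorded by
    # the quarantine as a block to fetch.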
    let
      missing = await processor[].addBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(b2),
        Opt.none(BlobSidecars))

    check: missing.error == VerifierError.MissingParent

    check:
      not dag.containsForkBlock(b2.root) # Unresolved, shouldn't show up

      FetchRecord(root: b1.root) in quarantine[].checkMissing(32)

    let
      status = await processor[].addBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(b1),
        Opt.none(BlobSidecars))
      b1Get = dag.getBlockRef(b1.root)

    check:
      status.isOk
      b1Get.isSome()
      dag.containsForkBlock(b1.root)
      not dag.containsForkBlock(b2.root) # Async pipeline must still run

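    # Drain the processor's queue: with b1 imported, the quarantined b2 can
    # now be verified and added to the DAG.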
    while processor[].hasBlocks():
      poll()

    let
      b2Get = dag.getBlockRef(b2.root)

    check:
      b2Get.isSome()

      b2Get.get().parent == b1Get.get()

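    # Promote b2 to head and exercise the pruning helper defined above.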
    dag.updateHead(b2Get.get(), quarantine[], [])
    dag.pruneAtFinalization()

    # The heads structure should have been updated to contain only the new
    # b2 head
    check:
      dag.heads.mapIt(it) == @[b2Get.get()]

    # check that init also reloads block graph
    var
      validatorMonitor2 = newClone(ValidatorMonitor.init())
      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    check:
      # ensure we loaded the correct head state
      dag2.head.root == b2.root
      getStateRoot(dag2.headState) == b2.message.state_root
      dag2.getBlockRef(b1.root).isSome()
      dag2.getBlockRef(b2.root).isSome()
      dag2.heads.len == 1
      dag2.heads[0].root == b2.root