nimbus-eth2/tests/test_block_processor.nim
Eugene Kabanov 18409a69e1
Light forward sync mechanism (#6515)
* Initial commit.

* Add hybrid syncing.

* Compilation fixes.

* Cast custom event for our purposes.

* Instantiate AsyncEventQueue properly.

* Fix mistype.

* Further research on optimistic updates.

* Fixing circular deps.

* Add backfilling.

* Add block download feature.

* Add block store.

* Update backfill information before storing block.

* Use custom block verifier for backfilling sync.

* Skip signature verification in backfilling.

* Add one more generic reload to storeBackfillBlock().

* Add block verification debugging statements.

* Add more debugging

* Do not use database for backfilling, part 1.

* Fix for stash.

* Stash fixes part 2.

* Prepare for testing.

* Fix assertion.

* Fix post-restart syncing process.

* Update backfill loading log statement.
Use proper backfill slot callback for sync manager.

* Add handling of Duplicates.

* Fix store duration and block backfilled log statements.

* Add proper syncing state log statement.

* Add snappy compression to beaconchain_file.
Format syncing speed properly.

* Add blobs verification.

* Add `slot` number to file structure for easy navigation over stream of compressed objects.

* Change database filename.

* Fix structure size.

* Add more consistency properties.

* Fix checkRepair() issues.

* Preparation to state rebuild process.

* Add plain & compressed size.

* Debugging snappy encode process.

* Add one more debugging line.

* Dump blocks.

* One more filedump.

* Fix chunk corruption code.

* Fix detection issue.

* Some fixes in state rebuilding process.

* Add more clearance steps.

* Move updateHead() back to block_processor.

* Fix compilation issues.

* Make code more async friendly.

* Fix async issues.
Add more information when proposer verification failed.

* Fix 8192 slots issue.

* Fix Future double completion issue.

* Pass updateFlags to some of the core procedures.

* Fix tests.

* Improve initial sync handling mechanism.

* Fix checkStateTransition() performance improvements.

* Add some performance tuning and meters.

* Light client performance tuning.

* Remove debugging statement.

* Use single file descriptor for blockchain file.

* Attempt to fix LC.

* Fix timeleft calculation when untrusted sync backfilling started right after LC block received.

* Workaround for `chronicles` + `results` `error` issue.
Remove some compilation warnings.
Fix `CatchableError` leaks on Windows.

* Address review comments.

* Address review comments part 2.

* Address review comments part 1.

* Rebase and fix the issues.

* Address review comments part 3.

* Add tests and fix some issues in auto-repair mechanism.

* Add tests to all_tests.

* Rename binary test file to pass restrictions.

* Add `bin` extension to excluded list.
Recover binary test data.

* Rename fixture file to .bin again.

* Update AllTests.

* Address review comments part 4.

* Address review comments part 5 and fix tests.

* Address review comments part 6.

* Eliminate foldl and combine from blobs processing.
Add some tests to ensure that checkResponse() also checks for correct order.

* Fix forgotten place.

* Post rebase fixes.

* Add unique slots tests.

* Optimize updateHead() code.

* Add forgotten changes.

* Address review comments on state as argument.
2024-10-30 05:38:53 +00:00


# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
  chronos,
  std/sequtils,
  unittest2,
  taskpools,
  ../beacon_chain/conf,
  ../beacon_chain/spec/[beaconstate, forks, helpers, state_transition],
  ../beacon_chain/spec/datatypes/deneb,
  ../beacon_chain/gossip_processing/block_processor,
  ../beacon_chain/consensus_object_pools/[
    attestation_pool, blockchain_dag, blob_quarantine, block_quarantine,
    block_clearance, consensus_manager],
  ../beacon_chain/el/el_manager,
  ./testutil, ./testdbutil, ./testblockutil

from chronos/unittest2/asynctests import asyncTest
from ../beacon_chain/spec/eth2_apis/dynamic_fee_recipients import
  DynamicFeeRecipientsStore, init
from ../beacon_chain/validators/action_tracker import ActionTracker
from ../beacon_chain/validators/keystore_management import KeymanagerHost
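
# Test helper: prune the DAG's state caches when the DAG reports that
# finalization has made pruning necessary.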
proc pruneAtFinalization(dag: ChainDAGRef) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()
suite "Block processor" & preset():
setup:
let rng = HmacDrbgContext.new()
var
db = makeTestDB(SLOTS_PER_EPOCH)
validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
taskpool = Taskpool.new()
quarantine = newClone(Quarantine.init())
blobQuarantine = newClone(BlobQuarantine())
attestationPool = newClone(AttestationPool.init(dag, quarantine))
elManager = new ELManager # TODO: initialise this properly
actionTracker: ActionTracker
consensusManager = ConsensusManager.new(
dag, attestationPool, quarantine, elManager, actionTracker,
newClone(DynamicFeeRecipientsStore.init()), "",
Opt.some default(Eth1Address), defaultGasLimit)
state = newClone(dag.headState)
cache = StateCache()
b1 = addTestBlock(state[], cache).phase0Data
b2 = addTestBlock(state[], cache).phase0Data
getTimeFn = proc(): BeaconTime = b2.message.slot.start_beacon_time()
batchVerifier = BatchVerifier.new(rng, taskpool)
processor = BlockProcessor.new(
false, "", "", batchVerifier, consensusManager,
validatorMonitor, blobQuarantine, getTimeFn)
processorFut = processor.runQueueProcessingLoop()
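
  # The fixture above creates a two-block chain (b1 -> b2) on top of the test
  # database's head state and starts the processor's queue loop, so blocks
  # added below are verified asynchronously.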
asyncTest "Reverse order block add & get" & preset():
let
missing = await processor[].addBlock(
MsgSource.gossip, ForkedSignedBeaconBlock.init(b2),
Opt.none(BlobSidecars))
check: missing.error == VerifierError.MissingParent
check:
not dag.containsForkBlock(b2.root) # Unresolved, shouldn't show up
FetchRecord(root: b1.root) in quarantine[].checkMissing(32)
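
    # Adding the missing parent b1 should succeed right away; the quarantined
    # b2 is only resolved once the processor's async queue is drained below.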
    let
      status = await processor[].addBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(b1),
        Opt.none(BlobSidecars))
      b1Get = dag.getBlockRef(b1.root)

    check:
      status.isOk
      b1Get.isSome()
      dag.containsForkBlock(b1.root)
      not dag.containsForkBlock(b2.root) # Async pipeline must still run

    while processor[].hasBlocks():
      poll()
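
    # Queue drained: b2 should now be in the DAG with b1 as its parent.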
    let
      b2Get = dag.getBlockRef(b2.root)

    check:
      b2Get.isSome()
      b2Get.get().parent == b1Get.get()

    dag.updateHead(b2Get.get(), quarantine[], [])
    dag.pruneAtFinalization()

    # The heads structure should have been updated to contain only the new
    # b2 head
    check:
      dag.heads.mapIt(it) == @[b2Get.get()]

    # check that init also reloads block graph
    var
      validatorMonitor2 = newClone(ValidatorMonitor.init())
      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    check:
      # ensure we loaded the correct head state
      dag2.head.root == b2.root
      getStateRoot(dag2.headState) == b2.message.state_root
      dag2.getBlockRef(b1.root).isSome()
      dag2.getBlockRef(b2.root).isSome()
      dag2.heads.len == 1
      dag2.heads[0].root == b2.root