nimbus-eth2/tests/test_block_processor.nim
Jacek Sieka 61342c2449
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks

Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable as
the chain keeps growing.

We can distinguish between two cases where by-root access is useful:

* unfinalized blocks - this is where the beacon chain generally
operates, validating incoming data that may matter for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc. -
effectively unbounded

In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
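
A minimal sketch of what this split looks like to a caller - the
wrapper proc is hypothetical; only `getBlockRef` returning
`Opt[BlockRef]` is taken from this PR, and the slot-based path for
finalized history is described in the comment only:

```nim
import
  stew/results,
  ../beacon_chain/spec/digest,
  ../beacon_chain/consensus_object_pools/blockchain_dag

# Hypothetical caller: a by-root lookup now only answers for unfinalized
# blocks, so a `none` result no longer means "unknown block" - the block
# may simply be finalized, in which case it should be looked up by slot.
proc describeByRoot(dag: ChainDAGRef, root: Eth2Digest): string =
  let blck = dag.getBlockRef(root) # Opt[BlockRef] after this PR
  if blck.isSome():
    "unfinalized block at slot " & $blck.get().slot
  else:
    "not in the fork block index - finalized (look up by slot) or unknown"
```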

Future work includes:

* limiting the `BlockRef` horizon in general - each instance is 40
bytes plus overhead, which adds up - this needs further refactoring to
deal with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)

Anyway, this PR easily shaves ~128 MB of memory usage at the time of
writing.

* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this case becomes a lot more common now and thus deserves more
attention (see the sketch after this list)
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now on - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that the block referenced by a `BlockRef` can
reliably be fetched with `getForkedBlock`
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
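
The `Opt[BlockRef]` change ripples through call sites; here is a hedged
sketch of the migration pattern with a hypothetical caller - it is not
an actual call site from this PR:

```nim
import
  stew/results,
  ../beacon_chain/spec/digest,
  ../beacon_chain/consensus_object_pools/blockchain_dag

# Previously callers got back a possibly-nil BlockRef; the Opt makes the
# missing case explicit in the type, which matters now that "missing"
# also covers every finalized root, not just unknown blocks.
proc slotOf(dag: ChainDAGRef, root: Eth2Digest): Opt[Slot] =
  let blck = dag.getBlockRef(root)
  if blck.isNone():
    return Opt.none(Slot) # finalized or unknown - caller decides
  Opt.some(blck.get().slot)
```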

* fix dag.blocks ref
2022-01-21 13:33:16 +02:00

# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  chronicles, chronos,
  std/[options, sequtils],
  unittest2,
  eth/keys, taskpools,
  ../beacon_chain/beacon_clock,
  ../beacon_chain/spec/[beaconstate, forks, helpers, state_transition],
  ../beacon_chain/gossip_processing/[block_processor, consensus_manager],
  ../beacon_chain/consensus_object_pools/[
    attestation_pool, blockchain_dag, block_quarantine, block_clearance],
  ./testutil, ./testdbutil, ./testblockutil

proc pruneAtFinalization(dag: ChainDAGRef) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()

suite "Block processor" & preset():
setup:
var
db = makeTestDB(SLOTS_PER_EPOCH)
validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
taskpool = Taskpool.new()
verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
quarantine = newClone(Quarantine.init())
attestationPool = newClone(AttestationPool.init(dag, quarantine))
consensusManager = ConsensusManager.new(dag, attestationPool, quarantine)
state = newClone(dag.headState.data)
cache = StateCache()
b1 = addTestBlock(state[], cache).phase0Data
b2 = addTestBlock(state[], cache).phase0Data
getTimeFn = proc(): BeaconTime = b2.message.slot.start_beacon_time()
processor = BlockProcessor.new(
false, "", "", keys.newRng(), taskpool, consensusManager,
validatorMonitor, getTimeFn)
test "Reverse order block add & get" & preset():
let missing = processor[].storeBlock(
MsgSource.gossip, b2.message.slot.start_beacon_time(), b2)
check: missing.error == BlockError.MissingParent
check:
not dag.containsForkBlock(b2.root) # Unresolved, shouldn't show up
FetchRecord(root: b1.root) in quarantine[].checkMissing()
let
status = processor[].storeBlock(
MsgSource.gossip, b2.message.slot.start_beacon_time(), b1)
b1Get = dag.getBlockRef(b1.root)
check:
status.isOk
b1Get.isSome()
dag.containsForkBlock(b1.root)
not dag.containsForkBlock(b2.root) # Async pipeline must still run
discard processor.runQueueProcessingLoop()
while processor[].hasBlocks():
poll()
let
b2Get = dag.getBlockRef(b2.root)
check:
b2Get.isSome()
b2Get.get().parent == b1Get.get()
dag.updateHead(b2Get.get(), quarantine[])
dag.pruneAtFinalization()
# The heads structure should have been updated to contain only the new
# b2 head
check:
dag.heads.mapIt(it) == @[b2Get.get()]
# check that init also reloads block graph
var
validatorMonitor2 = newClone(ValidatorMonitor.init())
dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})
check:
# ensure we loaded the correct head state
dag2.head.root == b2.root
getStateRoot(dag2.headState.data) == b2.message.state_root
dag2.getBlockRef(b1.root).isSome()
dag2.getBlockRef(b2.root).isSome()
dag2.heads.len == 1
dag2.heads[0].root == b2.root